Deploying Hadoop HA on openEuler Linux

Hostname   IP address
spark01    192.168.226.11
spark02    192.168.226.12
spark03    192.168.226.13

Upgrade the operating system and software

yum -y update

A reboot is recommended after the upgrade.

Install commonly used packages

yum -y install gcc gcc-c++ autoconf automake cmake make rsync vim man zip unzip net-tools zlib zlib-devel openssl openssl-devel pcre-devel tcpdump lrzsz tar wget

Set the hostname

hostnamectl set-hostname spark01
# or edit the file directly
vim /etc/hostname
spark01

reboot
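
Run the same command on each node with its own name, for example:

hostnamectl set-hostname spark01   # on 192.168.226.11
hostnamectl set-hostname spark02   # on 192.168.226.12
hostnamectl set-hostname spark03   # on 192.168.226.13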

Configure a static IP address

vim /etc/sysconfig/network-scripts/ifcfg-ens160

Sample NIC configuration file:

TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=none
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
NAME=ens160
UUID=943779e9-249c-44bb-b272-d49ea5831ed4
DEVICE=ens160
ONBOOT=yes
IPADDR=192.168.226.11
PREFIX=24
GATEWAY=192.168.226.2
DNS1=192.168.226.2

After saving, apply the new configuration by bringing the connection up again:

nmcli con up ens160
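
Alternatively, the same static configuration can be set with nmcli instead of editing the ifcfg file. A sketch using the spark01 address (adjust per node):

nmcli con mod ens160 ipv4.method manual \
  ipv4.addresses 192.168.226.11/24 \
  ipv4.gateway 192.168.226.2 \
  ipv4.dns 192.168.226.2
nmcli con up ens160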

Disable the firewall and SELinux

systemctl stop firewalld
systemctl disable firewalld
vim /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
# SELINUX=enforcing
# SELINUXTYPE= can take one of three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected. 
#     mls - Multi Level Security protection.
# SELINUXTYPE=targeted 

SELINUX=disabled

Apply it immediately for the current boot (setenforce 0 switches SELinux to permissive; the config-file change takes full effect after a reboot):

setenforce 0

Or do both in one step:

sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
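
Verify the result:

getenforce   # prints Permissive now, Disabled after a reboot
sestatus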

Create the installation directory, upload the software, and configure environment variables

mkdir -p /opt/soft
cd /opt/soft
# upload the jdk, hadoop, and zookeeper archives
tar -zxvf jdk-8u361-linux-x64.tar.gz
mv jdk1.8.0_361 jdk8
tar -zxvf hadoop-3.3.5.tar.gz
mv hadoop-3.3.5 hadoop3
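
The HA configuration below relies on a ZooKeeper quorum on spark01, spark02 and spark03 (port 2181), but the original steps do not show the ZooKeeper install. A minimal sketch, assuming an apache-zookeeper-*-bin.tar.gz archive was uploaded alongside the JDK:

tar -zxvf apache-zookeeper-*-bin.tar.gz
mv apache-zookeeper-*-bin zookeeper
mkdir -p /home/zookeeper/data
cat > /opt/soft/zookeeper/conf/zoo.cfg <<'EOF'
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/home/zookeeper/data
clientPort=2181
server.1=spark01:2888:3888
server.2=spark02:2888:3888
server.3=spark03:2888:3888
EOF
# each node needs a unique id: 1 on spark01, 2 on spark02, 3 on spark03
echo 1 > /home/zookeeper/data/myid
# add /opt/soft/zookeeper/bin to PATH in /etc/profile so zkServer.sh resolves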

vim /etc/profile

export JAVA_HOME=/opt/soft/jdk8
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin

export HADOOP_HOME=/opt/soft/hadoop3

export HADOOP_INSTALL=${HADOOP_HOME}
export HADOOP_MAPRED_HOME=${HADOOP_HOME}
export HADOOP_COMMON_HOME=${HADOOP_HOME}
export HADOOP_HDFS_HOME=${HADOOP_HOME}
export YARN_HOME=${HADOOP_HOME}
export PATH=${PATH}:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop

export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_SECONDARYNAMENODE_USER=root
export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root

After editing, run source so that /etc/profile takes effect:

source /etc/profile

Check the environment variables:

printenv
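
A quick sanity check that the JDK and Hadoop are on the PATH:

java -version
hadoop version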

Configure hostname resolution

vim /etc/hosts

192.168.226.11 spark01
192.168.226.12 spark02
192.168.226.13 spark03

A reboot is recommended after this change.
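
Each name should then resolve and respond, e.g.:

for h in spark01 spark02 spark03; do ping -c 1 $h; done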

Edit the Hadoop configuration files, located in the etc/hadoop directory of the extracted Hadoop tree:

cd /opt/soft/hadoop3/etc/hadoop

Modify the following files:

hadoop-env.sh
core-site.xml
hdfs-site.xml
workers
mapred-site.xml
yarn-site.xml

Append the following to the end of hadoop-env.sh:

export JAVA_HOME=/opt/soft/jdk8
export HDFS_NAMENODE_USER=root
export HDFS_DATANODE_USER=root
export HDFS_ZKFC_USER=root
export HDFS_JOURNALNODE_USER=root

export YARN_RESOURCEMANAGER_USER=root
export YARN_NODEMANAGER_USER=root

core-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
  <!-- Logical name of the HA nameservice (matches dfs.nameservices in hdfs-site.xml) -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://lihaozhe</value>
  </property>
  <!-- Base directory for Hadoop temporary/data files -->
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/home/hadoop/data</value>
  </property>
  <!-- User shown as owner in the HDFS web UI -->
  <property>
    <name>hadoop.http.staticuser.user</name>
    <value>root</value>
  </property>
  <!-- ZooKeeper quorum used for automatic failover -->
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>spark01:2181,spark02:2181,spark03:2181</value>
  </property>
  <!-- Disable HDFS permission checks (convenient for testing) -->
  <property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
  </property>
  <!-- Allow the root proxy user from any host and group -->
  <property>
    <name>hadoop.proxyuser.root.hosts</name>
    <value>*</value>
  </property>
  <property>
    <name>hadoop.proxyuser.root.groups</name>
    <value>*</value>
  </property>
</configuration>

hdfs-site.xml

<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
  <!-- Logical nameservice ID (referenced by fs.defaultFS) -->
  <property>
    <name>dfs.nameservices</name>
    <value>lihaozhe</value>
  </property>
  <!-- The two NameNodes that make up the nameservice -->
  <property>
    <name>dfs.ha.namenodes.lihaozhe</name>
    <value>nn1,nn2</value>
  </property>
  <!-- RPC and HTTP addresses of each NameNode -->
  <property>
    <name>dfs.namenode.rpc-address.lihaozhe.nn1</name>
    <value>spark01:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.lihaozhe.nn2</name>
    <value>spark02:8020</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.lihaozhe.nn1</name>
    <value>spark01:9870</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.lihaozhe.nn2</name>
    <value>spark02:9870</value>
  </property>
  <!-- JournalNode quorum holding the shared edit log -->
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://spark01:8485;spark02:8485;spark03:8485/lihaozhe</value>
  </property>
  <!-- Proxy provider clients use to locate the active NameNode -->
  <property>
    <name>dfs.client.failover.proxy.provider.lihaozhe</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <!-- Fence the old active NameNode over SSH during failover (requires the passwordless SSH set up below) -->
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
  <!-- Local storage for JournalNode edits -->
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/home/hadoop/journalnode/data</value>
  </property>
  <!-- Enable ZKFC-based automatic failover -->
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <!-- Fraction of blocks that must report before leaving safe mode
       (deprecated key name; dfs.namenode.safemode.threshold-pct in Hadoop 3) -->
  <property>
    <name>dfs.safemode.threshold.pct</name>
    <value>1</value>
  </property>
</configuration>

workers

192.168.226.11
192.168.226.12
192.168.226.13

mapred-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <name>mapreduce.application.classpath</name>
    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
  </property>
</configuration>

yarn-site.xml

<?xml version="1.0"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->
<configuration>

<!-- Site specific YARN configuration properties -->

  <!-- Enable ResourceManager HA -->
  <property>
    <name>yarn.resourcemanager.ha.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>yarn.resourcemanager.cluster-id</name>
    <value>cluster1</value>
  </property>
  <!-- The two ResourceManagers and where they run -->
  <property>
    <name>yarn.resourcemanager.ha.rm-ids</name>
    <value>rm1,rm2</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm1</name>
    <value>spark01</value>
  </property>
  <property>
    <name>yarn.resourcemanager.hostname.rm2</name>
    <value>spark02</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm1</name>
    <value>spark01:8088</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address.rm2</name>
    <value>spark02:8088</value>
  </property>
  <!-- ZooKeeper quorum used for RM state and leader election -->
  <property>
    <name>yarn.resourcemanager.zk-address</name>
    <value>spark01:2181,spark02:2181,spark03:2181</value>
  </property>
  <!-- Shuffle service required by MapReduce -->
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <!-- Environment variables NodeManagers pass through to containers -->
  <property>
    <name>yarn.nodemanager.env-whitelist</name>
    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
  </property>
</configuration>

Configure passwordless SSH login

Generate a local key pair and append the public key to the authorized keys file:

ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
# or
ssh-copy-id spark01
ssh-copy-id spark02
ssh-copy-id spark03
scp -rv ~/.ssh root@spark02:~/
scp -rv ~/.ssh root@spark03:~/
# SSH to yourself once to accept the host key
ssh spark01
# answer yes to "Are you sure you want to continue connecting (yes/no)?"
# after logging in, run exit or logout to return
exit
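
Once the keys are distributed, every node should be reachable without a password prompt; a quick check from spark01:

for h in spark01 spark02 spark03; do ssh $h hostname; done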

Copy the configuration files to the other servers (or use a distribution script):

scp -v /etc/profile root@spark02:/etc
scp -v /etc/profile root@spark03:/etc
scp -rv /opt/soft/hadoop3/etc/hadoop/* root@spark02:/opt/soft/hadoop3/etc/hadoop/
scp -rv /opt/soft/hadoop3/etc/hadoop/* root@spark03:/opt/soft/hadoop3/etc/hadoop/

Make the environment variables take effect on each server:

source /etc/profile

Initialize Hadoop

# create the data directories (on every node)
mkdir -p /home/hadoop/data /home/hadoop/journalnode/data

1. Start ZooKeeper on all three nodes: zkServer.sh start
2. Start the three JournalNodes: hadoop-daemon.sh start journalnode
3. Format HDFS on one of the NameNodes: hdfs namenode -format
4. Copy the freshly formatted metadata to the other NameNode:
    a) Start the NameNode that was just formatted: hadoop-daemon.sh start namenode
    b) On the other (unformatted) NameNode, run: hdfs namenode -bootstrapStandby
    c) Start the second NameNode: hadoop-daemon.sh start namenode
5. On one NameNode, initialize the HA state in ZooKeeper: hdfs zkfc -formatZK
6. Stop the daemons started above: stop-dfs.sh
7. Start HDFS in full: start-dfs.sh
8. Start YARN: start-yarn.sh (a single ResourceManager can also be started with yarn-daemon.sh start resourcemanager)

Note: in Hadoop 3 the per-daemon scripts are deprecated; hdfs --daemon start journalnode and yarn --daemon start resourcemanager are the current equivalents.
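
After a full start, the HA state of each NameNode and ResourceManager can be checked (nn1/nn2 and rm1/rm2 are the IDs defined in hdfs-site.xml and yarn-site.xml above):

hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2
yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2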

9. Safe mode

hdfs dfsadmin -safemode enter
hdfs dfsadmin -safemode leave
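
Two related subcommands are useful when scripting:

hdfs dfsadmin -safemode get    # report the current safe-mode state
hdfs dfsadmin -safemode wait   # block until safe mode is exited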


10. List the NameNodes and query their state

hdfs getconf -namenodes
# -getServiceState takes the NameNode ID defined in hdfs-site.xml, not the hostname
hdfs haadmin -getServiceState nn1

11. Force a state transition

# likewise addressed by NameNode ID; --forcemanual overrides the automatic failover controller
hdfs haadmin -transitionToActive --forcemanual nn1

Important notes:

# before shutting down, stop the services in order
stop-yarn.sh
stop-dfs.sh
# after booting, start the services in order
start-dfs.sh
start-yarn.sh

Or:

# stop all services before shutdown
stop-all.sh
# start all services after boot
start-all.sh
# after starting or stopping, check the processes with jps before doing anything else
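
With every service up on this layout, jps should list processes along these lines (the exact set varies by node role):

jps
# QuorumPeerMain                -- ZooKeeper
# NameNode / DataNode           -- HDFS
# JournalNode                   -- shared edit log
# DFSZKFailoverController       -- zkfc
# ResourceManager / NodeManager -- YARN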