Environment preparation
| IP | HOSTNAME | SYSTEM |
| --- | --- | --- |
| 192.168.131.129 | hadoop-master | CentOS 7.6 |
| 192.168.131.135 | hadoop-slave1 | CentOS 7.6 |
| 192.168.131.137 | hadoop-slave2 | CentOS 7.6 |
[root@localhost ~]# cat /etc/redhat-release
CentOS Linux release 7.6.1810 (Core)
[root@localhost ~]# sestatus
SELinux status:                 disabled
[root@localhost ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
   Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
   Active: inactive (dead)
     Docs: man:firewalld(1)
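Each machine also needs its hostname set to match the table above, and if firewalld or SELinux is still enabled on any node, turn them off. A typical sequence (the hostnamectl argument differs per node; the sed edit keeps SELinux disabled across reboots):

[root@localhost ~]# hostnamectl set-hostname hadoop-master    # hadoop-slave1 / hadoop-slave2 on the other two nodes
[root@localhost ~]# systemctl stop firewalld && systemctl disable firewalld
[root@localhost ~]# setenforce 0    # prints an error but does no harm when SELinux is already disabled
[root@localhost ~]# sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config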
Configure passwordless SSH
[root@hadoop-master ~]# cat >> /etc/hosts << EOF
192.168.131.129 hadoop-master
192.168.131.135 hadoop-slave1
192.168.131.137 hadoop-slave2
EOF
[root@hadoop-master ~]# ssh-keygen -t rsa
[root@hadoop-master ~]# ssh-copy-id hadoop-slave1
[root@hadoop-master ~]# ssh-copy-id hadoop-slave2
[root@hadoop-master ~]# scp /etc/hosts hadoop-slave1:/etc/hosts
hosts                                                100%  248   151.9KB/s   00:00
[root@hadoop-master ~]# scp /etc/hosts hadoop-slave2:/etc/hosts
hosts                                                100%  248   220.9KB/s   00:00
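To confirm that passwordless login works, each slave should return its hostname without prompting for a password:

[root@hadoop-master ~]# ssh hadoop-slave1 hostname
hadoop-slave1
[root@hadoop-master ~]# ssh hadoop-slave2 hostname
hadoop-slave2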
Configure the Java environment
[root@hadoop-master ~]# tar -zxvf jdk-8u211-linux-x64.tar.gz -C /usr/local/
[root@hadoop-master ~]# ln -s /usr/local/jdk1.8.0_211/ /usr/local/java
[root@hadoop-master ~]# ll /usr/local/
total 0
drwxr-xr-x. 2 root root 6 Apr 11 2018 bin
drwxr-xr-x. 2 root root 6 Apr 11 2018 etc
drwxr-xr-x. 2 root root 6 Apr 11 2018 games
drwxr-xr-x. 2 root root 6 Apr 11 2018 include
lrwxrwxrwx 1 root root 24 Sep 7 14:49 java -> /usr/local/jdk1.8.0_211/
drwxr-xr-x 7 10 143 245 Apr 2 2019 jdk1.8.0_211
drwxr-xr-x. 2 root root 6 Apr 11 2018 lib
drwxr-xr-x. 2 root root 6 Apr 11 2018 lib64
drwxr-xr-x. 2 root root 6 Apr 11 2018 libexec
drwxr-xr-x. 2 root root 6 Apr 11 2018 sbin
drwxr-xr-x. 5 root root 49 Mar 30 2019 share
drwxr-xr-x. 2 root root 6 Apr 11 2018 src
[root@hadoop-master ~]# vim /etc/profile    # append the following three lines
export JAVA_HOME=/usr/local/java/
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
[root@hadoop-master ~]# source /etc/profile
[root@hadoop-master ~]# java -version
java version "1.8.0_211"
Java(TM) SE Runtime Environment (build 1.8.0_211-b12)
Java HotSpot(TM) 64-Bit Server VM (build 25.211-b12, mixed mode)
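The slaves need the same JDK at the same path. One way (assuming an identical layout on every node) is to copy the extracted directory over and recreate the symlink via SSH:

[root@hadoop-master ~]# scp -r /usr/local/jdk1.8.0_211 hadoop-slave1:/usr/local/
[root@hadoop-master ~]# scp -r /usr/local/jdk1.8.0_211 hadoop-slave2:/usr/local/
[root@hadoop-master ~]# ssh hadoop-slave1 "ln -s /usr/local/jdk1.8.0_211/ /usr/local/java"
[root@hadoop-master ~]# ssh hadoop-slave2 "ln -s /usr/local/jdk1.8.0_211/ /usr/local/java"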
Deploy Hadoop
[root@hadoop-master ~]# wget https://archive.apache.org/dist/hadoop/common/hadoop-2.7.7/hadoop-2.7.7.tar.gz
[root@hadoop-master ~]# tar -zxvf hadoop-2.7.7.tar.gz -C /opt/
[root@hadoop-master ~]# vim /etc/profile    # append the following two lines
export HADOOP_HOME=/opt/hadoop-2.7.7
export PATH=$HADOOP_HOME/bin:$PATH
[root@hadoop-master ~]# source /etc/profile
[root@hadoop-master ~]# cd /opt/hadoop-2.7.7/
[root@hadoop-master hadoop-2.7.7]# ls
bin etc include lib libexec LICENSE.txt NOTICE.txt README.txt sbin share
[root@hadoop-master hadoop-2.7.7]# cd etc/hadoop/
[root@hadoop-master hadoop]# ll
total 152
-rw-r--r-- 1 1000 ftp 4436 Jul 19 2018 capacity-scheduler.xml
-rw-r--r-- 1 1000 ftp 1335 Jul 19 2018 configuration.xsl
-rw-r--r-- 1 1000 ftp 318 Jul 19 2018 container-executor.cfg
-rw-r--r-- 1 1000 ftp 774 Jul 19 2018 core-site.xml
-rw-r--r-- 1 1000 ftp 3670 Jul 19 2018 hadoop-env.cmd
-rw-r--r-- 1 1000 ftp 4224 Jul 19 2018 hadoop-env.sh
-rw-r--r-- 1 1000 ftp 2598 Jul 19 2018 hadoop-metrics2.properties
-rw-r--r-- 1 1000 ftp 2490 Jul 19 2018 hadoop-metrics.properties
-rw-r--r-- 1 1000 ftp 9683 Jul 19 2018 hadoop-policy.xml
-rw-r--r-- 1 1000 ftp 775 Jul 19 2018 hdfs-site.xml
-rw-r--r-- 1 1000 ftp 1449 Jul 19 2018 httpfs-env.sh
-rw-r--r-- 1 1000 ftp 1657 Jul 19 2018 httpfs-log4j.properties
-rw-r--r-- 1 1000 ftp 21 Jul 19 2018 httpfs-signature.secret
-rw-r--r-- 1 1000 ftp 620 Jul 19 2018 httpfs-site.xml
-rw-r--r-- 1 1000 ftp 3518 Jul 19 2018 kms-acls.xml
-rw-r--r-- 1 1000 ftp 1527 Jul 19 2018 kms-env.sh
-rw-r--r-- 1 1000 ftp 1631 Jul 19 2018 kms-log4j.properties
-rw-r--r-- 1 1000 ftp 5540 Jul 19 2018 kms-site.xml
-rw-r--r-- 1 1000 ftp 11801 Jul 19 2018 log4j.properties
-rw-r--r-- 1 1000 ftp 951 Jul 19 2018 mapred-env.cmd
-rw-r--r-- 1 1000 ftp 1383 Jul 19 2018 mapred-env.sh
-rw-r--r-- 1 1000 ftp 4113 Jul 19 2018 mapred-queues.xml.template
-rw-r--r-- 1 1000 ftp 758 Jul 19 2018 mapred-site.xml.template
-rw-r--r-- 1 1000 ftp 10 Jul 19 2018 slaves
-rw-r--r-- 1 1000 ftp 2316 Jul 19 2018 ssl-client.xml.example
-rw-r--r-- 1 1000 ftp 2697 Jul 19 2018 ssl-server.xml.example
-rw-r--r-- 1 1000 ftp 2250 Jul 19 2018 yarn-env.cmd
-rw-r--r-- 1 1000 ftp 4567 Jul 19 2018 yarn-env.sh
-rw-r--r-- 1 1000 ftp 690 Jul 19 2018 yarn-site.xml
Set the JAVA_HOME parameter in hadoop-env.sh and yarn-env.sh
[root@hadoop-master hadoop]# vim hadoop-env.sh
[root@hadoop-master hadoop]# vim yarn-env.sh
[root@hadoop-master hadoop]# grep '^export JAVA_HOME' hadoop-env.sh
export JAVA_HOME=/usr/local/java
[root@hadoop-master hadoop]# grep '^export JAVA_HOME' yarn-env.sh
export JAVA_HOME=/usr/local/java
Edit core-site.xml
[root@hadoop-master hadoop]# vim core-site.xml
<configuration>
    <property>
        <name>fs.default.name</name>
        <value>hdfs://192.168.131.129:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/hadoop/tmp</value>
    </property>
</configuration>
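fs.default.name is the deprecated 1.x spelling of this key; Hadoop 2.7.x accepts it as well as the newer fs.defaultFS. With HADOOP_HOME on the PATH you can check the value Hadoop actually resolves:

[root@hadoop-master hadoop]# hdfs getconf -confKey fs.defaultFS
hdfs://192.168.131.129:9000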
Edit hdfs-site.xml
[root@hadoop-master hadoop]# vim hdfs-site.xml
<configuration>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/opt/hadoop/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/opt/hadoop/dfs/data</value>
    </property>
</configuration>
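With exactly two DataNodes, a dfs.replication of 2 places a copy of every block on both slaves. The name and data directories are created automatically when the NameNode is formatted and the DataNodes first start, but pre-creating them on each node surfaces permission problems early (optional):

[root@hadoop-master hadoop]# mkdir -p /opt/hadoop/dfs/name /opt/hadoop/dfs/data /opt/hadoop/tmp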
Edit mapred-site.xml
[root@hadoop-master hadoop]# cp mapred-site.xml.template mapred-site.xml
[root@hadoop-master hadoop]# vim mapred-site.xml
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>192.168.131.129:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>192.168.131.129:19888</value>
    </property>
</configuration>
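Note that neither start-dfs.sh nor start-yarn.sh launches the JobHistory server these two addresses (ports 10020 and 19888) refer to; once the cluster is up, start it separately:

[root@hadoop-master sbin]# ./mr-jobhistory-daemon.sh start historyserver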
Edit yarn-site.xml
[root@hadoop-master hadoop]# vim yarn-site.xml
<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>192.168.131.129:8032</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>192.168.131.129:8030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>192.168.131.129:8031</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>192.168.131.129:8033</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>192.168.131.129:8088</value>
    </property>
</configuration>
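All five resourcemanager addresses point at the master, so an equivalent, shorter form is to set only yarn.resourcemanager.hostname and let the per-service ports (8032, 8030, 8031, 8033, 8088) take their defaults:

<property>
    <name>yarn.resourcemanager.hostname</name>
    <value>hadoop-master</value>
</property>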
Edit slaves
[root@hadoop-master hadoop]# vim slaves
hadoop-slave1
hadoop-slave2
Distribute the configuration to every node
[root@hadoop-master opt]# scp -r hadoop-2.7.7 hadoop-slave1:/opt/
[root@hadoop-master opt]# scp -r hadoop-2.7.7 hadoop-slave2:/opt/
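The slaves also need the JAVA_HOME and HADOOP_HOME settings from earlier; assuming nothing node-specific lives in /etc/profile, copying it over keeps all three machines consistent (it takes effect at the next login):

[root@hadoop-master opt]# scp /etc/profile hadoop-slave1:/etc/profile
[root@hadoop-master opt]# scp /etc/profile hadoop-slave2:/etc/profile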
Start Hadoop on the master node
Initialize (format) the NameNode
[root@hadoop-master ~]# hdfs namenode -format
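A successful format prints a line similar to the following near the end of its output:

Storage directory /opt/hadoop/dfs/name has been successfully formatted.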
Start HDFS
[root@hadoop-master ~]# cd /opt/hadoop-2.7.7/sbin/
[root@hadoop-master sbin]# ./start-dfs.sh
Start YARN
[root@hadoop-master sbin]# ./start-yarn.sh
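jps on each node shows whether the daemons came up: with this layout the master should be running NameNode, SecondaryNameNode, and ResourceManager, and each slave DataNode and NodeManager. The full jps path is used below because a non-interactive ssh session does not source /etc/profile:

[root@hadoop-master sbin]# jps
[root@hadoop-master sbin]# ssh hadoop-slave1 /usr/local/java/bin/jps
[root@hadoop-master sbin]# ssh hadoop-slave2 /usr/local/java/bin/jps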
Testing
Open the web UIs in a browser:
YARN web UI: http://192.168.131.129:8088/
HDFS web UI: http://192.168.131.129:50070/
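Finally, a quick end-to-end check: put a file into HDFS and run the bundled pi example on YARN:

[root@hadoop-master ~]# hdfs dfs -mkdir /test
[root@hadoop-master ~]# hdfs dfs -put /etc/hosts /test/
[root@hadoop-master ~]# hdfs dfs -ls /test
[root@hadoop-master ~]# hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.7.jar pi 2 10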