# Hadoop 伪分布式(单机)安装
#
# 基本步骤和配置文件;配置文件中开启了远程访问(服务监听 0.0.0.0)。

#!/usr/bin/env bash
# NOTE(review): original shebang was "#!/bin/env bash" — `env` lives in
# /usr/bin on essentially all distributions, so the script would fail to
# launch when executed directly.

## Prerequisites: install the Java JDK first (e.g. via yum), then disable
## the firewall and SELinux so the Hadoop daemons and web UIs are reachable.

systemctl stop firewalld
systemctl disable firewalld
# setenforce exits non-zero when SELinux is already disabled/permissive;
# do not let that abort the script.
setenforce 0 || true


## Unpack Hadoop and put its bin/ directory on PATH (idempotently).
echo -e "\033[31m 解压hadoop ,并添加环境变量 \033[0m"
tar xf hadoop-2.9.0.tar.gz -C xxxxxxxxxxxxxxxxxx/
echo -e "\033[31m export PATH=xxxxxxxxxxxxxxxxxx/hadoop-2.9.0/bin:\$PATH >> .bashrc  \033[0m"
# Append to ~/.bashrc only if the entry is not already there, so re-running
# this script does not keep prepending duplicate PATH entries.
grep -qF 'hadoop-2.9.0/bin' ~/.bashrc 2>/dev/null || \
  echo "export PATH=xxxxxxxxxxxxxxxxxx/hadoop-2.9.0/bin:\$PATH" >> ~/.bashrc
source ~/.bashrc
sleep 2

# Verify the hadoop binary is on PATH.
hadoop version
sleep 3
echo -e "\033[31m 修改java_home:etc/hadoop/hadoop-env.sh  \033[0m"
# Abort if the install directory is missing — the sed below would otherwise
# run against the wrong working directory.
cd xxxxxxxxxxxxxxxxxx/hadoop-2.9.0 || exit 1
# Pin JAVA_HOME explicitly; hadoop-env.sh does not reliably inherit it
# from the calling shell when daemons are started over ssh.
sed -ri 's#(export JAVA_HOME=)(.*)#\1/usr/java/jdk1.8.0_221-amd64#' etc/hadoop/hadoop-env.sh




# Write core-site.xml. fs.defaultFS binds the NameNode RPC port to 0.0.0.0
# so it is reachable from remote hosts (this is what "远程可访问" refers to).
# The heredoc delimiter is quoted ('EOF') so nothing inside is expanded.
echo -e "\033[31m 配置etc/hadoop/core-site.xml  \033[0m"
cd xxxxxxxxxxxxxxxxxx/hadoop-2.9.0
cat > etc/hadoop/core-site.xml << 'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://0.0.0.0:9000</value>
    </property>
       <!--  存储NameNode持久化的数据,DataNode块数据  -->
       <!--  手工创建$HADOOP_HOME/data/tmp  -->
       <property>
             <name>hadoop.tmp.dir</name>
             <value>xxxxxxxxxxxxxxxxxx/hadoop-2.9.0/data/tmp</value>
       </property>
</configuration>
EOF

## Create the storage directory referenced by hadoop.tmp.dir above.
## The default is under /tmp, which may be wiped on reboot.
mkdir  -p  xxxxxxxxxxxxxxxxxx/hadoop-2.9.0/data/tmp
sleep 3





# Write hdfs-site.xml: replication factor 1 (single node) and HDFS
# permission checking disabled (dfs.permissions=false) for easy remote use.
echo -e "\033[31m 配置etc/hadoop/hdfs-site.xml  \033[0m"
cat > etc/hadoop/hdfs-site.xml << 'EOF'
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.permissions</name>
        <value>false</value>
    </property>
</configuration>
EOF


echo -e "\033[31m 配置ssh免密  \033[0m"
# NOTE(review): the original ran "ssh localhost" BEFORE any key was
# installed, which blocks an unattended run on a password prompt.
# Generate a key only if one does not already exist (ssh-keygen would
# otherwise prompt before overwriting), install it, then pre-accept the
# localhost host key so start-dfs.sh does not stop to ask "yes/no".
[ -f ~/.ssh/id_rsa ] || ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 0600 ~/.ssh/authorized_keys
# Non-interactive verification of passwordless login.
ssh -o StrictHostKeyChecking=no localhost true


## Passwordless ssh can be verified manually with: ssh localhost
echo -e "\033[31m  hdfs namenode  -format \033[0m"
# Format the NameNode. NOTE: prompts for confirmation if it was formatted
# before (data in hadoop.tmp.dir already exists).
hdfs namenode -format
sleep 3

# Start NameNode / DataNode / SecondaryNameNode (may ask "yes" for an
# unknown host key unless it was pre-accepted earlier).
sbin/start-dfs.sh

echo -e "\033[31m 测试 \033[0m"
# -p creates missing parents and succeeds if the directory already exists.
bin/hdfs dfs -mkdir -p /user
bin/hdfs dfs -mkdir -p /user/data
# "input" is relative to /user/<current user>, which does not exist on a
# fresh cluster — without -p the original command failed here.
bin/hdfs dfs -mkdir -p input
bin/hdfs dfs -put etc/hadoop/*.xml input
# hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.9.0.jar grep input output 'dfs[a-z.]+'
# hdfs dfs -cat output/*
# bin/hdfs dfs -get output output
# cat output/*
# sleep 10
# sleep 10

# Write mapred-site.xml: run MapReduce on YARN and expose the JobHistory
# web UI on all interfaces, port 19888 (for remote access).
echo -e "\033[31m 配置mapred-site.xml \033[0m"
cat > etc/hadoop/mapred-site.xml  << 'EOF'
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
        <property>
        <name>mapreduce.application.classpath</name>
        <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
    </property>
       <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>0.0.0.0:19888</value>
   </property>
</configuration>
EOF
# Write yarn-site.xml: enable the shuffle service, move the ResourceManager
# web UI to port 8099, and bind the RM RPC/scheduler/tracker endpoints to
# 0.0.0.0 so they are reachable remotely.
echo -e "\033[31m 配置yarn-site.xml \033[0m"

cat > etc/hadoop/yarn-site.xml << 'EOF'
<?xml version="1.0"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Site specific YARN configuration properties -->

<configuration>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
   <property>
  <name>yarn.resourcemanager.webapp.address</name>
  <value>${yarn.resourcemanager.hostname}:8099</value>
</property>
   <property>
        <name>yarn.nodemanager.env-whitelist</name>
        <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
    </property>
  
<property>
    <name>yarn.resourcemanager.address</name>
    <value>0.0.0.0:8032</value>
  </property>
  <property>
    <name>yarn.resourcemanager.scheduler.address</name>
    <value>0.0.0.0:8030</value>
  </property>
  <property>
    <name>yarn.resourcemanager.resource-tracker.address</name>
    <value>0.0.0.0:8031</value>
  </property>
</configuration>
EOF




# Give the HDFS daemons time to settle (and leave safe mode) before YARN.
sleep 15
sbin/start-yarn.sh
sleep 15

# If jobs fail with "Name node is in safe mode", leave it manually:
# bin/hadoop dfsadmin -safemode leave

## Run two example jobs so the JobHistory server has entries to display.
# NOTE(review): fixed the input path — the xml files were uploaded to the
# relative "input" dir (i.e. /user/<user>/input), not /user/test/input,
# so the original command failed with InvalidInputException.
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.9.0.jar grep input output 'dfs[a-z.]+'
bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.9.0.jar pi 3 3
# hdfs dfs -cat output/*

# sbin/stop-yarn.sh
# sbin/stop-dfs.sh

echo -e "\033[31m 启动jobhistory \033[0m"
sbin/mr-jobhistory-daemon.sh start historyserver
echo
# Confirm the JobHistory web UI is listening (the original ran this same
# ss command twice; the duplicate was removed).
ss -tanlp | grep 19888
echo -e "\033[31m http://ip:19888/jobhistory \033[0m"


### 命令详解(command reference):
# https://blog.csdn.net/weixin_33940102/article/details/91736848
## 其他参考(other references):
# https://www.cnblogs.com/ee900222/p/hadoop_1.html
# posted @ 2020-04-28 18:45  Lust4Life  阅读(154)  评论(0)    收藏  举报