HBase Cluster Deployment

1. Host Initialization

# On all nodes
# Set the hostname (run only the line that matches the node)
hostnamectl set-hostname master   # on master
hostnamectl set-hostname node1    # on node1
hostnamectl set-hostname node2    # on node2

cat <<EOF >> /etc/hosts
192.168.2.66 master
192.168.2.67 node1
192.168.2.68 node2
EOF
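
A quick sanity check that all three names resolve to the intended addresses (adjust the IPs above to your network first):

# Each line should print the matching 192.168.2.x entry
for h in master node1 node2; do getent hosts "$h"; done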

# Configure the yum repository (back up the existing repo files first)
cd /etc/yum.repos.d/
rename repo repo.bak *
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all && yum makecache

# Customize the shell profile (quote the heredoc delimiter so the PS1 backslash escapes are written verbatim)
cat <<'EOF' | tee /etc/profile.d/my.sh
PS1='\[\e[1;34m\][\u@\h \w]\$ \[\e[0m\]'
alias vi="vim"
HISTSIZE=10000
HISTTIMEFORMAT="%F %T "
EOF
source /etc/profile.d/my.sh

# Disable the firewall
systemctl stop firewalld.service && systemctl disable firewalld.service
# Disable SELinux
setenforce 0 && sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
# Set the timezone and sync the time
yum -y install ntpdate && timedatectl set-timezone Asia/Shanghai && ntpdate ntp2.aliyun.com && hwclock --systohc

# Install the JDK
yum install -y java-1.8.0-openjdk-devel.x86_64
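
The JAVA_HOME used in the next step is version-specific (the 1.8.0.222 build shown below may differ on your system), so confirm the directory yum actually installed:

# Print the installed OpenJDK home; use this path for JAVA_HOME below
ls -d /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.*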
# Configure environment variables
cat <<EOF | sudo tee /etc/profile.d/my-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64
export CLASSPATH=.:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib/tools.jar
export PATH=\$PATH:\$JAVA_HOME/bin
EOF
source /etc/profile.d/my-env.sh

# On the management node (master)
# Set up passwordless SSH login
ssh-keygen -t rsa -P ""
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 0600 ~/.ssh/authorized_keys
ssh-copy-id 192.168.2.67
ssh-copy-id 192.168.2.68
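
Each of these should print the remote hostname without a password prompt before you move on:

for h in node1 node2; do ssh "$h" hostname; done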

 

2. Deploying ZooKeeper

# Install ZooKeeper
# Download and unpack the release
wget https://mirrors.tuna.tsinghua.edu.cn/apache/zookeeper/stable/apache-zookeeper-3.5.5-bin.tar.gz
tar -zxf apache-zookeeper-3.5.5-bin.tar.gz
mv apache-zookeeper-3.5.5-bin /usr/local/
cp /usr/local/apache-zookeeper-3.5.5-bin/conf/zoo_sample.cfg /usr/local/apache-zookeeper-3.5.5-bin/conf/zoo.cfg

# Create the ZooKeeper data and log directories
mkdir -p /hadoop/zookeeper/{data,logs}

# Edit the default configuration (set/append the following)
vi /usr/local/apache-zookeeper-3.5.5-bin/conf/zoo.cfg
dataDir=/hadoop/zookeeper/data
dataLogDir=/hadoop/zookeeper/logs
server.0=master:2888:3888
server.1=node1:2888:3888
server.2=node2:2888:3888
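
The same edits can be applied non-interactively, which is handy when repeating the setup (a sketch assuming the paths used above):

ZK_CONF=/usr/local/apache-zookeeper-3.5.5-bin/conf/zoo.cfg
sed -i 's|^dataDir=.*|dataDir=/hadoop/zookeeper/data|' "$ZK_CONF"
cat <<'EOF' >> "$ZK_CONF"
dataLogDir=/hadoop/zookeeper/logs
server.0=master:2888:3888
server.1=node1:2888:3888
server.2=node2:2888:3888
EOF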

# Copy the files to the worker nodes
scp -r /usr/local/apache-zookeeper-3.5.5-bin node1:/usr/local/
scp -r /usr/local/apache-zookeeper-3.5.5-bin node2:/usr/local/

# On each node, create the data directory and write that node's unique myid
mkdir -p /hadoop/zookeeper/data
echo "0" > /hadoop/zookeeper/data/myid   # on master
echo "1" > /hadoop/zookeeper/data/myid   # on node1
echo "2" > /hadoop/zookeeper/data/myid   # on node2

# Set environment variables (overwrites my-env.sh with a superset of the previous entries)
cat <<EOF | sudo tee /etc/profile.d/my-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64
export CLASSPATH=.:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib/tools.jar
export ZOOKEEPER_HOME=/usr/local/apache-zookeeper-3.5.5-bin
export PATH=\$PATH:\$JAVA_HOME/bin:\$ZOOKEEPER_HOME/bin
EOF
source /etc/profile.d/my-env.sh

# Start/stop the ZooKeeper service (run on every node)
zkServer.sh start
zkServer.sh stop
# Check the service status
zkServer.sh status
# Run in the foreground for debugging
zkServer.sh start-foreground
# Connect to one node and write some data
zkCli.sh -server node1
create /test data
# Connect to another node and confirm the data is visible
zkCli.sh -server node2
ls /
# When redeploying, remove the stale HBase znode (rmr is deprecated in 3.5; use deleteall)
deleteall /hbase
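
To check the whole ensemble at once, the srvr four-letter-word command (whitelisted by default in 3.5.x) reports each node's role; this assumes nc is available:

# Expect one leader and two followers
for h in master node1 node2; do echo -n "$h: "; echo srvr | nc "$h" 2181 | grep Mode; done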

 

3. Deploying Hadoop

# Download and unpack Hadoop
wget https://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-2.8.5/hadoop-2.8.5.tar.gz
tar -xzf hadoop-2.8.5.tar.gz
mv hadoop-2.8.5 /usr/local/

# Create the HDFS data directories
mkdir -p /hadoop/hdfs/{name,data,tmp}

-----------------------------------------------------------
vi /usr/local/hadoop-2.8.5/etc/hadoop/core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://master:9000</value>
  </property>

  <property>
    <name>io.file.buffer.size</name>
    <value>4096</value>
  </property>

  <property>
    <name>hadoop.tmp.dir</name>
    <value>/hadoop/hdfs/tmp</value>
  </property>
</configuration>
------------------------------------------------------
vi /usr/local/hadoop-2.8.5/etc/hadoop/hdfs-site.xml
<configuration>
<!-- Replication factor -->
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property> 
  
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>/hadoop/hdfs/name</value>
  </property>

  <property>
    <name>dfs.datanode.data.dir</name>
    <value>/hadoop/hdfs/data</value>
  </property>

  <property>
    <name>dfs.namenode.http-address</name>
    <value>master:50070</value>
  </property>

  <property>
    <name>dfs.namenode.secondary.http-address</name>
    <value>master:50090</value>
  </property>

  <property>
    <name>dfs.webhdfs.enabled</name>
    <value>true</value>
  </property>

  <property>
    <name>dfs.permissions.enabled</name>
    <value>false</value>
  </property>
</configuration>
-------------------------------------------------------
cp /usr/local/hadoop-2.8.5/etc/hadoop/mapred-site.xml.template /usr/local/hadoop-2.8.5/etc/hadoop/mapred-site.xml
vi /usr/local/hadoop-2.8.5/etc/hadoop/mapred-site.xml
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>

  <property>
    <name>mapreduce.jobhistory.address</name>
    <value>master:10020</value>
  </property>

  <property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>master:19888</value>
  </property>
</configuration>
------------------------------------------------------
vi /usr/local/hadoop-2.8.5/etc/hadoop/yarn-site.xml
<configuration>
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>master</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>master:8032</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>master:8030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>master:8031</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>master:8033</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>master:8088</value>
    </property>
</configuration>
--------------------------------------------------------
vi /usr/local/hadoop-2.8.5/etc/hadoop/slaves 
node1
node2

# Copy the Hadoop files to the worker nodes
scp -r /usr/local/hadoop-2.8.5 node1:/usr/local/
scp -r /usr/local/hadoop-2.8.5 node2:/usr/local/

# Configure environment variables (again extending my-env.sh)
cat <<EOF | sudo tee /etc/profile.d/my-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64
export CLASSPATH=.:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib/tools.jar
export ZOOKEEPER_HOME=/usr/local/apache-zookeeper-3.5.5-bin
export HADOOP_HOME=/usr/local/hadoop-2.8.5
export PATH=\$PATH:\$JAVA_HOME/bin:\$ZOOKEEPER_HOME/bin:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin
EOF
source /etc/profile.d/my-env.sh
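
Note that my-env.sh so far exists only on the node where tee ran, so distribute it as well. If start-dfs.sh later fails on a worker with "JAVA_HOME is not set", the usual fix is hardcoding the path in hadoop-env.sh on every node:

scp /etc/profile.d/my-env.sh node1:/etc/profile.d/
scp /etc/profile.d/my-env.sh node2:/etc/profile.d/
# If needed, in /usr/local/hadoop-2.8.5/etc/hadoop/hadoop-env.sh:
#   export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64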

# Check the version
hadoop version

# Format the NameNode (first run only; reformatting wipes HDFS metadata)
hdfs namenode -format

# Start the services
start-dfs.sh && start-yarn.sh
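
jps quickly confirms the daemons came up; with this layout master should show NameNode, SecondaryNameNode, and ResourceManager, and each worker DataNode and NodeManager:

jps
ssh node1 jps
ssh node2 jps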

# HDFS web UI (cluster overview)
http://192.168.2.66:50070
# YARN web UI (applications and nodes)
http://192.168.2.66:8088
# Create and list a directory in HDFS
hdfs dfs -mkdir /test
hadoop fs -ls /test
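
A minimal round trip, writing a local file into HDFS and reading it back (hello.txt is just an example name):

echo "hello hdfs" > /tmp/hello.txt
hdfs dfs -put /tmp/hello.txt /test/
hdfs dfs -cat /test/hello.txt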

 

4. Deploying HBase

# Download and unpack HBase
wget https://mirrors.tuna.tsinghua.edu.cn/apache/hbase/2.1.6/hbase-2.1.6-bin.tar.gz
tar -zxf hbase-2.1.6-bin.tar.gz
mv hbase-2.1.6 /usr/local/

# Create the HBase tmp (PID) and log directories
mkdir -p /hadoop/hbase/{tmp,logs}

# Edit the HBase environment file (set/append the following)
vi /usr/local/hbase-2.1.6/conf/hbase-env.sh
export JAVA_HOME=${JAVA_HOME}
export HBASE_CLASSPATH=/usr/local/hbase-2.1.6/lib
export HBASE_PID_DIR=/hadoop/hbase/tmp
export HBASE_LOG_DIR=/hadoop/hbase/logs
export HBASE_MANAGES_ZK=false

# Edit the HBase configuration file
vi /usr/local/hbase-2.1.6/conf/hbase-site.xml
<configuration>
   <property>
      <name>hbase.tmp.dir</name>
      <value>/hadoop/hbase/tmp</value>
    </property>
    <property>
      <name>hbase.rootdir</name>
      <value>hdfs://master:9000/hbase</value>
    </property>
    <property>
      <name>hbase.cluster.distributed</name>
      <value>true</value>
    </property>
    <property>
      <name>hbase.zookeeper.quorum</name>
      <value>master,node1,node2</value>
    </property>
    <property>
      <name>hbase.zookeeper.property.clientPort</name>
      <value>2181</value>
    </property>
    <property>
      <name>hbase.zookeeper.property.dataDir</name>
      <value>/hadoop/zookeeper/data</value>
      <description>Property from zoo.cfg: the directory where the snapshot is stored.</description>
    </property>
</configuration>

# Specify the RegionServers
vi /usr/local/hbase-2.1.6/conf/regionservers
master
node1
node2

# Specify the backup master
vi /usr/local/hbase-2.1.6/conf/backup-masters
node1

# Copy the htrace jar into lib/ (avoids a missing-htrace-class error when running on Hadoop)
cp /usr/local/hbase-2.1.6/lib/client-facing-thirdparty/htrace-core-3.1.0-incubating.jar /usr/local/hbase-2.1.6/lib/

# Set environment variables (final version of my-env.sh)
cat <<EOF | sudo tee /etc/profile.d/my-env.sh
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.222.b10-1.el7_7.x86_64
export CLASSPATH=.:\$JAVA_HOME/lib/dt.jar:\$JAVA_HOME/lib/tools.jar
export ZOOKEEPER_HOME=/usr/local/apache-zookeeper-3.5.5-bin
export HADOOP_HOME=/usr/local/hadoop-2.8.5
export HBASE_HOME=/usr/local/hbase-2.1.6
export PATH=\$PATH:\$JAVA_HOME/bin:\$ZOOKEEPER_HOME/bin:\$HADOOP_HOME/bin:\$HADOOP_HOME/sbin:\$HBASE_HOME/bin
EOF
source /etc/profile.d/my-env.sh

# Copy the HBase files to the worker nodes
scp -r /usr/local/hbase-2.1.6 node1:/usr/local/
scp -r /usr/local/hbase-2.1.6 node2:/usr/local/


# Start/stop HBase (from master; the scripts reach the other nodes over SSH)
start-hbase.sh
stop-hbase.sh

# Open the HBase shell and check cluster status
hbase shell
status
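
While still in the shell, a minimal read/write smoke test confirms the RegionServers accept data; the table and column family names here are just examples:

create 't1', 'cf'
put 't1', 'row1', 'cf:a', 'value1'
scan 't1'
disable 't1'
drop 't1'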

# HBase web UI
http://192.168.2.66:16010

# Check HDFS safe mode and leave it if needed (the dfsadmin subcommand now lives under hdfs)
hdfs dfsadmin -safemode get
hdfs dfsadmin -safemode leave

# Start/stop the whole stack in order (zkServer.sh must run on every node, and ZooKeeper must be up before HBase)
start-dfs.sh
start-yarn.sh
zkServer.sh start
start-hbase.sh

stop-hbase.sh
zkServer.sh stop
stop-yarn.sh
stop-dfs.sh
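
If the sequence is used often, it can be wrapped in a small helper on master (a hypothetical convenience script, not part of any of these stacks; note again that ZooKeeper is started per node):

cat <<'EOS' > /usr/local/bin/cluster-start.sh
#!/bin/bash
# Start ZooKeeper on every node, then HDFS, YARN, and HBase from master
for h in master node1 node2; do
  ssh "$h" /usr/local/apache-zookeeper-3.5.5-bin/bin/zkServer.sh start
done
start-dfs.sh && start-yarn.sh && start-hbase.sh
EOS
chmod +x /usr/local/bin/cluster-start.sh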

 
