Hadoop 3.0.3 + Hive 3.0 Installation

Environment initialization

  1. Install MySQL in advance

Unpack the installation package

Install Hive

1. Unpack

tar -zxf apache-hive-3.1.0-bin.tar.gz

2. Set the environment variable HIVE_HOME to point to the installation directory

#Set the environment variable HIVE_HOME
export HIVE_HOME="/usr/lib/hive-current"
if [[ -n $HIVE_HOME ]]; then
  export PATH=$HIVE_HOME/bin:$PATH
fi
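
Note that the tarball unpacks to apache-hive-3.1.0-bin while HIVE_HOME points at /usr/lib/hive-current, so the unpacked directory needs to be moved or linked there. A minimal sketch (the source path below is whatever directory you unpacked into):

# assumption: link the unpacked directory to the path HIVE_HOME expects
ln -s /opt/apache-hive-3.1.0-bin /usr/lib/hive-current
source /etc/profile        # or wherever the exports above were added
hive --version             # should print the Hive version banner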

Edit hive-site.xml (server side)

<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>gggg</value>
    <description>Username to use against metastore database</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>123456</value>
    <description>password to use against metastore database</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://mysql_host/hive?createDatabaseIfNotExist=true&amp;characterEncoding=UTF-8</value>
    <description>JDBC connect string for a JDBC metastore</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
    <description>Driver class name for a JDBC metastore</description>
  </property>
  
  <property>
     <name>hive.metastore.warehouse.dir</name>
     <value>/user/hive/warehouse</value>
  </property>
  
  <property>
     <name>hive.server2.webui.host</name>
     <value>192.168.0.50</value>
  </property>

  <property>
     <name>hive.server2.webui.port</name>
     <value>10002</value>
  </property>
</configuration>
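
Before starting the metastore, it is worth confirming that the MySQL instance named in ConnectionURL accepts the credentials above. A quick check, assuming the mysql client is installed (gggg/123456 are the values from this config):

mysql -h mysql_host -u gggg -p123456 -e 'SHOW DATABASES;'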

 

hive-site.xml (client side)

<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>hive.metastore.uris</name>
    <value>thrift://master:9083</value>
    <description>Thrift URI for the remote metastore</description>
  </property>
  <property>
    <name>hive.metastore.local</name>
    <value>false</value>
    <description>whether to use a local metastore (false: connect to the remote metastore)</description>
  </property>
  <property>
     <name>hive.metastore.warehouse.dir</name>
     <value>/user/hive/warehouse</value>
  </property>
</configuration>
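
The client only needs the Thrift URI. Once the metastore is running on master, reachability can be verified from the client host, for example:

# 9083 is the port used in hive.metastore.uris above
nc -z master 9083 && echo "metastore reachable"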

Start Hive

# run metastore
hive --service metastore &

# run hiveserver2
hive --service hiveserver2 --hiveconf hive.root.logger=INFO,console &

Client connection

Type hive
or use beeline:
beeline
bin/beeline -u jdbc:hive2://master:10000
Username/password: (hadoop/ )
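
The username can also be passed directly on the beeline command line (using the hadoop user with the empty password from above):

bin/beeline -u jdbc:hive2://master:10000 -n hadoop
# once connected:
# 0: jdbc:hive2://master:10000> show databases;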


 

Alternative walkthrough (Hive 3.0.0)

1. Download, unpack, rename
    wget http://mirrors.hust.edu.cn/apache/hive/hive-3.0.0/apache-hive-3.0.0-bin.tar.gz
    tar -xzvf apache-hive-3.0.0-bin.tar.gz 
    mv apache-hive-3.0.0-bin hive
 
2. Configure the environment: vim /etc/profile (append at the end)
    export HIVE_HOME=/home/hive
    export PATH=$PATH:$HIVE_HOME/bin   # so that hive/schematool (step 7) resolve on the PATH
 
3. Install MySQL
    sudo apt-get install mysql-server
    sudo apt-get install libmysql-java
    ln -s /usr/share/java/mysql-connector-java.jar $HIVE_HOME/lib/mysql-connector-java.jar
 
4. Import the metastore schema
    $ mysql -u root -p
    mysql> CREATE DATABASE metastore;
    mysql> USE metastore;
    mysql> SOURCE /home/hive/scripts/metastore/upgrade/mysql/hive-schema-3.0.0.mysql.sql;  -- use the literal path; the mysql client does not expand $HIVE_HOME
    mysql> CREATE USER 'hive'@'%' IDENTIFIED BY 'hive'; 
    mysql> GRANT ALL ON *.* TO 'hive'@'localhost' IDENTIFIED BY 'hive';
    mysql> flush privileges;
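
After sourcing the schema, the metastore tables should exist; a quick sanity check:

    mysql> USE metastore;
    mysql> SHOW TABLES;   -- expect Hive metastore tables such as DBS and TBLS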
 
5. Configure Hive (/home/hive/conf)
  cp hive-env.sh.template hive-env.sh   
  vim hive-env.sh
      export HADOOP_HOME=/home/hadoop
      export HIVE_CONF_DIR=/home/hive/conf
 
  cp hive-default.xml.template hive-site.xml
  vim hive-site.xml (configure the paths and MySQL settings; defining the system:* properties below gives values to the ${system:...} placeholders that the copied hive-default.xml.template references, which otherwise break Hive 3 at startup)
     <property>
        <name>system:java.io.tmpdir</name>
        <value>/user/hive/warehouse</value>
      </property>
      <property>
        <name>system:user.name</name>
        <value>${user.name}</value>
      </property>
     <property>
        <name>hive.metastore.db.type</name>
        <value>mysql</value>
     </property>
     <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:mysql://localhost:3306/metastore?createDatabaseIfNotExist=true</value>
     </property>
     <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>com.mysql.jdbc.Driver</value>
     </property>
    <property>
      <name>javax.jdo.option.ConnectionUserName</name>
      <value>hive</value>
      <description>user name for connecting to mysql server</description>
    </property>
    <property>
      <name>javax.jdo.option.ConnectionPassword</name>
      <value>hive</value>
      <description>password for connecting to mysql server</description>
    </property>
 
6. Create the HDFS temporary and warehouse directories
    $HADOOP_HOME/bin/hadoop fs -mkdir -p /tmp
    $HADOOP_HOME/bin/hadoop fs -mkdir -p /user/hive/warehouse
    $HADOOP_HOME/bin/hadoop fs -chmod g+w   /tmp
    $HADOOP_HOME/bin/hadoop fs -chmod g+w   /user/hive/warehouse
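
A quick check that both directories exist with group-write permission:

    $HADOOP_HOME/bin/hadoop fs -ls -d /tmp /user/hive/warehouse   # permissions should show group write (drwxrwx...)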
 
7. Initialize the Hive schema
    schematool -dbType mysql -initSchema
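
Since step 4 already loaded the schema by hand, -initSchema may report that the schema already exists; either way, the resulting schema version can be verified with:

    schematool -dbType mysql -info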
 
8. Start the metastore service (if it is not running, Hive fails with: HiveException java.lang.RuntimeException: Unable to instantiate org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient)
    ./hive --service metastore &
 
9. Enter Hive
    $HIVE_HOME/bin/hive
    # create a table
    hive (default)> CREATE TABLE IF NOT EXISTS test_table
                 (col1 int COMMENT 'Integer Column',
                     col2 string COMMENT 'String Column'
                 )
                 COMMENT 'This is test table'
                 ROW FORMAT DELIMITED
                 FIELDS TERMINATED BY ','
                 STORED AS TEXTFILE;
 
    hive (default)> show tables;
        tab_name
        test_table
 
    # insert
    hive (default)> insert into test_table values(1,'aaa');
        MapReduce Jobs Launched: 
        Stage-Stage-1: Map: 1  Reduce: 1   Cumulative CPU: 5.54 sec   HDFS Read: 15408 HDFS Write: 243 SUCCESS
        Total MapReduce CPU Time Spent: 5 seconds 540 msec
        OK
        col1    col2
        Time taken: 26.271 seconds
      
    # query
    hive (default)> select * from test_table;
        test_table.col1    test_table.col2
            2    bbb
            3    ccc
            4    ddd
        Time taken: 0.205 seconds, Fetched: 3 row(s)
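
Because the table is declared as comma-delimited TEXTFILE, rows can also be bulk-loaded from a local CSV file; a small sketch (the sample file below is hypothetical):

    echo "5,eee" > /tmp/rows.csv
    hive (default)> LOAD DATA LOCAL INPATH '/tmp/rows.csv' INTO TABLE test_table;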
 
10. jps on had001
    root@had001:/home/hive# jps
        6675 SecondaryNameNode
        6426 NameNode
        6908 ResourceManager
        8382 Jps
11. jps on had002 and had003
    root@had002:~# jps
        3300 DataNode
        3430 NodeManager
        5610 Jps
 
    # check whether had001 is reachable from the slaves
    root@had002:~# /home/hadoop/bin/hdfs dfsadmin -report
    root@had003:~# /home/hadoop/bin/hdfs dfsadmin -report
 
 
    # when healthy, a data directory exists under the DataNode's tmp dir
    root@had002:~# tree /usr/local/hadoop/tmp
    /usr/local/hadoop/tmp
    ├── dfs
    │   └── data
    │       ├── current
    │       │   ├── BP-1834162669-172.17.252.52-1532682436448
    │       │   │   ├── current
    │       │   │   │   ├── finalized
 
        
12. Errors
    1. Exception in thread "main" java.lang.RuntimeException: com.ctc.wstx.exc.WstxParsingException: Illegal character entity: expansion character (code 0x8
 at [row,col,system-id]: [3213,96,"file:/home/appleyuchi/apache-hive-3.0.0-bin/conf/hive-site.xml"]
    Fix:
    In /home/appleyuchi/apache-hive-3.0.0-bin/conf/hive-site.xml, character 96 on line 3213 is an illegal character; commenting it out (or deleting it) resolves the error.
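
One quick way to get at it (using the path and line number from the error message):

    vim +3213 /home/appleyuchi/apache-hive-3.0.0-bin/conf/hive-site.xml
    # then remove the character entity with code 0x8 from that line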
 
    2. HDFS writes fail with "could only be written to 0 of the 1 minReplication nodes"
    The cause is that had002 and had003 cannot reach had001.
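
A quick connectivity check from the datanodes (the NameNode RPC port is commonly 9000 or 8020, depending on fs.defaultFS):

    root@had002:~# nc -z had001 9000 && echo "namenode reachable"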

 

References:

https://www.jianshu.com/p/6aa63f6c83e7

https://blog.csdn.net/w116858389/article/details/81254221

 
