Download URL: http://mirror.bit.edu.cn/apache/hive/hive-3.1.2/apache-hive-3.1.2-bin.tar.gz
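On the host, the tarball can be fetched with wget (assuming the mirror above is still reachable; any Apache mirror serves the same file):
wget http://mirror.bit.edu.cn/apache/hive/hive-3.1.2/apache-hive-3.1.2-bin.tar.gz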
# Copy the installation package into the Master container
docker cp apache-hive-3.1.2-bin.tar.gz Master:/usr/local
# Enter the container
docker exec -it Master bash
cd /usr/local/
# Extract the installation package
tar xvf apache-hive-3.1.2-bin.tar.gz
# hive-site.xml does not ship with the release; create it from the bundled template first
root@Master:/usr/local/apache-hive-3.1.2-bin/conf# cp hive-default.xml.template hive-site.xml
root@Master:/usr/local/apache-hive-3.1.2-bin/conf# vim hive-site.xml
Add the following configuration at the very top, just inside <configuration>:
<property>
  <name>system:java.io.tmpdir</name>
  <value>/tmp/hive/java</value>
</property>
<property>
  <name>system:user.name</name>
  <value>${user.name}</value>
</property>
Then create hive-env.sh from its template and add the two exports below:
cp hive-env.sh.template hive-env.sh
export HIVE_CONF_DIR=/usr/local/apache-hive-3.1.2-bin/conf
export HIVE_AUX_JARS_PATH=/usr/local/apache-hive-3.1.2-bin/lib
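If Hive later complains that it cannot find the Hadoop installation (see the hiveserver2 section below), HADOOP_HOME can also be set here; the path assumes the Hadoop location used elsewhere in this walkthrough:
export HADOOP_HOME=/usr/local/hadoop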
vim /etc/profile
Append at the end of the file:
export HIVE_HOME="/usr/local/apache-hive-3.1.2-bin"
export PATH=$PATH:$HIVE_HOME/bin
Reload the configuration:
source /etc/profile
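A quick check that the PATH change took effect; this only prints the version and needs no metastore yet:
hive --version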
Configure MySQL as the metastore database
If MySQL is already available, skip the installation and use the existing instance.
(To reach the host machine from the containers, use IP 172.19.0.1.)
If not, install one now; go back to the host:
# Pull the image
docker pull mysql:8.0.18
# Create the container
docker run --name mysql_hive -p 3306:3306 --net hadoop --ip 172.19.0.5 -v /root/mysql:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=abc123456 -d mysql:8.0.18
# Enter the container
docker exec -it mysql_hive bash
# Enter mysql (the password was set to abc123456 when the container was created)
mysql -uroot -p
# Create the hive database
create database hive;
# Grant remote connection access
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY 'abc123456';
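Still at the mysql> prompt, a quick sanity check that the database exists (hive should appear in the list):
show databases;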
Go back to the Master container and update the database connection settings:
docker exec -it Master bash
vim /usr/local/apache-hive-3.1.2-bin/conf/hive-site.xml
Search for these keys and update the database URL, driver, username, and password; the URL must match the address used when the MySQL container was created above.
# Note: hive-site.xml is XML, so the & in the JDBC URL must be written as &amp;. Newer MySQL versions also enforce SSL by default, which is disabled here.
<property>
  <name>javax.jdo.option.ConnectionUserName</name>
  <value>root</value>
</property>
<property>
  <name>javax.jdo.option.ConnectionPassword</name>
  <value>abc123456</value>
</property>
<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:mysql://172.19.0.5:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
</property>
<property>
  <name>javax.jdo.option.ConnectionDriverName</name>
  <value>com.mysql.jdbc.Driver</value>
</property>
<property>
  <name>hive.metastore.schema.verification</name>
  <value>false</value>
</property>
<property>
  <name>hive.metastore.event.db.notification.api.auth</name>
  <value>false</value>
</property>
(With Connector/J 8.x, com.mysql.jdbc.Driver still works but is deprecated; com.mysql.cj.jdbc.Driver is its current class name.)
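A quick grep confirms the URL was saved with & escaped as &amp; (plain grep, nothing Hive-specific):
grep -n -A 2 'javax.jdo.option.ConnectionURL' /usr/local/apache-hive-3.1.2-bin/conf/hive-site.xml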
Upload the MySQL driver jar to Hive's lib directory:
docker cp mysql-connector-java-8.0.30.jar Master:/usr/local/apache-hive-3.1.2-bin/lib
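If the jar is not already on hand, it can first be downloaded on the host from Maven Central (the URL assumes the standard repository layout for this artifact):
wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.30/mysql-connector-java-8.0.30.jar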
Jar conflict fixes
A few files under Hive's lib directory need adjusting, otherwise initializing the metastore database will fail.
# Only one slf4j binding may exist across Hadoop and Hive; delete Hive's copy
root@Master:/usr/local/apache-hive-3.1.2-bin/lib# rm log4j-slf4j-impl-2.10.0.jar
# For guava, only one version should remain across Hadoop and Hive: delete the lower one and copy the higher one over. Here we delete Hive's and copy Hadoop's across
root@Master:/usr/local/hadoop/share/hadoop/common/lib# cp guava-27.0-jre.jar /usr/local/apache-hive-3.1.2-bin/lib
root@Master:/usr/local/hadoop/share/hadoop/common/lib# rm /usr/local/apache-hive-3.1.2-bin/lib/guava-19.0.jar
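After the swap, confirm that only one guava jar remains:
ls /usr/local/apache-hive-3.1.2-bin/lib | grep guava
# expected output: guava-27.0-jre.jar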
# Delete the special character around line 3223 of hive-site.xml, inside the description of the property below
root@Master: vim /usr/local/apache-hive-3.1.2-bin/conf/hive-site.xml
<property>
<name>hive.txn.xlock.iow</name>
<value>true</value>
<description>
Ensures commands with OVERWRITE (such as INSERT OVERWRITE) acquire Exclusive locks for transactional tables. This ensures that inserts (w/o overwrite) running concurrently
are not hidden by the INSERT OVERWRITE.
</description>
</property>
Otherwise the next step fails with:
Exception in thread "main" java.lang.RuntimeException: com.ctc.wstx.exc.WstxParsingException: Illegal character entity: expansion character (code 0x8
at [row,col,system-id]: [3223,96,"file:/usr/local/apache-hive-3.1.2-bin/conf/hive-site.xml"]
at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:3024)
at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:2973)
at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:2848)
at org.apache.hadoop.conf.Configuration.get(Configuration.java:1460)
at org.apache.hadoop.hive.conf.HiveConf.getVar(HiveConf.java:4996)
at org.apache.hadoop.hive.conf.HiveConf.getVar(HiveConf.java:5069)
at org.apache.hadoop.hive.conf.HiveConf.initialize(HiveConf.java:5156)
at org.apache.hadoop.hive.conf.HiveConf.<init>(HiveConf.java:5104)
at org.apache.hive.beeline.HiveSchemaTool.<init>(HiveSchemaTool.java:96)
at org.apache.hive.beeline.HiveSchemaTool.main(HiveSchemaTool.java:1473)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.util.RunJar.run(RunJar.java:323)
at org.apache.hadoop.util.RunJar.main(RunJar.java:236)
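Instead of hand-editing, the stray entity can be stripped in one pass, assuming it appears in the file as the literal text &#8; (the "code 0x8" the parser complains about):
sed -i 's/&#8;//g' /usr/local/apache-hive-3.1.2-bin/conf/hive-site.xml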
Initialize the metastore database
root@Master:/usr/local/apache-hive-3.1.2-bin/bin# schematool -initSchema -dbType mysql
On success it prints:
Metastore connection URL: jdbc:mysql://172.19.0.5:3306/hive?createDatabaseIfNotExist=true&useSSL=false
Metastore Connection Driver : com.mysql.jdbc.Driver
Metastore connection User: root
Starting metastore schema initialization to 3.1.0
Initialization script hive-schema-3.1.0.mysql.sql
Initialization script completed
schemaTool completed
The tables should now have been created in the MySQL hive database.
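This can be confirmed from the host with the same credentials used when the container was built; the 3.1.0 schema creates tables such as DBS, TBLS, and COLUMNS_V2:
docker exec mysql_hive mysql -uroot -pabc123456 -e "show tables from hive;"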
Now verify from the Hive side.
First create a data file under /usr/local:
cd /usr/local
vim test.txt
1,zhang
2,li
3,wang
Enter the Hive interactive shell:
root@Master:/usr/local# hive
If the command is not found, run source /etc/profile first.
Hive Session ID = 7bec2ab6-e06d-4dff-8d53-a64611875aeb
Logging initialized using configuration in jar:file:/usr/local/apache-hive-3.1.2-bin/lib/hive-common-3.1.2.jar!/hive-log4j2.properties Async: true
Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
Hive Session ID = 5cdee915-5c95-4834-bd3b-bec6f3d90e5b
hive> create table test(
> id int
> ,name string
> )
> row format delimited
> fields terminated by ',';
OK
Time taken: 1.453 seconds
hive> load data local inpath '/usr/local/test.txt' into table test;
Loading data to table default.test
OK
Time taken: 0.63 seconds
hive> select * from test;
OK
1 zhang
2 li
3 wang
Time taken: 1.611 seconds, Fetched: 3 row(s)
Still inside the Master container.
1. Modify Hadoop's proxy-user permissions
HiveServer2 needs to impersonate the users that connect through it; without these settings, beeline connections are rejected with an impersonation error.
root@Master:/usr/local# vim /usr/local/hadoop/etc/hadoop/core-site.xml
Add the following configuration:
<property>
  <name>hadoop.proxyuser.root.hosts</name>
  <value>*</value>
</property>
<property>
  <name>hadoop.proxyuser.root.groups</name>
  <value>*</value>
</property>
<property>
  <name>hadoop.proxyuser.root.users</name>
  <value>*</value>
</property>
Restart HDFS:
root@Master:/usr/local/hadoop/sbin# ./stop-dfs.sh
Stopping namenodes on [Master]
Stopping datanodes
Stopping secondary namenodes [Master]
root@Master:/usr/local/hadoop/sbin# ./start-dfs.sh
Starting namenodes on [Master]
Starting datanodes
Starting secondary namenodes [Master]
2. Start hiveserver2
Go to the hive/bin directory:
/usr/local/apache-hive-3.1.2-bin/bin#
./hiveserver2
Or start it in the background:
nohup hiveserver2 >/dev/null 2>/dev/null &
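If startup needs troubleshooting, keep the output instead of discarding it (the log path here is only an example):
nohup hiveserver2 > /tmp/hiveserver2.log 2>&1 &
tail -f /tmp/hiveserver2.log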
If it reports "Cannot find hadoop installation: $HADOOP_HOME or $HADOOP_PREFIX must be set or hadoop must be in the path", run:
source /etc/profile
Check that the port is listening (hiveserver2 can take a minute or two before port 10000 opens):
netstat -ntulp | grep 10000
Launch beeline from the same bin directory, then connect:
beeline
!connect jdbc:hive2://localhost:10000/default
Enter username root
and password root.
Run a query:
select * from test;
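The same query can also be run non-interactively with beeline's standard flags:
beeline -u jdbc:hive2://localhost:10000/default -n root -p root -e "select * from test;"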