Practice Assignment 4

(1)Tomcat + Nginx load balancing with Docker Compose

  • How nginx reverse proxying works

  • A reverse proxy accepts connection requests from the Internet, forwards them to servers on the internal network, and returns the servers' responses to the requesting clients; to the outside, the proxy itself appears to be the server, and the origin servers stay transparent to the client. With a forward proxy, by contrast, the client names the origin server in its request, and the proxy relays the request to that server and hands the response back to the client.

  • nginx proxies a Tomcat cluster of two or more Tomcat instances

    ① Pull the tomcat and nginx images
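
    For example, with plain docker commands:

    docker pull tomcat
    docker pull nginx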

    ② Create the following project structure

    ③ Edit the files

    docker-compose.yml

    version: "3.8"
    services:
        nginx:
            image: nginx
            container_name: tn_nginx
            ports:
                - 8080:80
            volumes:
                - ./nginx/default.conf:/etc/nginx/conf.d/default.conf # mount the nginx config file
            depends_on:
                - tomcat01
                - tomcat02
                - tomcat03
    
        tomcat01:
            hostname: tomcat01
            image: tomcat
            container_name: tomcat1
            volumes:
                - ./webapps:/usr/local/tomcat/webapps/ROOT # mount the web directory
    
    
        tomcat02:
            hostname: tomcat02
            image: tomcat
            container_name: tomcat2
            volumes:
                - ./webapps:/usr/local/tomcat/webapps/ROOT # mount the web directory
    
    
        tomcat03:
            hostname: tomcat03
            image: tomcat
            container_name: tomcat3
            volumes:
                - ./webapps:/usr/local/tomcat/webapps/ROOT # mount the web directory
    

    default.conf

    upstream tomcats {
            server tomcat1:8080 max_fails=3 fail_timeout=30s;
            server tomcat2:8080 max_fails=3 fail_timeout=30s;
            server tomcat3:8080 max_fails=3 fail_timeout=30s;
    }
    
    server {
            listen 80;
            server_name localhost server;
    
            location / {
                    proxy_set_header  X-Real-IP $remote_addr;
                    proxy_set_header  X-Forwarded-For $proxy_add_x_forwarded_for;
                    proxy_set_header  Host $http_host;
                    proxy_pass http://tomcats;
                    proxy_redirect off;
            }
    }
    

    index.jsp

    <%@ page language="java" contentType="text/html; charset=utf-8"  import="java.net.InetAddress"
        pageEncoding="utf-8"%>
    <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
    <html>
    <head>
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
    <title>Nginx+Tomcat Load Balancing</title>
    </head>
    <body>
         <%
             InetAddress addr = InetAddress.getLocalHost();
             out.println("主机名:"+addr.getHostName());
          %>
    </body>
    </html>
    

    browse.sh

    # send 15 requests to nginx and print the part of each response after <body>,
    # which contains the hostname of the tomcat that served the request
    for((i=1;i<=15;i++));
    do
    content=$(curl -s localhost:8080);
    echo ${content#*<body>};
    done
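
    With all the files edited, the stack can be brought up and tested roughly like this (a sketch; run it from the project directory created in step ②):

    docker-compose up -d    # start nginx plus the three tomcat containers
    docker ps               # all four containers should be running
    bash browse.sh          # each line shows the hostname of the tomcat that answered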
    

    ④ Load-balancing configuration

    • Without any change to the config file, nginx defaults to round-robin, i.e. the three tomcats are visited in turn

    • Change the upstream block in default.conf to the following

      upstream tomcats { # the three servers get weights 1:2:3
              server tomcat1:8080 weight=1 max_fails=3 fail_timeout=30s;
              server tomcat2:8080 weight=2 max_fails=3 fail_timeout=30s;
              server tomcat3:8080 weight=3 max_fails=3 fail_timeout=30s;
      }
      

      After rebuilding, run browse.sh again

      The access frequency comes out as tomcat3 > tomcat2 > tomcat1 (see the tally sketch below)
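
      The tally can be made explicit with a quick sketch (it greps the hostname printed by index.jsp out of each reply):

      # send 60 requests and count how many each tomcat answered
      for i in $(seq 1 60); do
          curl -s localhost:8080 | grep -o 'tomcat0[0-9]'
      done | sort | uniq -c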

(2)Deploying a JavaWeb runtime environment with Docker Compose

① Project structure

② Edit the configuration files

docker-compose.yml

version: "3.8"
services:
    mysql:
        image: mysql_sp
        container_name: spring_mysql
        build:
            context: ./mysql
            dockerfile: Dockerfile
        volumes:
            - ./mysql/setup.sh:/mysql/setup.sh
            - ./mysql/schema.sql:/mysql/schema.sql
            - ./mysql/privileges.sql:/mysql/privileges.sql
        ports:
            - 8083:3306  # mapped for convenient querying from outside

    nginx:
        image: nginx
        container_name: tn_nginx
        ports:
            - 8082:80
        volumes:
            - ./nginx/default.conf:/etc/nginx/conf.d/default.conf # mount the nginx config file
        depends_on:
            - tomcat01
            - tomcat02
            - tomcat03

    tomcat01:
        hostname: tomcat01
        image: tomcat
        container_name: tomcat1
        volumes:
            - ./webapps:/usr/local/tomcat/webapps # mount the web directory


    tomcat02:
        hostname: tomcat02
        image: tomcat
        container_name: tomcat2
        volumes:
            - ./webapps:/usr/local/tomcat/webapps # mount the web directory


    tomcat03:
        hostname: tomcat03
        image: tomcat
        container_name: tomcat3
        volumes:
            - ./webapps:/usr/local/tomcat/webapps # mount the web directory

Dockerfile

FROM mysql:5.7

# allow passwordless (empty-password) login
ENV MYSQL_ALLOW_EMPTY_PASSWORD yes

# set the root password
ENV MYSQL_ROOT_PASSWORD admin


# command to run when the container starts
CMD ["sh","/mysql/setup.sh"]

# expose the port
EXPOSE 3306

setup.sh

#!/bin/bash
set -e

# print the mysql service status for debugging; this line can be removed
echo `service mysql status`

echo '1. Starting mysql....'
# start mysql
service mysql start
sleep 3
echo `service mysql status`

echo '2. Importing data....'
# import the data
mysql < /mysql/schema.sql
echo '3. Data import finished....'

sleep 3
echo `service mysql status`

# add the user spring
mysql < /mysql/privileges.sql
echo 'User spring added successfully'

#sleep 3
echo `service mysql status`
echo 'mysql container is up and the data has been imported'

tail -f /dev/null

schema.sql

create database `springtest` default character set utf8 collate utf8_general_ci;

use springtest;

DROP TABLE IF EXISTS `_User`;
CREATE TABLE `_User`  (
  `userId` int(11) NOT NULL AUTO_INCREMENT,
  `userName` char(10) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
  `userSex` char(2) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,
  `contactType` varchar(255) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL COMMENT 'contact type: QQ/TEL',
  `contactDetail` varchar(255) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL COMMENT 'contact number',
  `openid` varchar(255) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL COMMENT 'unique WeChat identifier',
  `grade` varchar(255) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL COMMENT 'current grade/year',
  `creditIndex` int(255) NOT NULL DEFAULT 60 COMMENT 'defaults to 60, increased when a task is completed',
  PRIMARY KEY (`userId`) USING BTREE,
  UNIQUE INDEX `UserName`(`userName`) USING BTREE,
  UNIQUE INDEX `openid`(`openid`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 182 CHARACTER SET = utf8 COLLATE = utf8_general_ci ROW_FORMAT = Compact;  

privileges.sql

use mysql;
select host, user from user;
create user spring identified by '123456';
-- grant all privileges on the springtest database to the new user spring, password 123456:
grant all on springtest.* to spring@'%' identified by '123456' with grant option;
-- this statement is required:
flush privileges;

③ Package the Spring Boot project as ROOT.war and move it into webapps. Since I had written a similar project before, it only needed minor changes to be usable: update the database connection settings (they must match the MySQL setup above) and remove the embedded Tomcat (as shown below).

WAR package for reference

④ Run docker-compose up -d --build

⑤ Check the startup logs: docker logs tomcat1

docker logs spring_mysql
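
The MySQL side can also be checked from the host through the mapped port 8083 (a sketch; it assumes a mysql client is installed on the host):

mysql -h 127.0.0.1 -P 8083 -uspring -p123456 -e 'show databases;'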

⑥ Test the registration endpoint with Postman

⑦ The corresponding record can be found in Navicat (or by querying inside the container, for example as below)
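
For example, querying inside the container directly (assuming the registration test above inserted a row into _User):

docker exec spring_mysql mysql -uspring -p123456 -e 'select userId, userName from springtest._User;'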

⑧ Configure the reverse proxy and load balancing

default.conf

upstream tomcats {
        server tomcat1:8080 weight=1 max_fails=3 fail_timeout=30s;
        server tomcat2:8080 weight=2 max_fails=3 fail_timeout=30s;
        server tomcat3:8080 weight=3 max_fails=3 fail_timeout=30s;
}

server {
        listen 80;
        server_name localhost server;

        location / {
                proxy_set_header  X-Real-IP $remote_addr;
                proxy_set_header  X-Forwarded-For $proxy_add_x_forwarded_for;
                proxy_set_header  Host $http_host;
                proxy_pass http://tomcats;
                proxy_redirect off;
        }
}
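
Because default.conf is bind-mounted, the new configuration can be applied without rebuilding the image, for example by reloading nginx inside the container (a sketch):

docker exec tn_nginx nginx -s reload    # or simply: docker restart tn_nginx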

(3)Building a big-data cluster environment with Docker

1. Environment setup

① Install Ubuntu

docker pull ubuntu

② Run the image with the local /home/admin/build directory shared into the container (a sketch of the command follows)
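
A sketch of the run command (the container-side path /root/build matches the path used in the Hadoop steps below; the container name is only an example):

docker run -it -v /home/admin/build:/root/build --name ubuntu ubuntu bash    # container name is arbitrary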

③ Initialize the Ubuntu system and install the required software

apt-get update && apt-get install vim && apt-get install ssh

④ Make sshd start automatically: edit ~/.bashrc (vim ~/.bashrc) and append at the end

/etc/init.d/ssh start

⑤ Configure sshd

ssh-keygen -t rsa && cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
  • Test passwordless login with ssh localhost

⑥ Install JDK 8

apt-get install openjdk-8-jdk

⑦ Configure environment variables

Edit ~/.bashrc and append the following at the end:

export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/
export PATH=$PATH:$JAVA_HOME/bin
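
Reload the shell configuration and do a quick check:

source ~/.bashrc
java -version    # should report an openjdk 1.8.0 build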

⑧ Save the image
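
Saving is done with docker commit, the same way the Hadoop image is saved later; the image name here is only an example:

docker commit <container-id> ubuntu/jdkinstalled    # image name is just an example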

2. Installing Hadoop

The Hadoop version used here is newer than the one in the provided tutorial, so the configuration follows a different article instead.

① Download hadoop-3.2.1.tar.gz to /home/admin/build on the host

② Inside the container, run

cd /root/build
tar -zxvf hadoop-3.2.1.tar.gz -C /usr/local
mv hadoop-3.2.1 hadoop	# rename
# then add the following to ~/.bashrc (vim ~/.bashrc):
export HADOOP_HOME=/usr/local/hadoop
export PATH=$PATH:$HADOOP_HOME/bin 
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
  • Verify the hadoop command, e.g.:
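
    source ~/.bashrc
    hadoop version    # should print Hadoop 3.2.1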

③ Edit hadoop-env.sh and add the following

export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/

④ Edit the files under /usr/local/hadoop/etc/hadoop

  • core-site.xml

    <configuration>
        <property>
            <name>hadoop.tmp.dir</name>
            <value>file:/usr/local/hadoop/tmp</value>
            <description>Abase for other temporary directories.</description>
        </property>
        <property>
            <name>fs.defaultFS</name>
            <value>hdfs://master:9000</value>
        </property>
    </configuration>
    

    When pasting, the indentation may get mangled; the fix is to run :set paste in vim to enter paste mode before pasting.

  • hdfs-site.xml

    <configuration>
        <property>
            <name>dfs.replication</name>
            <value>3</value>
        </property>
        <property>
            <name>dfs.name.dir</name>
            <value>/usr/local/hadoop/hdfs/name</value>
        </property>
        <property>
            <name>dfs.data.dir</name>
            <value>/usr/local/hadoop/hdfs/data</value>
        </property>
    </configuration>
    
  • mapred-site.xml

    <configuration>
      <property>
          <name>mapreduce.framework.name</name>
          <value>yarn</value>
      </property>
      <property>
        <name>mapreduce.application.classpath</name>
        <value>$HADOOP_HOME/share/hadoop/mapreduce/*:$HADOOP_HOME/share/hadoop/mapreduce/lib/*</value>
      </property>
    </configuration>
    
  • yarn-site.xml

    <configuration>
    <!-- Site specific YARN configuration properties -->
        <property>
            <name>yarn.nodemanager.aux-services</name>
            <value>mapreduce_shuffle</value>
        </property>
        <property>
                <name>yarn.resourcemanager.hostname</name>
                <value>master</value>
        </property>
        <property>
            <name>yarn.nodemanager.env-whitelist</name>
            <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_HOME</value>
        </property>
    </configuration>
    

⑤ Save this image

docker commit c8a ubuntu/hadoopinstalled

⑥ Start three containers from the ubuntu/hadoopinstalled image

# first terminal
docker run -itd -h master --name master ubuntu/hadoopinstalled bash
# second terminal
docker run -itd -h slave01 --name slave01 ubuntu/hadoopinstalled bash
# third terminal
docker run -itd -h slave02 --name slave02 ubuntu/hadoopinstalled bash

⑦ Get each container's IP address from its /etc/hosts; they turn out to be

172.18.0.2	master
172.18.0.3	slave01
172.18.0.4	slave02

Copy the lines above into /etc/hosts of all three containers, then verify passwordless login with ssh slave01 and ssh slave02.

⑧ Add slave01 and slave02 to etc/hadoop/workers

⑨ In the master terminal, enter /usr/local/hadoop and run the following commands:

Hadoop 3.0 and above needs a few files modified before it will run properly; see the reference.

bin/hdfs namenode -format
sbin/start-all.sh
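
After start-all.sh finishes, the running daemons can be checked with jps (it ships with the JDK); typically the master shows NameNode, SecondaryNameNode and ResourceManager, and each slave shows DataNode and NodeManager:

jps    # run this in the master container, then again inside slave01 and slave02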

3. Running a Hadoop example program

  • Put the input files in place
./bin/hdfs dfs -mkdir -p /user/root/input &&
./bin/hdfs dfs -put ./etc/hadoop/*.xml input &&
./bin/hdfs dfs -ls /user/root/input

  • Run the example program

    ./bin/hadoop jar ./share/hadoop/mapreduce/hadoop-mapreduce-examples-3.2.1.jar grep input output 'dfs[a-z.]+'
    

  • Fetch the output

    hdfs dfs -cat output/*
    

(4)Summary and pitfalls

  • Total time
    • Two evenings plus one afternoon, about 15 hours
    • Deploying Spring Boot onto Tomcat was fairly bumpy; for example, the app would not start at all with Alibaba's Druid included, so I eventually removed it
    • For Hadoop, the given tutorial is quite old and much of its configuration does not fit the latest version. The HDFS user is best kept the same as the one on the master side (i.e. root); otherwise relative paths may not be found
  • The configuration files and program packages have been given in the course of the write-up above