1、实现haproxy+keepalived高可用集群转发
2、tomcat session cluster的实现

-----------------------------------------------------------------------------------------------------------------

1 实现haproxy+keepalived高可用集群转发

1.1 环境

Keepalived主节点、HAProxy 1为同一主机
Keepalived从节点、HAProxy 2为同一主机

1.2 Keepalived主节点配置

global_defs {
   ...
   vrrp_iptables
}

vrrp_instance test_web {
    state MASTER
    interface eth0
    virtual_router_id 41
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1q2w3e4r
    }
    virtual_ipaddress {
        10.0.0.248/24 dev eth0 label eth0:0
    }
}

1.3 Keepalived从节点配置

global_defs {
   ...
   vrrp_iptables
}

vrrp_instance test_web {
    state BACKUP
    interface eth0
    virtual_router_id 41
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1q2w3e4r
    }
    virtual_ipaddress {
        10.0.0.248/24 dev eth0 label eth0:0
    }
}

1.4 HAProxy 1、2配置

listen test_web
 bind 10.0.0.248:80
 mode tcp
 balance roundrobin
 server web1 10.0.0.27:80 weight 1 check inter 3s fall 3 rise 5
 server web2 10.0.0.37:80 weight 1 check inter 3s fall 3 rise 5
 
#HAProxy 1、2修改内核参数
# echo 'net.ipv4.ip_nonlocal_bind = 1' >> /etc/sysctl.conf        #允许监听非本机地址
# sysctl -p /etc/sysctl.conf

2 tomcat session cluster的实现

2.1 实现 session 复制集群

2.1.1 环境

10.0.0.7     haproxy: 1.5.18
10.0.0.27    tomcat1: 8.5.85    JDK: 1.8.0_191
10.0.0.37    tomcat2: 8.5.85    JDK: 1.8.0_191

2.1.2 haproxy 配置

# tail -n6 /etc/haproxy/haproxy.cfg 
listen test_web
 bind 10.0.0.7:80
 mode http
 balance roundrobin
 server web1 10.0.0.27:8080 weight 1 check inter 3s fall 3 rise 5
 server web2 10.0.0.37:8080 weight 2 check inter 3s fall 3 rise 5

2.1.3 tomcat server.xml 配置

#更改tomcat1 server.xml文件,在<Engine>下新增<Cluster>,新增自定义站点<Host>
# vi /usr/local/tomcat/conf/server.xml
    ......
    <Engine name="Catalina" defaultHost="localhost">
        <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"
                 channelSendOptions="8">

          <Manager className="org.apache.catalina.ha.session.DeltaManager"
                   expireSessionsOnShutdown="false"
                   notifyListenersOnReplication="true"/>

          <Channel className="org.apache.catalina.tribes.group.GroupChannel">
            <Membership className="org.apache.catalina.tribes.membership.McastService"
                        address="230.100.100.7"        #相同组播地址端口为同一组
                        port="45564"
                        frequency="500"
                        dropTime="3000"/>
            <Receiver className="org.apache.catalina.tribes.transport.nio.NioReceiver"
                      address="10.0.0.27"    #单播复制,默认为auto,须更改为本机可对外通信地址,避免绑定127.0.0.1    
                      port="4000"
                      autoBind="100"
                      selectorTimeout="5000"
                      maxThreads="6"/>

            <Sender className="org.apache.catalina.tribes.transport.ReplicationTransmitter">
              <Transport className="org.apache.catalina.tribes.transport.nio.PooledParallelSender"/>
            </Sender>
            <Interceptor className="org.apache.catalina.tribes.group.interceptors.TcpFailureDetector"/>
            <Interceptor className="org.apache.catalina.tribes.group.interceptors.MessageDispatchInterceptor"/>
          </Channel>

          <Valve className="org.apache.catalina.ha.tcp.ReplicationValve"
                 filter=""/>
          <Valve className="org.apache.catalina.ha.session.JvmRouteBinderValve"/>

          <Deployer className="org.apache.catalina.ha.deploy.FarmWarDeployer"
                    tempDir="/tmp/war-temp/"
                    deployDir="/tmp/war-deploy/"
                    watchDir="/tmp/war-listen/"
                    watchEnabled="false"/>

          <ClusterListener className="org.apache.catalina.ha.session.ClusterSessionListener"/>
        </Cluster>
        ......
      <Host name="www.testou.com"  appBase="/data/testdir"
            unpackWARs="true" autoDeploy="true">
      </Host>
      ......

===================================================================================

#更改tomcat2 server.xml文件,在<Engine>下新增<Cluster>,新增自定义站点<Host>
# vi /usr/local/tomcat/conf/server.xml
    ......
    <Engine name="Catalina" defaultHost="localhost">
        <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"
                 channelSendOptions="8">

          <Manager className="org.apache.catalina.ha.session.DeltaManager"
                   expireSessionsOnShutdown="false"
                   notifyListenersOnReplication="true"/>

          <Channel className="org.apache.catalina.tribes.group.GroupChannel">
            <Membership className="org.apache.catalina.tribes.membership.McastService"
                        address="230.100.100.7"        #相同组播地址端口为同一组
                        port="45564"
                        frequency="500"
                        dropTime="3000"/>
            <Receiver className="org.apache.catalina.tribes.transport.nio.NioReceiver"
                      address="10.0.0.37"    #单播复制,默认为auto,须更改为本机可对外通信地址,避免绑定127.0.0.1
                      port="4000"
                      autoBind="100"
                      selectorTimeout="5000"
                      maxThreads="6"/>

            <Sender className="org.apache.catalina.tribes.transport.ReplicationTransmitter">
              <Transport className="org.apache.catalina.tribes.transport.nio.PooledParallelSender"/>
            </Sender>
            <Interceptor className="org.apache.catalina.tribes.group.interceptors.TcpFailureDetector"/>
            <Interceptor className="org.apache.catalina.tribes.group.interceptors.MessageDispatchInterceptor"/>
          </Channel>

          <Valve className="org.apache.catalina.ha.tcp.ReplicationValve"
                 filter=""/>
          <Valve className="org.apache.catalina.ha.session.JvmRouteBinderValve"/>

          <Deployer className="org.apache.catalina.ha.deploy.FarmWarDeployer"
                    tempDir="/tmp/war-temp/"
                    deployDir="/tmp/war-deploy/"
                    watchDir="/tmp/war-listen/"
                    watchEnabled="false"/>

          <ClusterListener className="org.apache.catalina.ha.session.ClusterSessionListener"/>
        </Cluster>
        ......
      <Host name="www.testou.com"  appBase="/data/testdir"
            unpackWARs="true" autoDeploy="true">
      </Host>
      ......

2.1.4 在 tomcat1/2 上创建 web.xml 文件

#拷贝web.xml全局文件,在倒数第2行添加<distributable/>
# mkdir -p /data/testdir/ROOT/WEB-INF
# cp /usr/local/tomcat/conf/web.xml /data/testdir/ROOT/WEB-INF
# vi /data/testdir/ROOT/WEB-INF/web.xml
 ......
   <distributable/>
 </web-app>

2.1.5  在 tomcat1/2 上创建测试资源

# cat /data/testdir/ROOT/index.jsp
<%@ page import="java.util.*" %>
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>lbjsptest</title>
</head>
<body>
<div>On <%=request.getServerName() %></div>
<div><%=request.getLocalAddr() + ":" + request.getLocalPort() %></div>
<div>SessionID = <span style="color:blue"><%=session.getId() %></span></div>
<%=new Date()%>
</body>
</html>

2.1.6 测试

#以root身份启动tomcat(实验过程中曾以java用户启动tomcat,访问时报错500和404)
#使用浏览器访问http://www.testou.com/,刷新切换到不同real_server,查看session_id是否固定不变

2.2 实现 session server

2.2.1 sticky 模式

2.2.1.1 原理

查询session时,如果tomcat自身内存中能查到session则直接使用,查不到才会连接memcached查询;
当http请求结束时,tomcat将主session保存在本机内存,并把备份session存储到memcached

2.2.1.2 环境

10.0.0.7     haproxy: 1.5.18
10.0.0.27    tomcat1: 8.5.85    JDK: 1.8.0_191 memcached: 1.4.15
10.0.0.37    tomcat2: 8.5.85    JDK: 1.8.0_191 memcached: 1.4.15

2.2.1.3 haproxy 配置

# tail -n6 /etc/haproxy/haproxy.cfg
listen test_web
 bind 10.0.0.7:80
 mode tcp
 balance roundrobin
 server web1 10.0.0.27:8080 weight 1 check inter 3s fall 3 rise 5
 server web2 10.0.0.37:8080 weight 1 check inter 3s fall 3 rise 5

2.2.1.4 tomcat1/2 server.xml 配置

#更改tomcat1/2 server.xml文件,在<Engine>下新增自定义站点<Host>
# vi /usr/local/tomcat/conf/server.xml
  ......
    <Engine name="Catalina" defaultHost="localhost">
      <Host name="www.testou.com"  appBase="/data/testdir"
            unpackWARs="true" autoDeploy="true">
      </Host>
    </Engine>
  ......

2.2.1.5 在 tomcat1/2 context.xml 配置

#在tomcat1 context.xml文件中添加
# vi /usr/local/tomcat/conf/context.xml
<Context>
...
<Manager className="de.javakaffee.web.msm.MemcachedBackupSessionManager"
memcachedNodes="n1:10.0.0.27:11211,n2:10.0.0.37:11211"
failoverNodes="n1"
requestUriIgnorePattern=".*\.(ico|png|gif|jpg|css|js)$"
transcoderFactoryClass="de.javakaffee.web.msm.serializer.kryo.KryoTranscoderFactory"
/>
</Context>

#在tomcat2 context.xml文件中添加
# vi /usr/local/tomcat/conf/context.xml
<Context>
...
<Manager className="de.javakaffee.web.msm.MemcachedBackupSessionManager"
memcachedNodes="n1:10.0.0.27:11211,n2:10.0.0.37:11211"
failoverNodes="n2"
requestUriIgnorePattern=".*\.(ico|png|gif|jpg|css|js)$"
transcoderFactoryClass="de.javakaffee.web.msm.serializer.kryo.KryoTranscoderFactory"
/>
</Context>

2.2.1.6 将memcached管理端、驱动程序、序列化与反序列化jar包上传到tomcat对应目录

# ls /usr/local/tomcat/lib/{asm-5.2.jar,kryo-3.0.3.jar,kryo-serializers-0.45.jar,minlog-1.3.1.jar,msm-kryo-serializer-2.3.2.jar,objenesis-2.6.jar,reflectasm-1.11.9.jar,jedis-3.0.0.jar,memcached-session-manager-2.3.2.jar,memcached-session-manager-tc8-2.3.2.jar,spymemcached-2.12.3.jar}
/usr/local/tomcat/lib/asm-5.2.jar
/usr/local/tomcat/lib/jedis-3.0.0.jar
/usr/local/tomcat/lib/kryo-3.0.3.jar
/usr/local/tomcat/lib/kryo-serializers-0.45.jar
/usr/local/tomcat/lib/memcached-session-manager-2.3.2.jar
/usr/local/tomcat/lib/memcached-session-manager-tc8-2.3.2.jar
/usr/local/tomcat/lib/minlog-1.3.1.jar
/usr/local/tomcat/lib/msm-kryo-serializer-2.3.2.jar
/usr/local/tomcat/lib/objenesis-2.6.jar
/usr/local/tomcat/lib/reflectasm-1.11.9.jar
/usr/local/tomcat/lib/spymemcached-2.12.3.jar

2.2.1.7 在 tomcat1/2 上创建测试资源

# mkdir -p /data/testdir/ROOT
# cat /data/testdir/ROOT/index.jsp
<%@ page import="java.util.*" %>
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>lbjsptest</title>
</head>
<body>
<div>On <%=request.getServerName() %></div>
<div><%=request.getLocalAddr() + ":" + request.getLocalPort() %></div>
<div>SessionID = <span style="color:blue"><%=session.getId() %></span></div>
<%=new Date()%>
</body>
</html>

2.2.1.8 测试

#使用浏览器访问http://www.testou.com/,刷新切换到不同real_server,查看session_id是否固定不变

2.2.2 non-sticky 模式

2.2.2.1 原理

#msm 1.4.0之后开始支持non-sticky模式

#原理:
 tomcat产生session后同时发送给memcached主备节点,自身不保存session
 memcache node1下线时,node2转正;node1上线时,node2依旧为主节点

2.2.2.2 环境

10.0.0.7     haproxy: 1.5.18
10.0.0.27    tomcat1: 8.5.85    JDK: 1.8.0_191  memcached: 1.4.15
10.0.0.37    tomcat2: 8.5.85    JDK: 1.8.0_191  memcached: 1.4.15

2.2.2.3 memcached配置

#安装后直接启动
# yum -y install memcached
# systemctl enable --now memcached

2.2.2.4 haproxy 配置

# tail -n6 /etc/haproxy/haproxy.cfg
listen test_web
 bind 10.0.0.7:80
 mode tcp
 balance roundrobin
 server web1 10.0.0.27:8080 weight 1 check inter 3s fall 3 rise 5
 server web2 10.0.0.37:8080 weight 1 check inter 3s fall 3 rise 5

2.2.2.5 tomcat1/2 server.xml 配置

#更改tomcat1/2 server.xml文件,在<Engine>下新增自定义站点<Host>
# vi /usr/local/tomcat/conf/server.xml
  ......
    <Engine name="Catalina" defaultHost="localhost">
      <Host name="www.testou.com"  appBase="/data/testdir"
            unpackWARs="true" autoDeploy="true">
      </Host>
    </Engine>
  ......

2.2.2.6 在 tomcat1/2 context.xml 配置

#在tomcat1/2 context.xml文件中添加
# vi /usr/local/tomcat/conf/context.xml
<Context>
...
  <Manager className="de.javakaffee.web.msm.MemcachedBackupSessionManager"
    memcachedNodes="n1:10.0.0.27:11211,n2:10.0.0.37:11211"
    sticky="false"
    sessionBackupAsync="false"
    lockingMode="uriPattern:/path1|/path2"
    requestUriIgnorePattern=".*\.(ico|png|gif|jpg|css|js)$"
    transcoderFactoryClass="de.javakaffee.web.msm.serializer.kryo.KryoTranscoderFactory"
    />
</Context>

2.2.2.7 将memcached管理端、驱动程序、序列化与反序列化jar包上传到tomcat对应目录

# ls /usr/local/tomcat/lib/{asm-5.2.jar,kryo-3.0.3.jar,kryo-serializers-0.45.jar,minlog-1.3.1.jar,msm-kryo-serializer-2.3.2.jar,objenesis-2.6.jar,reflectasm-1.11.9.jar,jedis-3.0.0.jar,memcached-session-manager-2.3.2.jar,memcached-session-manager-tc8-2.3.2.jar,spymemcached-2.12.3.jar}
/usr/local/tomcat/lib/asm-5.2.jar
/usr/local/tomcat/lib/jedis-3.0.0.jar
/usr/local/tomcat/lib/kryo-3.0.3.jar
/usr/local/tomcat/lib/kryo-serializers-0.45.jar
/usr/local/tomcat/lib/memcached-session-manager-2.3.2.jar
/usr/local/tomcat/lib/memcached-session-manager-tc8-2.3.2.jar
/usr/local/tomcat/lib/minlog-1.3.1.jar
/usr/local/tomcat/lib/msm-kryo-serializer-2.3.2.jar
/usr/local/tomcat/lib/objenesis-2.6.jar
/usr/local/tomcat/lib/reflectasm-1.11.9.jar
/usr/local/tomcat/lib/spymemcached-2.12.3.jar

2.2.2.8 在 tomcat1/2 上创建测试资源

# mkdir -p /data/testdir/ROOT
# cat /data/testdir/ROOT/index.jsp
<%@ page import="java.util.*" %>
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>lbjsptest</title>
</head>
<body>
<div>On <%=request.getServerName() %></div>
<div><%=request.getLocalAddr() + ":" + request.getLocalPort() %></div>
<div>SessionID = <span style="color:blue"><%=session.getId() %></span></div>
<%=new Date()%>
</body>
</html>

2.2.2.9 测试

#使用浏览器访问http://www.testou.com/,刷新切换到不同real_server,查看session_id是否固定不变
posted on 2023-03-07 18:00  不期而至  阅读(25)  评论(0)    收藏  举报