Running microservices in Kubernetes with distributed tracing [custom K8s service: ZooKeeper cluster + custom K8s service: Dubbo provider + custom K8s service: Dubbo consumer + custom service: Dubboadmin]

1. Service deployment

1.1 Configure the registry (ZooKeeper)

1.1.1 Configure the PVs and PVCs

#Configure the NFS export
mkdir -p /data/k8sdata/xks/zookeeper-datadir-skywalking-1
mkdir -p /data/k8sdata/xks/zookeeper-datadir-skywalking-2
mkdir -p /data/k8sdata/xks/zookeeper-datadir-skywalking-3

[root@xianchaomaster1 pv]# vim /etc/exports
/data/k8sdata *(rw,no_root_squash)

[root@xianchaomaster1 pv]# exportfs -arv
exporting *:/data/k8sdata
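
#(optional) sanity-check the export from a worker node before creating the PVs
#(assumes nfs-utils, which provides showmount, is installed there):
showmount -e 192.168.40.180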

#Write the PV manifest
[root@xianchaomaster1 pv]# vim zookeeper-persistentvolume.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-skywalking-pv-1
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.40.180
    path: /data/k8sdata/xks/zookeeper-datadir-skywalking-1
  mountOptions:
    - nfsvers=3

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-skywalking-pv-2
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.40.180
    path: /data/k8sdata/xks/zookeeper-datadir-skywalking-2
  mountOptions:
    - nfsvers=3

---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: zookeeper-datadir-skywalking-pv-3
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 192.168.40.180
    path: /data/k8sdata/xks/zookeeper-datadir-skywalking-3
  mountOptions:
    - nfsvers=3


[root@xianchaomaster1 pv]# kubectl apply -f zookeeper-persistentvolume.yaml
[root@xianchaomaster1 pv]# kubectl get pv
zookeeper-datadir-skywalking-pv-1   10Gi       RWO            Retain           Available                                                             19s
zookeeper-datadir-skywalking-pv-2   10Gi       RWO            Retain           Available                                                             19s
zookeeper-datadir-skywalking-pv-3   10Gi       RWO            Retain           Available                                                             19s

#配置PVC
[root@xianchaomaster1 pv]# vim zookeeper-persistentvolumeclaim.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-skywalking-pvc-1
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-skywalking-pv-1
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-skywalking-pvc-2
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-skywalking-pv-2
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: zookeeper-datadir-skywalking-pvc-3
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: zookeeper-datadir-skywalking-pv-3
  resources:
    requests:
      storage: 10Gi

[root@xianchaomaster1 pv]# kubectl apply -f zookeeper-persistentvolumeclaim.yaml
persistentvolumeclaim/zookeeper-datadir-skywalking-pvc-1 created
persistentvolumeclaim/zookeeper-datadir-skywalking-pvc-2 created
persistentvolumeclaim/zookeeper-datadir-skywalking-pvc-3 created
[root@xianchaomaster1 pv]# kubectl get pvc
NAME                                 STATUS   VOLUME                              CAPACITY   ACCESS MODES   STORAGECLASS   AGE
zookeeper-datadir-skywalking-pvc-3    Bound    zookeeper-datadir-skywalking-pv-3   10Gi       RWO                           6s
zookeeper-datadir-skywalking-pvc-1   Bound    zookeeper-datadir-skywalking-pv-1   10Gi       RWO                           6s
zookeeper-datadir-skywalking-pvc-2   Bound    zookeeper-datadir-skywalking-pv-2   10Gi       RWO                           6s
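
#Note: the empty STORAGECLASS column is expected. Each PVC sets spec.volumeName, which
#binds it statically to its PV, so no StorageClass or dynamic provisioning is involved.
#To inspect one binding:
kubectl describe pvc zookeeper-datadir-skywalking-pvc-1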

1.1.2 Configure zookeeper.yaml

#Pull the image on node1 and node2
docker pull registry.cn-hangzhou.aliyuncs.com/zhangshijie/zookeeper:v3.4.14

#Write the manifest
[root@xianchaomaster1 zookeeper]# cat zookeeper.yaml
apiVersion: v1
kind: Service
metadata:
  name: zookeeper
spec:
  ports:
    - name: client
      port: 2181
  selector:
    app: zookeeper
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper1
spec:
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 32181
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "1"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper2
spec:
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 32182
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "2"
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper3
spec:
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 32183
    - name: followers
      port: 2888
    - name: election
      port: 3888
  selector:
    app: zookeeper
    server-id: "3"
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper1
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
      server-id: "1"
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "1"
    spec:
      #imagePullSecrets:
      #- name: jcr-pull-secret
      containers:
        - name: server
          image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/zookeeper:v3.4.14
          imagePullPolicy: IfNotPresent
          env:
            - name: MYID
              value: "1"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-skywalking-pvc-1
      volumes:
        #a pod spec may declare "volumes:" only once, so the emptyDirs and the PVC
        #share a single list
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir: {}
        - name: zookeeper-datadir-skywalking-pvc-1
          persistentVolumeClaim:
            claimName: zookeeper-datadir-skywalking-pvc-1
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
      server-id: "2"
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "2"
    spec:
      #imagePullSecrets:
      #- name: jcr-pull-secret
      containers:
        - name: server
          image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/zookeeper:v3.4.14
          imagePullPolicy: IfNotPresent
          env:
            - name: MYID
              value: "2"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-skywalking-pvc-2
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir: {}
        - name: zookeeper-datadir-skywalking-pvc-2
          persistentVolumeClaim:
            claimName: zookeeper-datadir-skywalking-pvc-2
---
kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  name: zookeeper3
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper
      server-id: "3"
  template:
    metadata:
      labels:
        app: zookeeper
        server-id: "3"
    spec:
      #imagePullSecrets:
      #- name: jcr-pull-secret
      containers:
        - name: server
          image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/zookeeper:v3.4.14
          imagePullPolicy: IfNotPresent
          env:
            - name: MYID
              value: "3"
            - name: SERVERS
              value: "zookeeper1,zookeeper2,zookeeper3"
            - name: JVMFLAGS
              value: "-Xmx2G"
          ports:
            - containerPort: 2181
            - containerPort: 2888
            - containerPort: 3888
          volumeMounts:
          - mountPath: "/zookeeper/data"
            name: zookeeper-datadir-skywalking-pvc-3
      volumes:
        - name: data
          emptyDir: {}
        - name: wal
          emptyDir: {}
        - name: zookeeper-datadir-skywalking-pvc-3
          persistentVolumeClaim:
            claimName: zookeeper-datadir-skywalking-pvc-3

#Apply the manifest
[root@xianchaomaster1 zookeeper]# kubectl apply -f zookeeper.yaml
service/zookeeper created
service/zookeeper1 created
service/zookeeper2 created
service/zookeeper3 created
deployment.apps/zookeeper1 created
deployment.apps/zookeeper2 created
deployment.apps/zookeeper3 created

[root@xianchaomaster1 zookeeper]# kubectl get pods
NAME                              READY   STATUS                  RESTARTS   AGE
zookeeper1-5d9b4645f8-vd5lk       1/1     Running                 0          4s
zookeeper2-9686b95bf-ghpzm        1/1     Running                 0          4s
zookeeper3-66f55c8548-fk4ww       1/1     Running                 0          4s

[root@xianchaomaster1 zookeeper]# kubectl exec -it zookeeper3-66f55c8548-fk4ww bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
bash-4.3# /zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
ZooKeeper remote JMX Port set to 9010
ZooKeeper remote JMX authenticate set to false
ZooKeeper remote JMX ssl set to false
ZooKeeper remote JMX log4j set to true
Using config: /zookeeper/bin/../conf/zoo.cfg
Mode: leader
bash-4.3#

[root@xianchaomaster1 zookeeper]# kubectl get svc
NAME          TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
zookeeper     ClusterIP   10.106.6.117     <none>        2181/TCP                                       2m58s
zookeeper1    NodePort    10.103.59.98     <none>        2181:32181/TCP,2888:30830/TCP,3888:31244/TCP   2m58s
zookeeper2    NodePort    10.96.144.29     <none>        2181:32182/TCP,2888:32695/TCP,3888:30422/TCP   2m58s
zookeeper3    NodePort    10.96.3.83       <none>        2181:32183/TCP,2888:30400/TCP,3888:30712/TCP   2m58s
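
#(optional) check each member's role through the NodePorts (assumes nc/netcat on the
#host; "srvr" is a standard ZooKeeper four-letter command):
for p in 32181 32182 32183; do echo srvr | nc 192.168.40.180 $p | grep Mode; done
#Expected: one leader, two followers (matching the zkServer.sh status output above)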

1.2 Deploy the provider

Build the images

The image below is built locally on both node1 and node2 (it is never pushed to a registry, and the Deployments use imagePullPolicy: IfNotPresent):
#registry.cn-hangzhou.aliyuncs.com/birkhoff/dubbo-provider:v1

【一】
[root@xianchaonode1 centos]# pwd
/root/skywalking/file/dockerfile/system/centos

[root@xianchaonode1 centos]# ll
total 31848
-rw-r--r-- 1 root root      334 Apr 23 12:49 build-command.sh
-rw-r--r-- 1 root root      464 Apr 23 12:49 Dockerfile
-rw-r--r-- 1 root root 32600353 Apr 23 12:49 filebeat-7.12.1-x86_64.rpm

#1.build-command.sh
[root@xianchaonode1 centos]# cat build-command.sh
#!/bin/bash
docker build -t  registry.cn-hangzhou.aliyuncs.com/birkhoff/skywalking-centos-base:7.9.2009 .
#docker push registry.cn-hangzhou.aliyuncs.com/birkhoff/skywalking-centos-base:7.9.2009

#nerdctl build -t  harbor.magedu.net/baseimages/centos-base:7.9.2009 .
#nerdctl push harbor.magedu.net/baseimages/centos-base:7.9.2009

#2.Dockerfile
[root@xianchaonode1 centos]# cat Dockerfile
#Custom CentOS base image
FROM centos:7.9.2009
MAINTAINER xiaks 807722920@qq.com

ADD filebeat-7.12.1-x86_64.rpm /tmp
RUN  yum makecache fast

RUN yum install -y /tmp/filebeat-7.12.1-x86_64.rpm vim wget tree  lrzsz gcc gcc-c++ automake pcre pcre-devel zlib zlib-devel openssl openssl-devel iproute net-tools iotop &&  rm -rf /etc/localtime /tmp/filebeat-7.12.1-x86_64.rpm && ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime  && useradd nginx -u 2022

#3.filebeat: download filebeat-7.12.1-x86_64.rpm into this directory

# docker pull centos:7.9.2009
# bash build-command.sh
  registry.cn-hangzhou.aliyuncs.com/birkhoff/skywalking-centos-base:7.9.2009
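
#(optional) spot-check the new base image, e.g. that filebeat was installed
#(the path is the one run_java.sh references later):
docker run --rm registry.cn-hangzhou.aliyuncs.com/birkhoff/skywalking-centos-base:7.9.2009 /usr/share/filebeat/bin/filebeat version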
  
【二】
#registry.cn-hangzhou.aliyuncs.com/birkhoff/skywalking-jdk-base:v8.212

[root@xianchaonode1 jdk-1.8.212]# pwd
/root/skywalking/file/dockerfile/web/pub-images/jdk-1.8.212
[root@xianchaonode1 jdk-1.8.212]# ll
total 190456
-rw-r--r-- 1 root root       329 Apr 23 12:49 build-command.sh
-rw-r--r-- 1 root root       398 Apr 23 12:49 Dockerfile
-rw-r--r-- 1 root root 195013152 Apr 23 12:49 jdk-8u212-linux-x64.tar.gz
-rw-r--r-- 1 root root      2105 Apr 23 12:49 profile

#1.build-command.sh
[root@xianchaonode1 jdk-1.8.212]# cat build-command.sh
#!/bin/bash
docker build -t registry.cn-hangzhou.aliyuncs.com/birkhoff/skywalking-jdk-base:v8.212  .
sleep 1
#docker push  registry.cn-hangzhou.aliyuncs.com/birkhoff/skywalking-jdk-base:v8.212

#nerdctl build -t harbor.magedu.net/baseimages/jdk-base:v8.212  .
#sleep 1
#nerdctl push  harbor.magedu.net/baseimages/jdk-base:v8.212

#2.Dockerfile
[root@xianchaonode1 jdk-1.8.212]# cat Dockerfile
#JDK Base Image
FROM  registry.cn-hangzhou.aliyuncs.com/birkhoff/skywalking-centos-base:7.9.2009

MAINTAINER xiaks "807722920@qq.com"

ADD jdk-8u212-linux-x64.tar.gz /usr/local/src/
RUN ln -sv /usr/local/src/jdk1.8.0_212 /usr/local/jdk
ADD profile /etc/profile

ENV JAVA_HOME /usr/local/jdk
ENV JRE_HOME $JAVA_HOME/jre
ENV CLASSPATH $JAVA_HOME/lib/:$JRE_HOME/lib/
ENV PATH $PATH:$JAVA_HOME/bin

#3.profile
[root@xianchaonode1 jdk-1.8.212]# cat profile
# /etc/profile

# System wide environment and startup programs, for login setup
# Functions and aliases go in /etc/bashrc

# It's NOT a good idea to change this file unless you know what you
# are doing. It's much better to create a custom.sh shell script in
# /etc/profile.d/ to make custom changes to your environment, as this
# will prevent the need for merging in future updates.

pathmunge () {
    case ":${PATH}:" in
        *:"$1":*)
            ;;
        *)
            if [ "$2" = "after" ] ; then
                PATH=$PATH:$1
            else
                PATH=$1:$PATH
            fi
    esac
}


if [ -x /usr/bin/id ]; then
    if [ -z "$EUID" ]; then
        # ksh workaround
        EUID=`/usr/bin/id -u`
        UID=`/usr/bin/id -ru`
    fi
    USER="`/usr/bin/id -un`"
    LOGNAME=$USER
    MAIL="/var/spool/mail/$USER"
fi

# Path manipulation
if [ "$EUID" = "0" ]; then
    pathmunge /usr/sbin
    pathmunge /usr/local/sbin
else
    pathmunge /usr/local/sbin after
    pathmunge /usr/sbin after
fi

HOSTNAME=`/usr/bin/hostname 2>/dev/null`
HISTSIZE=1000
if [ "$HISTCONTROL" = "ignorespace" ] ; then
    export HISTCONTROL=ignoreboth
else
    export HISTCONTROL=ignoredups
fi

export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL

# By default, we want umask to get set. This sets it for login shell
# Current threshold for system reserved uid/gids is 200
# You could check uidgid reservation validity in
# /usr/share/doc/setup-*/uidgid file
if [ $UID -gt 199 ] && [ "`/usr/bin/id -gn`" = "`/usr/bin/id -un`" ]; then
    umask 002
else
    umask 022
fi

for i in /etc/profile.d/*.sh /etc/profile.d/sh.local ; do
    if [ -r "$i" ]; then
        if [ "${-#*i}" != "$-" ]; then
            . "$i"
        else
            . "$i" >/dev/null
        fi
    fi
done

unset i
unset -f pathmunge
export LANG=en_US.UTF-8
export HISTTIMEFORMAT="%F %T `whoami` "

export JAVA_HOME=/usr/local/jdk
export TOMCAT_HOME=/apps/tomcat
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$TOMCAT_HOME/bin:$PATH
export CLASSPATH=.$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib:$JAVA_HOME/lib/tools.jar

#4. Download jdk-8u212-linux-x64.tar.gz into this directory

# bash build-command.sh
  registry.cn-hangzhou.aliyuncs.com/birkhoff/skywalking-jdk-base:v8.212
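
#(optional) confirm the JDK is on PATH inside the new image:
docker run --rm registry.cn-hangzhou.aliyuncs.com/birkhoff/skywalking-jdk-base:v8.212 java -version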
  
【三】
[root@xianchaonode1 provider]# pwd
/root/skywalking/file/dockerfile/web/myserver/dubbo/provider
[root@xianchaonode1 provider]# ll
total 1681860
-rw-r--r--  1 root root   28982315 Apr 23 12:49 apache-skywalking-java-agent-8.8.0.tgz
-rw-r--r--  1 root root        331 Apr 23 12:49 build-command.sh
-rw-r--r--  1 root root        462 Apr 23 12:49 Dockerfile
-rw-r--r--  1 root root   14301703 Apr 23 12:48 dubbo-server.jar
-rwxr-xr-x  1 root root        396 Apr 23 12:48 run_java.sh
drwxr-xr-x 10 root root        221 Apr 23 12:48 skywalking-agent

#1.build-command.sh
[root@xianchaonode1 provider]# cat build-command.sh
#!/bin/bash
docker build -t registry.cn-hangzhou.aliyuncs.com/birkhoff/dubbo-provider:v1  .
sleep 1
#docker push harbor.magedu.net/magedu/dubbo-provider:v1

#nerdctl build -t harbor.magedu.net/magedu/dubbo-provider:v1-2022092301-zookeeper1  .
#sleep 1
#nerdctl push harbor.magedu.net/magedu/dubbo-provider:v1-2022092301-zookeeper1

#2.Dockerfile
[root@xianchaonode1 provider]# cat Dockerfile
#Dubbo provider
FROM registry.cn-hangzhou.aliyuncs.com/birkhoff/skywalking-jdk-base:v8.212

MAINTAINER xiaks

RUN yum install file nc -y && useradd user1 -u 2000
RUN mkdir -p /apps/dubbo/provider
ADD dubbo-server.jar  /apps/dubbo/provider/
ADD run_java.sh /apps/dubbo/provider/bin/
ADD skywalking-agent/ /skywalking-agent/
RUN chown user1.user1 /apps /skywalking-agent -R
RUN chmod a+x /apps/dubbo/provider/bin/*.sh

CMD ["/apps/dubbo/provider/bin/run_java.sh"]

#3.run_java.sh
[root@xianchaonode1 provider]# cat run_java.sh
#!/bin/bash
#/usr/share/filebeat/bin/filebeat -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat  &
#su - user1 -c "java -jar /apps/dubbo/provider/dubbo-server.jar"
su - user1 -c "java -javaagent:/skywalking-agent/skywalking-agent.jar -jar /apps/dubbo/provider/dubbo-server.jar"
#tail -f /etc/hosts
[root@xianchaonode1 provider]# chmod +x run_java.sh

#4.skywalking-agent/config/agent.config
[root@xianchaonode1 provider]# vim skywalking-agent/config/agent.config
    agent.namespace=${SW_AGENT_NAMESPACE:k8s-dubbo}
    agent.service_name=${SW_AGENT_NAME:k8s-dubbo-provider}
    collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:192.168.40.182:11800}

#5.dubbo-server.jar
Edit config.properties inside the jar so the registry points at the in-cluster zookeeper1/zookeeper2/zookeeper3 Services (the ?backup= parameter lists fallback registry addresses):
    dubbo.registry=zookeeper://zookeeper1.default.svc.cluster.local:2181?backup=zookeeper2.default.svc.cluster.local:2181,zookeeper3.default.svc.cluster.local:2181
    #dubbo.registry=zookeeper://zookeeper1:2181?backup=zookeeper2:2181,zookeeper3:2181

#6.apache-skywalking-java-agent-8.8.0.tgz: the SkyWalking Java agent package (unpacked here as skywalking-agent/)

# bash build-command.sh
  registry.cn-hangzhou.aliyuncs.com/birkhoff/dubbo-provider:v1
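
#Note: the ${SW_...:default} placeholders in agent.config can also be overridden per
#container without rebuilding the image, because the SkyWalking Java agent accepts JVM
#system properties of the form skywalking.<config-key>. A sketch of run_java.sh using
#that mechanism instead of editing agent.config (values are the ones used above):
#  su - user1 -c "java -javaagent:/skywalking-agent/skywalking-agent.jar \
#    -Dskywalking.agent.service_name=k8s-dubbo-provider \
#    -Dskywalking.collector.backend_service=192.168.40.182:11800 \
#    -jar /apps/dubbo/provider/dubbo-server.jar"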

Write provider.yaml

#First, check that the zookeeper Services resolve from inside a pod
[root@xianchaomaster1 provider]# kubectl exec -it zookeeper1-5d9b4645f8-vd5lk bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
bash-4.3# nslookup zookeeper1.default.svc.cluster.local
nslookup: can't resolve '(null)': Name does not resolve

Name:      zookeeper1.default.svc.cluster.local
Address 1: 10.103.59.98 zookeeper1.default.svc.cluster.local
bash-4.3# nslookup zookeeper2.default.svc.cluster.local
nslookup: can't resolve '(null)': Name does not resolve

Name:      zookeeper2.default.svc.cluster.local
Address 1: 10.96.144.29 zookeeper2.default.svc.cluster.local
bash-4.3# nslookup zookeeper3.default.svc.cluster.local
nslookup: can't resolve '(null)': Name does not resolve

Name:      zookeeper3.default.svc.cluster.local
Address 1: 10.96.3.83 zookeeper3.default.svc.cluster.local
bash-4.3#

#The "nslookup: can't resolve '(null)'" lines are harmless BusyBox nslookup noise;
#all three Services resolve to their ClusterIPs, so the registry addresses in
#config.properties will work from inside the cluster.


#Configure provider.yaml
[root@xianchaomaster1 provider]# vim provider.yaml
[root@xianchaomaster1 provider]# cat provider.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: myserver-provider
  name: myserver-provider-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-provider
  template:
    metadata:
      labels:
        app: myserver-provider
    spec:
      containers:
      - name: myserver-provider-container
        #image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/dubboadmin:v2.5.3-2022092301-zookeeper1
        #image: harbor.magedu.net/magedu/dubbo-provider:v1-2022092301-zookeeper1
        image: registry.cn-hangzhou.aliyuncs.com/birkhoff/dubbo-provider:v1
        imagePullPolicy: IfNotPresent
        #imagePullPolicy: Always
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: myserver-provider
  name: myserver-provider-spec
spec:
  type: NodePort
  ports:
  - name: http
    port: 8080
    protocol: TCP
    targetPort: 8080
    nodePort: 30800
  selector:
    app: myserver-provider

[root@xianchaomaster1 provider]# kubectl apply -f provider.yaml
deployment.apps/myserver-provider-deployment created
service/myserver-provider-spec created
[root@xianchaomaster1 provider]# kubectl get pods
NAME                                            READY   STATUS                  RESTARTS   AGE
myserver-provider-deployment-54f5fdc479-lvzs8   1/1     Running                 0          5s

[root@xianchaomaster1 provider]# kubectl logs -f myserver-provider-deployment-54f5fdc479-lvzs8
DEBUG 2023-04-23 13:06:38:759 main AgentPackagePath : The beacon class location is jar:file:/skywalking-agent/skywalking-agent.jar!/org/apache/skywalking/apm/agent/core/boot/AgentPackagePath.class.
INFO 2023-04-23 13:06:38:761 main SnifferConfigInitializer : Config file found in /skywalking-agent/config/agent.config.

  .   ____          _            __ _ _
 /\\ / ___'_ __ _ _(_)_ __  __ _ \ \ \ \
( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
 \\/  ___)| |_)| | | | | || (_| |  ) ) ) )
  '  |____| .__|_| |_|_| |_\__, | / / / /
 =========|_|==============|___/=/_/_/_/
 :: Spring Boot ::        (v1.3.1.RELEASE)

2023-04-23 13:06:46.327  INFO 8 --- [           main] com.od.dubbotest.Application             : Starting Application v0.0.1-SNAPSHOT on myserver-provider-deployment-54f5fdc479-lvzs8 with PID 8 (/apps/dubbo/provider/dubbo-server.jar started by user1 in /home/user1)
2023-04-23 13:06:46.330  INFO 8 --- [           main] com.od.dubbotest.Application             : No active profile set, falling back to default profiles: default
2023-04-23 13:06:46.371  INFO 8 --- [           main] o.s.b.f.xml.XmlBeanDefinitionReader      : Loading XML bean definitions from URL [jar:file:/apps/dubbo/provider/dubbo-server.jar!/spring-config.xml]
log4j:WARN No appenders could be found for logger (com.alibaba.dubbo.common.logger.LoggerFactory).
log4j:WARN Please initialize the log4j system properly.
log4j:WARN See http://logging.apache.org/log4j/1.2/faq.html#noconfig for more info.
2023-04-23 13:06:47.307  INFO 8 --- [           main] s.c.a.AnnotationConfigApplicationContext : Refreshing org.springframework.context.annotation.AnnotationConfigApplicationContext@4bf88b3c: startup date [Sun Apr 23 13:06:47 CST 2023]; root of context hierarchy
2023-04-23 13:06:48.562  INFO 8 --- [           main] o.s.b.f.c.PropertyPlaceholderConfigurer  : Loading properties file from class path resource [config.properties]
2023-04-23 13:06:49.760  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:zookeeper.version=3.4.6-1569965, built on 02/20/2014 09:09 GMT
2023-04-23 13:06:49.760  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:host.name=myserver-provider-deployment-54f5fdc479-lvzs8
2023-04-23 13:06:49.760  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:java.version=1.8.0_212
2023-04-23 13:06:49.760  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:java.vendor=Oracle Corporation
2023-04-23 13:06:49.760  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:java.home=/usr/local/src/jdk1.8.0_212/jre
2023-04-23 13:06:49.760  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:java.class.path=/apps/dubbo/provider/dubbo-server.jar:/skywalking-agent/skywalking-agent.jar
2023-04-23 13:06:49.760  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
2023-04-23 13:06:49.760  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:java.io.tmpdir=/tmp
2023-04-23 13:06:49.761  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:java.compiler=<NA>
2023-04-23 13:06:49.761  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:os.name=Linux
2023-04-23 13:06:49.761  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:os.arch=amd64
2023-04-23 13:06:49.761  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:os.version=3.10.0-957.el7.x86_64
2023-04-23 13:06:49.761  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:user.name=user1
2023-04-23 13:06:49.761  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:user.home=/home/user1
2023-04-23 13:06:49.761  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:user.dir=/home/user1
2023-04-23 13:06:49.762  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Initiating client connection, connectString=zookeeper1.default.svc.cluster.local:2181,zookeeper2.default.svc.cluster.local:2181,zookeeper3.default.svc.cluster.local:2181 sessionTimeout=30000 watcher=org.I0Itec.zkclient.ZkClient@4d20cda7
2023-04-23 13:06:49.863  INFO 8 --- [ter.local:2181)] org.apache.zookeeper.ClientCnxn          : Opening socket connection to server zookeeper1.default.svc.cluster.local/10.103.59.98:2181. Will not attempt to authenticate using SASL (unknown error)
2023-04-23 13:06:49.863  INFO 8 --- [ter.local:2181)] org.apache.zookeeper.ClientCnxn          : Socket connection established to zookeeper1.default.svc.cluster.local/10.103.59.98:2181, initiating session
2023-04-23 13:06:49.884  INFO 8 --- [ter.local:2181)] org.apache.zookeeper.ClientCnxn          : Session establishment complete on server zookeeper1.default.svc.cluster.local/10.103.59.98:2181, sessionid = 0x100150781930002, negotiated timeout = 30000
2023-04-23 13:06:50.439  INFO 8 --- [           main] o.s.j.e.a.AnnotationMBeanExporter        : Registering beans for JMX exposure on startup
2023-04-23 13:06:50.542  INFO 8 --- [           main] com.od.dubbotest.Application             : Started Application in 6.283 seconds (JVM running for 11.86)
Dubbo server started
Dubbo 服务端已经启动
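
#(optional) confirm the provider registered in ZooKeeper; Dubbo uses the /dubbo root
#by default, and zkCli.sh ships alongside the zkServer.sh used earlier:
kubectl exec -it zookeeper1-5d9b4645f8-vd5lk -- /zookeeper/bin/zkCli.sh ls /dubbo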

1.3 Deploy the consumer

Build the images

【一】【二】
The CentOS base image (registry.cn-hangzhou.aliyuncs.com/birkhoff/skywalking-centos-base:7.9.2009) and the JDK base image (registry.cn-hangzhou.aliyuncs.com/birkhoff/skywalking-jdk-base:v8.212) are the same ones built in 1.2 above, so those steps are not repeated here. As before, the consumer image below is built locally on both node1 and node2:
#registry.cn-hangzhou.aliyuncs.com/birkhoff/dubbo-consumer:v1

【三】
[root@xianchaonode1 consumer]# pwd
/root/skywalking/file/dockerfile/web/myserver/dubbo/consumer
[root@xianchaonode1 consumer]# ll
total 1709968
-rw-r--r--  1 root root   28982315 Apr 23 12:49 apache-skywalking-java-agent-8.8.0.tgz
-rw-r--r--  1 root root        296 Apr 23 13:14 build-command.sh
-rw-r--r--  1 root root        478 Apr 23 13:14 Dockerfile
-rw-r--r--  1 root root   21677483 Apr 23 13:18 dubbo-client.jar
-rwxr-xr-x  1 root root        442 Apr 23 12:49 run_java.sh
drwxr-xr-x 10 root root        221 Apr 23 12:49 skywalking-agent

#1.build-command.sh
[root@xianchaonode1 consumer]# cat build-command.sh
#!/bin/bash
TAG=$1
docker build -t registry.cn-hangzhou.aliyuncs.com/birkhoff/dubbo-consumer:v1  .
sleep 1
#docker push harbor.magedu.net/magedu/dubbo-consumer:${TAG}
#nerdctl build -t harbor.magedu.net/magedu/dubbo-consumer:${TAG}  .
#nerdctl push harbor.magedu.net/magedu/dubbo-consumer:${TAG}

#2.Dockerfile
[root@xianchaonode1 consumer]# cat Dockerfile
#Dubbo consumer
FROM registry.cn-hangzhou.aliyuncs.com/birkhoff/skywalking-jdk-base:v8.212

MAINTAINER xiaks

RUN yum install file -y
RUN mkdir -p /apps/dubbo/consumer &&  useradd user1 -u 2000
ADD run_java.sh /apps/dubbo/consumer/bin/
ADD skywalking-agent/ /skywalking-agent/
ADD dubbo-client.jar   /apps/dubbo/consumer/dubbo-client.jar
RUN chown user1.user1 /apps /skywalking-agent -R
RUN chmod a+x /apps/dubbo/consumer/bin/*.sh

CMD ["/apps/dubbo/consumer/bin/run_java.sh"]

#3.run_java.sh
[root@xianchaonode1 consumer]# cat run_java.sh
#!/bin/bash
#echo "nameserver 223.6.6.6" > /etc/resolv.conf
#/usr/share/filebeat/bin/filebeat -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat  &
#su - user1 -c "java -jar /apps/dubbo/consumer/dubbo-client.jar"
su - user1 -c "java -javaagent:/skywalking-agent/skywalking-agent.jar -jar /apps/dubbo/consumer/dubbo-client.jar"
#tail -f /etc/hosts
[root@xianchaonode1 consumer]# chmod +x run_java.sh

#4.skywalking-agent/config/agent.config
[root@xianchaonode1 consumer]# vim skywalking-agent/config/agent.config
    agent.namespace=${SW_AGENT_NAMESPACE:k8s-dubbo}
    agent.service_name=${SW_AGENT_NAME:k8s-dubbo-consumer}
    collector.backend_service=${SW_AGENT_COLLECTOR_BACKEND_SERVICES:192.168.40.182:11800}

#5.dubbo-client.jar
Edit config.properties inside the jar so the registry points at the in-cluster zookeeper1/zookeeper2/zookeeper3 Services:
    dubbo.registry=zookeeper://zookeeper1.default.svc.cluster.local:2181?backup=zookeeper2.default.svc.cluster.local:2181,zookeeper3.default.svc.cluster.local:2181
    #dubbo.registry=zookeeper://zookeeper1:2181?backup=zookeeper2:2181,zookeeper3:2181

#6.apache-skywalking-java-agent-8.8.0.tgz: the SkyWalking Java agent package (unpacked here as skywalking-agent/)

# bash build-command.sh
  registry.cn-hangzhou.aliyuncs.com/birkhoff/dubbo-consumer:v1

Write consumer.yaml

[root@xianchaomaster1 consumer]# vim consumer.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: myserver-consumer
  name: myserver-consumer-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-consumer
  template:
    metadata:
      labels:
        app: myserver-consumer
    spec:
      #imagePullSecrets:
      containers:
      - name: myserver-consumer-container
        image: registry.cn-hangzhou.aliyuncs.com/birkhoff/dubbo-consumer:v1
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http
[root@xianchaomaster1 consumer]# kubectl apply -f consumer.yaml
deployment.apps/myserver-consumer-deployment created
[root@xianchaomaster1 consumer]# kubectl get pods
NAME                                            READY   STATUS                  RESTARTS   AGE
myserver-consumer-deployment-cf88bd494-jgv4w    1/1     Running                 0          3s
myserver-provider-deployment-54f5fdc479-lvzs8   1/1     Running                 0          14m
zookeeper1-5d9b4645f8-vd5lk                     1/1     Running                 0          99m
zookeeper2-9686b95bf-ghpzm                      1/1     Running                 0          99m
zookeeper3-66f55c8548-fk4ww                     1/1     Running                 0          99m
[root@xianchaomaster1 consumer]# kubectl logs -f myserver-consumer-deployment-cf88bd494-jgv4w
DEBUG 2023-04-23 13:20:41:122 main AgentPackagePath : The beacon class location is jar:file:/skywalking-agent/skywalking-agent.jar!/org/apache/skywalking/apm/agent/core/boot/AgentPackagePath.class.
INFO 2023-04-23 13:20:41:124 main SnifferConfigInitializer : Config file found in /skywalking-agent/config/agent.config.

  .   ____          _            __ _ _
 /\\ / ___'_ __ _ _(_)_ __  __ _ \ \ \ \
( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
 \\/  ___)| |_)| | | | | || (_| |  ) ) ) )
  '  |____| .__|_| |_|_| |_\__, | / / / /
 =========|_|==============|___/=/_/_/_/
 :: Spring Boot ::        (v1.3.1.RELEASE)

2023-04-23 13:20:46.703  INFO 8 --- [           main] com.od.dubbotest.Application             : Starting Application v0.0.1-SNAPSHOT on myserver-consumer-deployment-cf88bd494-jgv4w with PID 8 (/apps/dubbo/consumer/dubbo-client.jar started by user1 in /home/user1)
2023-04-23 13:20:46.706  INFO 8 --- [           main] com.od.dubbotest.Application             : No active profile set, falling back to default profiles: default
2023-04-23 13:20:46.740  INFO 8 --- [           main] o.s.b.f.xml.XmlBeanDefinitionReader      : Loading XML bean definitions from URL [jar:file:/apps/dubbo/consumer/dubbo-client.jar!/spring-config.xml]
2023-04-23 13:20:47.073  INFO 8 --- [           main] c.a.dubbo.common.logger.LoggerFactory    : using logger: com.alibaba.dubbo.common.logger.log4j.Log4jLoggerAdapter
2023-04-23 13:20:47.117  INFO 8 --- [           main] ationConfigEmbeddedWebApplicationContext : Refreshing org.springframework.boot.context.embedded.AnnotationConfigEmbeddedWebApplicationContext@5824b825: startup date [Sun Apr 23 13:20:47 CST 2023]; root of context hierarchy
2023-04-23 13:20:47.697  INFO 8 --- [           main] o.s.b.f.s.DefaultListableBeanFactory     : Overriding bean definition for bean 'beanNameViewResolver' with a different definition: replacing [Root bean: class [null]; scope=; abstract=false; lazyInit=false; autowireMode=3; dependencyCheck=0; autowireCandidate=true; primary=false; factoryBeanName=org.springframework.boot.autoconfigure.web.ErrorMvcAutoConfiguration$WhitelabelErrorViewConfiguration; factoryMethodName=beanNameViewResolver; initMethodName=null; destroyMethodName=(inferred); defined in class path resource [org/springframework/boot/autoconfigure/web/ErrorMvcAutoConfiguration$WhitelabelErrorViewConfiguration.class]] with [Root bean: class [null]; scope=; abstract=false; lazyInit=false; autowireMode=3; dependencyCheck=0; autowireCandidate=true; primary=false; factoryBeanName=org.springframework.boot.autoconfigure.web.WebMvcAutoConfiguration$WebMvcAutoConfigurationAdapter; factoryMethodName=beanNameViewResolver; initMethodName=null; destroyMethodName=(inferred); defined in class path resource [org/springframework/boot/autoconfigure/web/WebMvcAutoConfiguration$WebMvcAutoConfigurationAdapter.class]]
2023-04-23 13:20:47.908  INFO 8 --- [           main] o.s.b.f.c.PropertyPlaceholderConfigurer  : Loading properties file from class path resource [config.properties]
2023-04-23 13:20:48.122  INFO 8 --- [           main] trationDelegate$BeanPostProcessorChecker : Bean 'dubbo-demo-consumer' of type [class com.alibaba.dubbo.config.ApplicationConfig] is not eligible for getting processed by all BeanPostProcessors (for example: not eligible for auto-proxying)
2023-04-23 13:20:48.126  INFO 8 --- [           main] trationDelegate$BeanPostProcessorChecker : Bean 'com.alibaba.dubbo.config.RegistryConfig' of type [class com.alibaba.dubbo.config.RegistryConfig] is not eligible for getting processed by all BeanPostProcessors (for example: not eligible for auto-proxying)
2023-04-23 13:20:48.127  INFO 8 --- [           main] trationDelegate$BeanPostProcessorChecker : Bean 'helloService' of type [class com.alibaba.dubbo.config.spring.ReferenceBean] is not eligible for getting processed by all BeanPostProcessors (for example: not eligible for auto-proxying)
2023-04-23 13:20:48.153  INFO 8 --- [           main] trationDelegate$BeanPostProcessorChecker : Bean 'org.springframework.transaction.annotation.ProxyTransactionManagementConfiguration' of type [class org.springframework.transaction.annotation.ProxyTransactionManagementConfiguration$$EnhancerBySpringCGLIB$$bfdf038c] is not eligible for getting processed by all BeanPostProcessors (for example: not eligible for auto-proxying)
2023-04-23 13:20:48.589  INFO 8 --- [           main] s.b.c.e.t.TomcatEmbeddedServletContainer : Tomcat initialized with port(s): 8080 (http)
2023-04-23 13:20:48.601  INFO 8 --- [           main] o.apache.catalina.core.StandardService   : Starting service Tomcat
2023-04-23 13:20:48.601  INFO 8 --- [           main] org.apache.catalina.core.StandardEngine  : Starting Servlet Engine: Apache Tomcat/8.0.30
2023-04-23 13:20:48.675  INFO 8 --- [ost-startStop-1] o.a.c.c.C.[Tomcat].[localhost].[/]       : Initializing Spring embedded WebApplicationContext
2023-04-23 13:20:48.675  INFO 8 --- [ost-startStop-1] o.s.web.context.ContextLoader            : Root WebApplicationContext: initialization completed in 1560 ms
2023-04-23 13:20:48.922  INFO 8 --- [ost-startStop-1] o.s.b.c.e.ServletRegistrationBean        : Mapping servlet: 'dispatcherServlet' to [/]
2023-04-23 13:20:48.927  INFO 8 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean  : Mapping filter: 'characterEncodingFilter' to: [/*]
2023-04-23 13:20:48.927  INFO 8 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean  : Mapping filter: 'hiddenHttpMethodFilter' to: [/*]
2023-04-23 13:20:48.927  INFO 8 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean  : Mapping filter: 'httpPutFormContentFilter' to: [/*]
2023-04-23 13:20:48.927  INFO 8 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean  : Mapping filter: 'requestContextFilter' to: [/*]
2023-04-23 13:20:49.094  INFO 8 --- [ster.local:2181] org.I0Itec.zkclient.ZkEventThread        : Starting ZkClient event thread.
2023-04-23 13:20:49.098  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:zookeeper.version=3.4.6-1569965, built on 02/20/2014 09:09 GMT
2023-04-23 13:20:49.098  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:host.name=myserver-consumer-deployment-cf88bd494-jgv4w
2023-04-23 13:20:49.098  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:java.version=1.8.0_212
2023-04-23 13:20:49.098  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:java.vendor=Oracle Corporation
2023-04-23 13:20:49.098  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:java.home=/usr/local/src/jdk1.8.0_212/jre
2023-04-23 13:20:49.098  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:java.class.path=/apps/dubbo/consumer/dubbo-client.jar:/skywalking-agent/skywalking-agent.jar
2023-04-23 13:20:49.098  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
2023-04-23 13:20:49.098  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:java.io.tmpdir=/tmp
2023-04-23 13:20:49.098  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:java.compiler=<NA>
2023-04-23 13:20:49.098  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:os.name=Linux
2023-04-23 13:20:49.098  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:os.arch=amd64
2023-04-23 13:20:49.099  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:os.version=3.10.0-957.el7.x86_64
2023-04-23 13:20:49.099  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:user.name=user1
2023-04-23 13:20:49.099  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:user.home=/home/user1
2023-04-23 13:20:49.099  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Client environment:user.dir=/home/user1
2023-04-23 13:20:49.099  INFO 8 --- [           main] org.apache.zookeeper.ZooKeeper           : Initiating client connection, connectString=zookeeper1.default.svc.cluster.local:2181,zookeeper2.default.svc.cluster.local:2181,zookeeper3.default.svc.cluster.local:2181 sessionTimeout=30000 watcher=org.I0Itec.zkclient.ZkClient@1c0cebdf
2023-04-23 13:20:49.125  INFO 8 --- [           main] org.I0Itec.zkclient.ZkClient             : Waiting for keeper state SyncConnected
2023-04-23 13:20:49.130  INFO 8 --- [ter.local:2181)] org.apache.zookeeper.ClientCnxn          : Opening socket connection to server zookeeper1.default.svc.cluster.local/10.103.59.98:2181. Will not attempt to authenticate using SASL (unknown error)
2023-04-23 13:20:49.130  INFO 8 --- [ter.local:2181)] org.apache.zookeeper.ClientCnxn          : Socket connection established to zookeeper1.default.svc.cluster.local/10.103.59.98:2181, initiating session
2023-04-23 13:20:49.140  INFO 8 --- [ter.local:2181)] org.apache.zookeeper.ClientCnxn          : Session establishment complete on server zookeeper1.default.svc.cluster.local/10.103.59.98:2181, sessionid = 0x100150781930003, negotiated timeout = 30000
2023-04-23 13:20:49.142  INFO 8 --- [ain-EventThread] org.I0Itec.zkclient.ZkClient             : zookeeper state changed (SyncConnected)
2023-04-23 13:20:49.160  INFO 8 --- [           main] c.a.d.r.zookeeper.ZookeeperRegistry      :  [DUBBO] Register: consumer://10.244.121.11/com.od.dubbotest.api.HelloService?application=dubbo-demo-consumer&category=consumers&check=false&dubbo=2.5.3&interface=com.od.dubbotest.api.HelloService&methods=hello&pid=8&side=consumer&timestamp=1682227248993, dubbo version: 2.5.3, current host: 10.244.121.11
2023-04-23 13:20:49.187  INFO 8 --- [           main] c.a.d.r.zookeeper.ZookeeperRegistry      :  [DUBBO] Subscribe: consumer://10.244.121.11/com.od.dubbotest.api.HelloService?application=dubbo-demo-consumer&category=providers,configurators,routers&dubbo=2.5.3&interface=com.od.dubbotest.api.HelloService&methods=hello&pid=8&side=consumer&timestamp=1682227248993, dubbo version: 2.5.3, current host: 10.244.121.11
2023-04-23 13:20:49.217  INFO 8 --- [           main] c.a.d.r.zookeeper.ZookeeperRegistry      :  [DUBBO] Notify urls for subscribe url consumer://10.244.121.11/com.od.dubbotest.api.HelloService?application=dubbo-demo-consumer&category=providers,configurators,routers&dubbo=2.5.3&interface=com.od.dubbotest.api.HelloService&methods=hello&pid=8&side=consumer&timestamp=1682227248993, urls: [dubbo://10.244.121.24:20880/com.od.dubbotest.api.HelloService?anyhost=true&application=dubbo-demo-service&dubbo=2.5.3&interface=com.od.dubbotest.api.HelloService&methods=hello&pid=8&side=provider&timestamp=1682226410459, dubbo://10.244.121.24:20880/com.od.dubbotest.api.HelloService?anyhost=true&application=dubbo-demo-service&dubbo=2.5.3&interface=com.od.dubbotest.api.HelloService&methods=hello&pid=8&side=provider&timestamp=1682226409252, empty://10.244.121.11/com.od.dubbotest.api.HelloService?application=dubbo-demo-consumer&category=configurators&dubbo=2.5.3&interface=com.od.dubbotest.api.HelloService&methods=hello&pid=8&side=consumer&timestamp=1682227248993, empty://10.244.121.11/com.od.dubbotest.api.HelloService?application=dubbo-demo-consumer&category=routers&dubbo=2.5.3&interface=com.od.dubbotest.api.HelloService&methods=hello&pid=8&side=consumer&timestamp=1682227248993], dubbo version: 2.5.3, current host: 10.244.121.11
2023-04-23 13:20:49.365  INFO 8 --- [           main] c.a.d.remoting.transport.AbstractClient  :  [DUBBO] Successed connect to server /10.244.121.24:20880 from NettyClient 10.244.121.11 using dubbo version 2.5.3, channel is NettyChannel [channel=[id: 0x13d5e7f3, /10.244.121.11:36904 => /10.244.121.24:20880]], dubbo version: 2.5.3, current host: 10.244.121.11
2023-04-23 13:20:49.365  INFO 8 --- [           main] c.a.d.remoting.transport.AbstractClient  :  [DUBBO] Start NettyClient myserver-consumer-deployment-cf88bd494-jgv4w/10.244.121.11 connect to the server /10.244.121.24:20880, dubbo version: 2.5.3, current host: 10.244.121.11
2023-04-23 13:20:49.422  INFO 8 --- [           main] com.alibaba.dubbo.config.AbstractConfig  :  [DUBBO] Refer dubbo service com.od.dubbotest.api.HelloService from url zookeeper://zookeeper1.default.svc.cluster.local:2181/com.alibaba.dubbo.registry.RegistryService?anyhost=true&application=dubbo-demo-consumer&check=false&dubbo=2.5.3&interface=com.od.dubbotest.api.HelloService&methods=hello&pid=8&side=consumer&timestamp=1682227248993, dubbo version: 2.5.3, current host: 10.244.121.11
2023-04-23 13:20:49.635  INFO 8 --- [           main] s.w.s.m.m.a.RequestMappingHandlerAdapter : Looking for @ControllerAdvice: org.springframework.boot.context.embedded.AnnotationConfigEmbeddedWebApplicationContext@5824b825: startup date [Sun Apr 23 13:20:47 CST 2023]; root of context hierarchy
2023-04-23 13:20:49.712  INFO 8 --- [           main] s.w.s.m.m.a.RequestMappingHandlerMapping : Mapped "{[/bj]}" onto public java.lang.String com.od.dubbotest.action.BjAction.say(java.lang.String)
2023-04-23 13:20:49.713  INFO 8 --- [           main] s.w.s.m.m.a.RequestMappingHandlerMapping : Mapped "{[/hello]}" onto public java.lang.String com.od.dubbotest.action.HelloAction.say(java.lang.String)
2023-04-23 13:20:49.714  INFO 8 --- [           main] s.w.s.m.m.a.RequestMappingHandlerMapping : Mapped "{[/error],produces=[text/html]}" onto public org.springframework.web.servlet.ModelAndView org.springframework.boot.autoconfigure.web.BasicErrorController.errorHtml(javax.servlet.http.HttpServletRequest,javax.servlet.http.HttpServletResponse)
2023-04-23 13:20:49.714  INFO 8 --- [           main] s.w.s.m.m.a.RequestMappingHandlerMapping : Mapped "{[/error]}" onto public org.springframework.http.ResponseEntity<java.util.Map<java.lang.String, java.lang.Object>> org.springframework.boot.autoconfigure.web.BasicErrorController.error(javax.servlet.http.HttpServletRequest)
2023-04-23 13:20:49.757  INFO 8 --- [           main] o.s.w.s.handler.SimpleUrlHandlerMapping  : Mapped URL path [/webjars/**] onto handler of type [class org.springframework.web.servlet.resource.ResourceHttpRequestHandler]
2023-04-23 13:20:49.757  INFO 8 --- [           main] o.s.w.s.handler.SimpleUrlHandlerMapping  : Mapped URL path [/**] onto handler of type [class org.springframework.web.servlet.resource.ResourceHttpRequestHandler]
2023-04-23 13:20:49.811  INFO 8 --- [           main] o.s.w.s.handler.SimpleUrlHandlerMapping  : Mapped URL path [/**/favicon.ico] onto handler of type [class org.springframework.web.servlet.resource.ResourceHttpRequestHandler]
2023-04-23 13:20:49.966  INFO 8 --- [           main] o.s.j.e.a.AnnotationMBeanExporter        : Registering beans for JMX exposure on startup
2023-04-23 13:20:50.035  INFO 8 --- [           main] s.b.c.e.t.TomcatEmbeddedServletContainer : Tomcat started on port(s): 8080 (http)
2023-04-23 13:20:50.039  INFO 8 --- [           main] com.od.dubbotest.Application             : Started Application in 4.766 seconds (JVM running for 8.981)
Dubbo client started
Dubbo 消费者端启动

[root@xianchaomaster1 consumer]# vim service.yaml
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: myserver-consumer
  name: myserver-consumer-server
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30001
  selector:
    app: myserver-consumer

[root@xianchaomaster1 consumer]# kubectl apply -f service.yaml
service/myserver-consumer-server created
[root@xianchaomaster1 consumer]# kubectl get svc
NAME                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
myserver-consumer-server   NodePort    10.111.221.137   <none>        80:30001/TCP                                   5s
myserver-provider-spec     NodePort    10.110.182.162   <none>        8080:30800/TCP                                 15m
zookeeper                  ClusterIP   10.106.6.117     <none>        2181/TCP                                       100m
zookeeper1                 NodePort    10.103.59.98     <none>        2181:32181/TCP,2888:30830/TCP,3888:31244/TCP   100m
zookeeper2                 NodePort    10.96.144.29     <none>        2181:32182/TCP,2888:32695/TCP,3888:30422/TCP   100m
zookeeper3                 NodePort    10.96.3.83       <none>        2181:32183/TCP,2888:30400/TCP,3888:30712/TCP   100m

2. Verify access to the service on port 30001

http://192.168.40.180:30001/hello?name=zaijianwodeairen
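
#(optional) the same check from the command line; a few extra requests also give
#SkyWalking traces to look at in the next step:
for n in poiu xks aiqing; do curl -s "http://192.168.40.180:30001/hello?name=$n"; echo; done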

Check the logs

[root@xianchaomaster1 consumer]# kubectl get pods
NAME                                            READY   STATUS                  RESTARTS   AGE
myserver-consumer-deployment-cf88bd494-jgv4w    1/1     Running                 0          10m
myserver-provider-deployment-54f5fdc479-lvzs8   1/1     Running                 0          24m
zookeeper1-5d9b4645f8-vd5lk                     1/1     Running                 0          110m
zookeeper2-9686b95bf-ghpzm                      1/1     Running                 0          110m
zookeeper3-66f55c8548-fk4ww                     1/1     Running                 0          110m

[root@xianchaomaster1 consumer]# kubectl logs -f myserver-consumer-deployment-cf88bd494-jgv4w
HelloAction接收到请求:poiu
HelloService返回到结果:<h1>这是Dubbo consumer测试页面Version 2</h1><h2>测试调用provider</h2>hello poiu
HelloAction接收到请求:zaijianwodeairen
HelloService返回到结果:<h1>这是Dubbo consumer测试页面Version 2</h1><h2>测试调用provider</h2>hello zaijianwodeairen
HelloAction接收到请求:xks
HelloService返回到结果:<h1>这是Dubbo consumer测试页面Version 2</h1><h2>测试调用provider</h2>hello xks
HelloAction接收到请求:aiqing
HelloService返回到结果:<h1>这是Dubbo consumer测试页面Version 2</h1><h2>测试调用provider</h2>hello aiqing
HelloAction接收到请求:Success
HelloService返回到结果:<h1>这是Dubbo consumer测试页面Version 2</h1><h2>测试调用provider</h2>hello Success

3. Verify that SkyWalking picked up the call chain

In the SkyWalking UI, the two services configured in agent.config (k8s-dubbo-provider and k8s-dubbo-consumer, under the k8s-dubbo namespace) should now appear in the topology and trace views after the requests from step 2.

4. Dubboadmin

Build the image

[root@xianchaomaster1 dubboadmin]# pwd
/root/skywalking/skywalking-dubbo_2.5.3-k8s-data_dockerfile-yaml/dockerfile/web/myserver/dubbo/dubboadmin
[root@xianchaomaster1 dubboadmin]# ll
total 54304
-rw-r--r-- 1 root root      259 Sep 23  2022 build-command.sh
-rw-r--r-- 1 root root    22201 Jun 22  2021 catalina.sh
-rw-r--r-- 1 root root      649 Sep 23  2022 Dockerfile
-rw-r--r-- 1 root root 27777987 Sep 23  2022 dubboadmin.war
-rw-r--r-- 1 root root     3436 Jun 22  2021 logging.properties
-rw-r--r-- 1 root root       99 Sep 23  2022 run_tomcat.sh
-rw-r--r-- 1 root root     6427 Jun 22  2021 server.xml

#1.build-command.sh
[root@xianchaonode1 dubboadmin]# cat build-command.sh
#!/bin/bash
docker build -t registry.cn-hangzhou.aliyuncs.com/birkhoff/dubboadmin:v1  .

#2.Dockerfile
[root@xianchaonode1 dubboadmin]# cat Dockerfile
#Dubbo dubboadmin
#FROM harbor.magedu.local/pub-images/tomcat-base:v8.5.43
FROM registry.cn-hangzhou.aliyuncs.com/birkhoff/tomcat-base:v8.5.43

MAINTAINER xiaks "807722920@qq.com"

RUN yum install unzip -y
ADD server.xml /apps/tomcat/conf/server.xml
ADD logging.properties /apps/tomcat/conf/logging.properties
ADD catalina.sh /apps/tomcat/bin/catalina.sh
ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh
ADD dubboadmin.war  /data/tomcat/webapps/dubboadmin.war
RUN useradd user1 && cd /data/tomcat/webapps && unzip dubboadmin.war && rm -rf dubboadmin.war && chown -R user1.user1 /data /apps

EXPOSE 8080 8443

CMD ["/apps/tomcat/bin/run_tomcat.sh"]

#3.run_tomcat.sh
[root@xianchaonode1 dubboadmin]# cat run_tomcat.sh
#!/bin/bash

su - user1 -c "/apps/tomcat/bin/catalina.sh start"
su - user1 -c "tail -f /etc/hosts"

#chmod a+x run_tomcat.sh
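
#Note: "catalina.sh start" forks Tomcat into the background, so the trailing
#"tail -f /etc/hosts" is what keeps the container's PID 1 alive; without it the pod
#would exit as soon as run_tomcat.sh returned.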

#4. dubboadmin.war: update the registry address
[root@xianchaomaster1 WEB-INF]# pwd
/root/skywalking/skywalking-dubbo_2.5.3-k8s-data_dockerfile-yaml/dockerfile/web/myserver/dubbo/dubboadmin/dubboadmin/WEB-INF
[root@xianchaomaster1 WEB-INF]# cat dubbo.properties
#dubbo.registry.address=zookeeper://zookeeper1.magedu.svc.magedu.local:2181
dubbo.registry.address=zookeeper://192.168.40.181:2181
dubbo.admin.root.password=root
dubbo.admin.guest.password=guest

#5.logging.properties
[root@xianchaonode1 dubboadmin]# cat logging.properties
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

handlers = 1catalina.org.apache.juli.AsyncFileHandler, 2localhost.org.apache.juli.AsyncFileHandler, 3manager.org.apache.juli.AsyncFileHandler, 4host-manager.org.apache.juli.AsyncFileHandler, java.util.logging.ConsoleHandler

.handlers = 1catalina.org.apache.juli.AsyncFileHandler, java.util.logging.ConsoleHandler

############################################################
# Handler specific properties.
# Describes specific configuration info for Handlers.
############################################################

1catalina.org.apache.juli.AsyncFileHandler.level = FINE
1catalina.org.apache.juli.AsyncFileHandler.directory = /data/tomcat/logs
1catalina.org.apache.juli.AsyncFileHandler.prefix = catalina.

2localhost.org.apache.juli.AsyncFileHandler.level = FINE
2localhost.org.apache.juli.AsyncFileHandler.directory = /data/tomcat/logs
2localhost.org.apache.juli.AsyncFileHandler.prefix = localhost.

3manager.org.apache.juli.AsyncFileHandler.level = FINE
3manager.org.apache.juli.AsyncFileHandler.directory = /data/tomcat/logs
3manager.org.apache.juli.AsyncFileHandler.prefix = manager.

4host-manager.org.apache.juli.AsyncFileHandler.level = FINE
4host-manager.org.apache.juli.AsyncFileHandler.directory = /data/tomcat/logs
4host-manager.org.apache.juli.AsyncFileHandler.prefix = host-manager.

java.util.logging.ConsoleHandler.level = FINE
java.util.logging.ConsoleHandler.formatter = org.apache.juli.OneLineFormatter


############################################################
# Facility specific properties.
# Provides extra control for each logger.
############################################################

org.apache.catalina.core.ContainerBase.[Catalina].[localhost].level = INFO
org.apache.catalina.core.ContainerBase.[Catalina].[localhost].handlers = 2localhost.org.apache.juli.AsyncFileHandler

org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].level = INFO
org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/manager].handlers = 3manager.org.apache.juli.AsyncFileHandler

org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].level = INFO
org.apache.catalina.core.ContainerBase.[Catalina].[localhost].[/host-manager].handlers = 4host-manager.org.apache.juli.AsyncFileHandler

# For example, set the org.apache.catalina.util.LifecycleBase logger to log
# each component that extends LifecycleBase changing state:
#org.apache.catalina.util.LifecycleBase.level = FINE

# To see debug messages in TldLocationsCache, uncomment the following line:
#org.apache.jasper.compiler.TldLocationsCache.level = FINE


#6.catalina.sh
[root@xianchaonode1 dubboadmin]# chmod a+x catalina.sh

[root@xianchaonode1 dubboadmin]# cat catalina.sh
#!/bin/sh

# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# -----------------------------------------------------------------------------
# Control Script for the CATALINA Server
#
# Environment Variable Prerequisites
#
#   Do not set the variables in this script. Instead put them into a script
#   setenv.sh in CATALINA_BASE/bin to keep your customizations separate.
#
#   CATALINA_HOME   May point at your Catalina "build" directory.
#
#   CATALINA_BASE   (Optional) Base directory for resolving dynamic portions
#                   of a Catalina installation.  If not present, resolves to
#                   the same directory that CATALINA_HOME points to.
#
#   CATALINA_OUT    (Optional) Full path to a file where stdout and stderr
#                   will be redirected.
#                   Default is $CATALINA_BASE/logs/catalina.out
#
#   CATALINA_OPTS   (Optional) Java runtime options used when the "start",
#                   "run" or "debug" command is executed.
#                   Include here and not in JAVA_OPTS all options, that should
#                   only be used by Tomcat itself, not by the stop process,
#                   the version command etc.
#                   Examples are heap size, GC logging, JMX ports etc.
#
#   CATALINA_TMPDIR (Optional) Directory path location of temporary directory
#                   the JVM should use (java.io.tmpdir).  Defaults to
#                   $CATALINA_BASE/temp.
#
#   JAVA_HOME       Must point at your Java Development Kit installation.
#                   Required to run the with the "debug" argument.
#
#   JRE_HOME        Must point at your Java Runtime installation.
#                   Defaults to JAVA_HOME if empty. If JRE_HOME and JAVA_HOME
#                   are both set, JRE_HOME is used.
#
#   JAVA_OPTS       (Optional) Java runtime options used when any command
#                   is executed.
#                   Include here and not in CATALINA_OPTS all options, that
#                   should be used by Tomcat and also by the stop process,
#                   the version command etc.
#                   Most options should go into CATALINA_OPTS.
#
#   JAVA_ENDORSED_DIRS (Optional) Lists of of colon separated directories
#                   containing some jars in order to allow replacement of APIs
#                   created outside of the JCP (i.e. DOM and SAX from W3C).
#                   It can also be used to update the XML parser implementation.
#                   Defaults to $CATALINA_HOME/endorsed.
#
#   JPDA_TRANSPORT  (Optional) JPDA transport used when the "jpda start"
#                   command is executed. The default is "dt_socket".
#
#   JPDA_ADDRESS    (Optional) Java runtime options used when the "jpda start"
#                   command is executed. The default is localhost:8000.
#
#   JPDA_SUSPEND    (Optional) Java runtime options used when the "jpda start"
#                   command is executed. Specifies whether JVM should suspend
#                   execution immediately after startup. Default is "n".
#
#   JPDA_OPTS       (Optional) Java runtime options used when the "jpda start"
#                   command is executed. If used, JPDA_TRANSPORT, JPDA_ADDRESS,
#                   and JPDA_SUSPEND are ignored. Thus, all required jpda
#                   options MUST be specified. The default is:
#
#                   -agentlib:jdwp=transport=$JPDA_TRANSPORT,
#                       address=$JPDA_ADDRESS,server=y,suspend=$JPDA_SUSPEND
#
#   CATALINA_PID    (Optional) Path of the file which should contains the pid
#                   of the catalina startup java process, when start (fork) is
#                   used
#
#   LOGGING_CONFIG  (Optional) Override Tomcat's logging config file
#                   Example (all one line)
#                   LOGGING_CONFIG="-Djava.util.logging.config.file=$CATALINA_BASE/conf/logging.properties"
#
#   LOGGING_MANAGER (Optional) Override Tomcat's logging manager
#                   Example (all one line)
#                   LOGGING_MANAGER="-Djava.util.logging.manager=org.apache.juli.ClassLoaderLogManager"
# -----------------------------------------------------------------------------

# NOTE: the young-generation sizes must fit inside the 512m heap; the original
# -Xmn1g / -XX:NewSize=2048M values would prevent the JVM from starting, the
# PermSize flags are ignored on JDK 8, and -XX:+ExplicitGCInvokesConcurrent
# conflicts with -XX:+DisableExplicitGC, so those flags have been dropped here.
JAVA_OPTS="-server -Xms512m -Xmx512m -Xss512k -XX:NewRatio=2 -XX:MaxTenuringThreshold=10 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:CMSInitiatingOccupancyFraction=65 -XX:+CMSParallelRemarkEnabled -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=5 -XX:+DisableExplicitGC -XX:+UseBiasedLocking -XX:LargePageSizeInBytes=128m"

# OS specific support.  $var _must_ be set to either true or false.
cygwin=false
darwin=false
os400=false
case "`uname`" in
CYGWIN*) cygwin=true;;
Darwin*) darwin=true;;
OS400*) os400=true;;
esac

# resolve links - $0 may be a softlink
PRG="$0"

while [ -h "$PRG" ]; do
  ls=`ls -ld "$PRG"`
  link=`expr "$ls" : '.*-> \(.*\)$'`
  if expr "$link" : '/.*' > /dev/null; then
    PRG="$link"
  else
    PRG=`dirname "$PRG"`/"$link"
  fi
done

# Get standard environment variables
PRGDIR=`dirname "$PRG"`

# Only set CATALINA_HOME if not already set
[ -z "$CATALINA_HOME" ] && CATALINA_HOME=`cd "$PRGDIR/.." >/dev/null; pwd`

# Copy CATALINA_BASE from CATALINA_HOME if not already set
[ -z "$CATALINA_BASE" ] && CATALINA_BASE="$CATALINA_HOME"

# Ensure that neither CATALINA_HOME nor CATALINA_BASE contains a colon
# as this is used as the separator in the classpath and Java provides no
# mechanism for escaping if the same character appears in the path.
case $CATALINA_HOME in
  *:*) echo "Using CATALINA_HOME:   $CATALINA_HOME";
       echo "Unable to start as CATALINA_HOME contains a colon (:) character";
       exit 1;
esac
case $CATALINA_BASE in
  *:*) echo "Using CATALINA_BASE:   $CATALINA_BASE";
       echo "Unable to start as CATALINA_BASE contains a colon (:) character";
       exit 1;
esac

# Ensure that any user defined CLASSPATH variables are not used on startup,
# but allow them to be specified in setenv.sh, in rare case when it is needed.
CLASSPATH=

if [ -r "$CATALINA_BASE/bin/setenv.sh" ]; then
  . "$CATALINA_BASE/bin/setenv.sh"
elif [ -r "$CATALINA_HOME/bin/setenv.sh" ]; then
  . "$CATALINA_HOME/bin/setenv.sh"
fi

# For Cygwin, ensure paths are in UNIX format before anything is touched
if $cygwin; then
  [ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
  [ -n "$JRE_HOME" ] && JRE_HOME=`cygpath --unix "$JRE_HOME"`
  [ -n "$CATALINA_HOME" ] && CATALINA_HOME=`cygpath --unix "$CATALINA_HOME"`
  [ -n "$CATALINA_BASE" ] && CATALINA_BASE=`cygpath --unix "$CATALINA_BASE"`
  [ -n "$CLASSPATH" ] && CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
fi

# For OS400
if $os400; then
  # Set job priority to standard for interactive (interactive - 6) by using
  # the interactive priority - 6, the helper threads that respond to requests
  # will be running at the same priority as interactive jobs.
  COMMAND='chgjob job('$JOBNAME') runpty(6)'
  system $COMMAND

  # Enable multi threading
  export QIBM_MULTI_THREADED=Y
fi

# Get standard Java environment variables
if $os400; then
  # -r will Only work on the os400 if the files are:
  # 1. owned by the user
  # 2. owned by the PRIMARY group of the user
  # this will not work if the user belongs in secondary groups
  . "$CATALINA_HOME"/bin/setclasspath.sh
else
  if [ -r "$CATALINA_HOME"/bin/setclasspath.sh ]; then
    . "$CATALINA_HOME"/bin/setclasspath.sh
  else
    echo "Cannot find $CATALINA_HOME/bin/setclasspath.sh"
    echo "This file is needed to run this program"
    exit 1
  fi
fi

# Add on extra jar files to CLASSPATH
if [ ! -z "$CLASSPATH" ] ; then
  CLASSPATH="$CLASSPATH":
fi
CLASSPATH="$CLASSPATH""$CATALINA_HOME"/bin/bootstrap.jar

if [ -z "$CATALINA_OUT" ] ; then
  CATALINA_OUT="$CATALINA_BASE"/logs/catalina.out
fi

if [ -z "$CATALINA_TMPDIR" ] ; then
  # Define the java.io.tmpdir to use for Catalina
  CATALINA_TMPDIR="$CATALINA_BASE"/temp
fi

# Add tomcat-juli.jar to classpath
# tomcat-juli.jar can be over-ridden per instance
if [ -r "$CATALINA_BASE/bin/tomcat-juli.jar" ] ; then
  CLASSPATH=$CLASSPATH:$CATALINA_BASE/bin/tomcat-juli.jar
else
  CLASSPATH=$CLASSPATH:$CATALINA_HOME/bin/tomcat-juli.jar
fi

# Bugzilla 37848: When no TTY is available, don't output to console
have_tty=0
if [ "`tty`" != "not a tty" ]; then
    have_tty=1
fi

# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
  JAVA_HOME=`cygpath --absolute --windows "$JAVA_HOME"`
  JRE_HOME=`cygpath --absolute --windows "$JRE_HOME"`
  CATALINA_HOME=`cygpath --absolute --windows "$CATALINA_HOME"`
  CATALINA_BASE=`cygpath --absolute --windows "$CATALINA_BASE"`
  CATALINA_TMPDIR=`cygpath --absolute --windows "$CATALINA_TMPDIR"`
  CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
  JAVA_ENDORSED_DIRS=`cygpath --path --windows "$JAVA_ENDORSED_DIRS"`
fi

# Set juli LogManager config file if it is present and an override has not been issued
if [ -z "$LOGGING_CONFIG" ]; then
  if [ -r "$CATALINA_BASE"/conf/logging.properties ]; then
    LOGGING_CONFIG="-Djava.util.logging.config.file=$CATALINA_BASE/conf/logging.properties"
  else
    # Bugzilla 45585
    LOGGING_CONFIG="-Dnop"
  fi
fi

if [ -z "$LOGGING_MANAGER" ]; then
  LOGGING_MANAGER="-Djava.util.logging.manager=org.apache.juli.ClassLoaderLogManager"
fi

# Uncomment the following line to make the umask available when using the
# org.apache.catalina.security.SecurityListener
#JAVA_OPTS="$JAVA_OPTS -Dorg.apache.catalina.security.SecurityListener.UMASK=`umask`"

# ----- Execute The Requested Command -----------------------------------------

# Bugzilla 37848: only output this if we have a TTY
if [ $have_tty -eq 1 ]; then
  echo "Using CATALINA_BASE:   $CATALINA_BASE"
  echo "Using CATALINA_HOME:   $CATALINA_HOME"
  echo "Using CATALINA_TMPDIR: $CATALINA_TMPDIR"
  if [ "$1" = "debug" ] ; then
    echo "Using JAVA_HOME:       $JAVA_HOME"
  else
    echo "Using JRE_HOME:        $JRE_HOME"
  fi
  echo "Using CLASSPATH:       $CLASSPATH"
  if [ ! -z "$CATALINA_PID" ]; then
    echo "Using CATALINA_PID:    $CATALINA_PID"
  fi
fi

if [ "$1" = "jpda" ] ; then
  if [ -z "$JPDA_TRANSPORT" ]; then
    JPDA_TRANSPORT="dt_socket"
  fi
  if [ -z "$JPDA_ADDRESS" ]; then
    JPDA_ADDRESS="localhost:8000"
  fi
  if [ -z "$JPDA_SUSPEND" ]; then
    JPDA_SUSPEND="n"
  fi
  if [ -z "$JPDA_OPTS" ]; then
    JPDA_OPTS="-agentlib:jdwp=transport=$JPDA_TRANSPORT,address=$JPDA_ADDRESS,server=y,suspend=$JPDA_SUSPEND"
  fi
  CATALINA_OPTS="$JPDA_OPTS $CATALINA_OPTS"
  shift
fi

if [ "$1" = "debug" ] ; then
  if $os400; then
    echo "Debug command not available on OS400"
    exit 1
  else
    shift
    if [ "$1" = "-security" ] ; then
      if [ $have_tty -eq 1 ]; then
        echo "Using Security Manager"
      fi
      shift
      exec "$_RUNJDB" "$LOGGING_CONFIG" $LOGGING_MANAGER $JAVA_OPTS $CATALINA_OPTS \
        -Djava.endorsed.dirs="$JAVA_ENDORSED_DIRS" -classpath "$CLASSPATH" \
        -sourcepath "$CATALINA_HOME"/../../java \
        -Djava.security.manager \
        -Djava.security.policy=="$CATALINA_BASE"/conf/catalina.policy \
        -Dcatalina.base="$CATALINA_BASE" \
        -Dcatalina.home="$CATALINA_HOME" \
        -Djava.io.tmpdir="$CATALINA_TMPDIR" \
         -Djava.awt.headless=true \
        org.apache.catalina.startup.Bootstrap "$@" start
    else
      exec "$_RUNJDB" "$LOGGING_CONFIG" $LOGGING_MANAGER $JAVA_OPTS $CATALINA_OPTS \
        -Djava.endorsed.dirs="$JAVA_ENDORSED_DIRS" -classpath "$CLASSPATH" \
        -sourcepath "$CATALINA_HOME"/../../java \
        -Dcatalina.base="$CATALINA_BASE" \
        -Dcatalina.home="$CATALINA_HOME" \
        -Djava.io.tmpdir="$CATALINA_TMPDIR" \
         -Djava.awt.headless=true \
        org.apache.catalina.startup.Bootstrap "$@" start
    fi
  fi

elif [ "$1" = "run" ]; then

  shift
  if [ "$1" = "-security" ] ; then
    if [ $have_tty -eq 1 ]; then
      echo "Using Security Manager"
    fi
    shift
    eval exec "\"$_RUNJAVA\"" "\"$LOGGING_CONFIG\"" $LOGGING_MANAGER $JAVA_OPTS $CATALINA_OPTS \
      -Djava.endorsed.dirs="\"$JAVA_ENDORSED_DIRS\"" -classpath "\"$CLASSPATH\"" \
      -Djava.security.manager \
      -Djava.security.policy=="\"$CATALINA_BASE/conf/catalina.policy\"" \
      -Dcatalina.base="\"$CATALINA_BASE\"" \
      -Dcatalina.home="\"$CATALINA_HOME\"" \
      -Djava.io.tmpdir="\"$CATALINA_TMPDIR\"" \
       -Djava.awt.headless=true \
      org.apache.catalina.startup.Bootstrap "$@" start
  else
    eval exec "\"$_RUNJAVA\"" "\"$LOGGING_CONFIG\"" $LOGGING_MANAGER $JAVA_OPTS $CATALINA_OPTS \
      -Djava.endorsed.dirs="\"$JAVA_ENDORSED_DIRS\"" -classpath "\"$CLASSPATH\"" \
      -Dcatalina.base="\"$CATALINA_BASE\"" \
      -Dcatalina.home="\"$CATALINA_HOME\"" \
      -Djava.io.tmpdir="\"$CATALINA_TMPDIR\"" \
       -Djava.awt.headless=true \
      org.apache.catalina.startup.Bootstrap "$@" start
  fi

elif [ "$1" = "start" ] ; then

  if [ ! -z "$CATALINA_PID" ]; then
    if [ -f "$CATALINA_PID" ]; then
      if [ -s "$CATALINA_PID" ]; then
        echo "Existing PID file found during start."
        if [ -r "$CATALINA_PID" ]; then
          PID=`cat "$CATALINA_PID"`
          ps -p $PID >/dev/null 2>&1
          if [ $? -eq 0 ] ; then
            echo "Tomcat appears to still be running with PID $PID. Start aborted."
            echo "If the following process is not a Tomcat process, remove the PID file and try again:"
            ps -f -p $PID
            exit 1
          else
            echo "Removing/clearing stale PID file."
            rm -f "$CATALINA_PID" >/dev/null 2>&1
            if [ $? != 0 ]; then
              if [ -w "$CATALINA_PID" ]; then
                cat /dev/null > "$CATALINA_PID"
              else
                echo "Unable to remove or clear stale PID file. Start aborted."
                exit 1
              fi
            fi
          fi
        else
          echo "Unable to read PID file. Start aborted."
          exit 1
        fi
      else
        rm -f "$CATALINA_PID" >/dev/null 2>&1
        if [ $? != 0 ]; then
          if [ ! -w "$CATALINA_PID" ]; then
            echo "Unable to remove or write to empty PID file. Start aborted."
            exit 1
          fi
        fi
      fi
    fi
  fi

  shift
  touch "$CATALINA_OUT"
  if [ "$1" = "-security" ] ; then
    if [ $have_tty -eq 1 ]; then
      echo "Using Security Manager"
    fi
    shift
    eval "\"$_RUNJAVA\"" "\"$LOGGING_CONFIG\"" $LOGGING_MANAGER $JAVA_OPTS $CATALINA_OPTS \
      -Djava.endorsed.dirs="\"$JAVA_ENDORSED_DIRS\"" -classpath "\"$CLASSPATH\"" \
      -Djava.security.manager \
      -Djava.security.policy=="\"$CATALINA_BASE/conf/catalina.policy\"" \
      -Dcatalina.base="\"$CATALINA_BASE\"" \
      -Dcatalina.home="\"$CATALINA_HOME\"" \
      -Djava.io.tmpdir="\"$CATALINA_TMPDIR\"" \
       -Djava.awt.headless=true \
      org.apache.catalina.startup.Bootstrap "$@" start \
      >> "$CATALINA_OUT" 2>&1 "&"

  else
    eval "\"$_RUNJAVA\"" "\"$LOGGING_CONFIG\"" $LOGGING_MANAGER $JAVA_OPTS $CATALINA_OPTS \
      -Djava.endorsed.dirs="\"$JAVA_ENDORSED_DIRS\"" -classpath "\"$CLASSPATH\"" \
      -Dcatalina.base="\"$CATALINA_BASE\"" \
      -Dcatalina.home="\"$CATALINA_HOME\"" \
      -Djava.io.tmpdir="\"$CATALINA_TMPDIR\"" \
       -Djava.awt.headless=true \
      org.apache.catalina.startup.Bootstrap "$@" start \
      >> "$CATALINA_OUT" 2>&1 "&"

  fi

  if [ ! -z "$CATALINA_PID" ]; then
    echo $! > "$CATALINA_PID"
  fi

  echo "Tomcat started."

elif [ "$1" = "stop" ] ; then

  shift

  SLEEP=5
  if [ ! -z "$1" ]; then
    echo $1 | grep "[^0-9]" >/dev/null 2>&1
    if [ $? -gt 0 ]; then
      SLEEP=$1
      shift
    fi
  fi

  FORCE=0
  if [ "$1" = "-force" ]; then
    shift
    FORCE=1
  fi

  if [ ! -z "$CATALINA_PID" ]; then
    if [ -f "$CATALINA_PID" ]; then
      if [ -s "$CATALINA_PID" ]; then
        kill -0 `cat "$CATALINA_PID"` >/dev/null 2>&1
        if [ $? -gt 0 ]; then
          echo "PID file found but no matching process was found. Stop aborted."
          exit 1
        fi
      else
        echo "PID file is empty and has been ignored."
      fi
    else
      echo "\$CATALINA_PID was set but the specified file does not exist. Is Tomcat running? Stop aborted."
      exit 1
    fi
  fi

  eval "\"$_RUNJAVA\"" $LOGGING_MANAGER $JAVA_OPTS \
    -Djava.endorsed.dirs="\"$JAVA_ENDORSED_DIRS\"" -classpath "\"$CLASSPATH\"" \
    -Dcatalina.base="\"$CATALINA_BASE\"" \
    -Dcatalina.home="\"$CATALINA_HOME\"" \
    -Djava.io.tmpdir="\"$CATALINA_TMPDIR\"" \
     -Djava.awt.headless=true \
    org.apache.catalina.startup.Bootstrap "$@" stop

  # stop failed. Shutdown port disabled? Try a normal kill.
  if [ $? != 0 ]; then
    if [ ! -z "$CATALINA_PID" ]; then
      echo "The stop command failed. Attempting to signal the process to stop through OS signal."
      kill -15 `cat "$CATALINA_PID"` >/dev/null 2>&1
    fi
  fi

  if [ ! -z "$CATALINA_PID" ]; then
    if [ -f "$CATALINA_PID" ]; then
      while [ $SLEEP -ge 0 ]; do
        kill -0 `cat "$CATALINA_PID"` >/dev/null 2>&1
        if [ $? -gt 0 ]; then
          rm -f "$CATALINA_PID" >/dev/null 2>&1
          if [ $? != 0 ]; then
            if [ -w "$CATALINA_PID" ]; then
              cat /dev/null > "$CATALINA_PID"
              # If Tomcat has stopped don't try and force a stop with an empty PID file
              FORCE=0
            else
              echo "The PID file could not be removed or cleared."
            fi
          fi
          echo "Tomcat stopped."
          break
        fi
        if [ $SLEEP -gt 0 ]; then
          sleep 1
        fi
        if [ $SLEEP -eq 0 ]; then
          echo "Tomcat did not stop in time."
          if [ $FORCE -eq 0 ]; then
            echo "PID file was not removed."
          fi
          echo "To aid diagnostics a thread dump has been written to standard out."
          kill -3 `cat "$CATALINA_PID"`
        fi
        SLEEP=`expr $SLEEP - 1 `
      done
    fi
  fi

  KILL_SLEEP_INTERVAL=5
  if [ $FORCE -eq 1 ]; then
    if [ -z "$CATALINA_PID" ]; then
      echo "Kill failed: \$CATALINA_PID not set"
    else
      if [ -f "$CATALINA_PID" ]; then
        PID=`cat "$CATALINA_PID"`
        echo "Killing Tomcat with the PID: $PID"
        kill -9 $PID
        while [ $KILL_SLEEP_INTERVAL -ge 0 ]; do
            kill -0 `cat "$CATALINA_PID"` >/dev/null 2>&1
            if [ $? -gt 0 ]; then
                rm -f "$CATALINA_PID" >/dev/null 2>&1
                if [ $? != 0 ]; then
                    if [ -w "$CATALINA_PID" ]; then
                        cat /dev/null > "$CATALINA_PID"
                    else
                        echo "The PID file could not be removed."
                    fi
                fi
                echo "The Tomcat process has been killed."
                break
            fi
            if [ $KILL_SLEEP_INTERVAL -gt 0 ]; then
                sleep 1
            fi
            KILL_SLEEP_INTERVAL=`expr $KILL_SLEEP_INTERVAL - 1 `
        done
        if [ $KILL_SLEEP_INTERVAL -lt 0 ]; then
            echo "Tomcat has not been killed completely yet. The process might be waiting on some system call or might be UNINTERRUPTIBLE."
        fi
      fi
    fi
  fi

elif [ "$1" = "configtest" ] ; then

    eval "\"$_RUNJAVA\"" $LOGGING_MANAGER $JAVA_OPTS \
      -Djava.endorsed.dirs="\"$JAVA_ENDORSED_DIRS\"" -classpath "\"$CLASSPATH\"" \
      -Dcatalina.base="\"$CATALINA_BASE\"" \
      -Dcatalina.home="\"$CATALINA_HOME\"" \
      -Djava.io.tmpdir="\"$CATALINA_TMPDIR\"" \
       -Djava.awt.headless=true \
      org.apache.catalina.startup.Bootstrap configtest
    result=$?
    if [ $result -ne 0 ]; then
        echo "Configuration error detected!"
    fi
    exit $result

elif [ "$1" = "version" ] ; then

    "$_RUNJAVA"   \
      -classpath "$CATALINA_HOME/lib/catalina.jar" \
      org.apache.catalina.util.ServerInfo

else

  echo "Usage: catalina.sh ( commands ... )"
  echo "commands:"
  if $os400; then
    echo "  debug             Start Catalina in a debugger (not available on OS400)"
    echo "  debug -security   Debug Catalina with a security manager (not available on OS400)"
  else
    echo "  debug             Start Catalina in a debugger"
    echo "  debug -security   Debug Catalina with a security manager"
  fi
  echo "  jpda start        Start Catalina under JPDA debugger"
  echo "  run               Start Catalina in the current window"
  echo "  run -security     Start in the current window with security manager"
  echo "  start             Start Catalina in a separate window"
  echo "  start -security   Start in a separate window with security manager"
  echo "  stop              Stop Catalina, waiting up to 5 seconds for the process to end"
  echo "  stop n            Stop Catalina, waiting up to n seconds for the process to end"
  echo "  stop -force       Stop Catalina, wait up to 5 seconds and then use kill -KILL if still running"
  echo "  stop n -force     Stop Catalina, wait up to n seconds and then use kill -KILL if still running"
  echo "  configtest        Run a basic syntax check on server.xml - check exit code for result"
  echo "  version           What version of tomcat are you running?"
  echo "Note: Waiting for the process to end and use of the -force option require that \$CATALINA_PID is defined"
  exit 1

fi

#7.server.xml
[root@xianchaonode1 dubboadmin]# cat server.xml
<?xml version="1.0" encoding="UTF-8"?>
<!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
  this work for additional information regarding copyright ownership.
  The ASF licenses this file to You under the Apache License, Version 2.0
  (the "License"); you may not use this file except in compliance with
  the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
--><!-- Note:  A "Server" is not itself a "Container", so you may not
     define subcomponents such as "Valves" at this level.
     Documentation at /docs/config/server.html
 --><Server port="8005" shutdown="SHUTDOWN">
  <Listener className="org.apache.catalina.startup.VersionLoggerListener"/>
  <!-- Security listener. Documentation at /docs/config/listeners.html
  <Listener className="org.apache.catalina.security.SecurityListener" />
  -->
  <!--APR library loader. Documentation at /docs/apr.html -->
  <Listener SSLEngine="on" className="org.apache.catalina.core.AprLifecycleListener"/>
  <!-- Prevent memory leaks due to use of particular java/javax APIs-->
  <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener"/>
  <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener"/>
  <Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener"/>

  <!-- Global JNDI resources
       Documentation at /docs/jndi-resources-howto.html
  -->
  <GlobalNamingResources>
    <!-- Editable user database that can also be used by
         UserDatabaseRealm to authenticate users
    -->
    <Resource auth="Container" description="User database that can be updated and saved" factory="org.apache.catalina.users.MemoryUserDatabaseFactory" name="UserDatabase" pathname="conf/tomcat-users.xml" type="org.apache.catalina.UserDatabase"/>
  </GlobalNamingResources>

  <!-- A "Service" is a collection of one or more "Connectors" that share
       a single "Container" Note:  A "Service" is not itself a "Container",
       so you may not define subcomponents such as "Valves" at this level.
       Documentation at /docs/config/service.html
   -->
  <Service name="Catalina">

    <!--The connectors can use a shared executor, you can define one or more named thread pools-->
    <!--
    <Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
        maxThreads="150" minSpareThreads="4"/>
    -->


    <!-- A "Connector" represents an endpoint by which requests are received
         and responses are returned. Documentation at :
         Java HTTP Connector: /docs/config/http.html (blocking & non-blocking)
         Java AJP  Connector: /docs/config/ajp.html
         APR (HTTP/AJP) Connector: /docs/apr.html
         Define a non-SSL/TLS HTTP/1.1 Connector on port 8080
    -->
    <Connector connectionTimeout="20000" port="8080" protocol="HTTP/1.1" redirectPort="8443"/>
    <!-- A "Connector" using the shared thread pool-->
    <!--
    <Connector executor="tomcatThreadPool"
               port="8080" protocol="HTTP/1.1"
               connectionTimeout="20000"
               redirectPort="8443" />
    -->
    <!-- Define a SSL/TLS HTTP/1.1 Connector on port 8443
         This connector uses the NIO implementation that requires the JSSE
         style configuration. When using the APR/native implementation, the
         OpenSSL style configuration is required as described in the APR/native
         documentation -->
    <!--
    <Connector port="8443" protocol="org.apache.coyote.http11.Http11NioProtocol"
               maxThreads="150" SSLEnabled="true" scheme="https" secure="true"
               clientAuth="false" sslProtocol="TLS" />
    -->

    <!-- Define an AJP 1.3 Connector on port 8009 -->
    <Connector port="8009" protocol="AJP/1.3" redirectPort="8443"/>


    <!-- An Engine represents the entry point (within Catalina) that processes
         every request.  The Engine implementation for Tomcat stand alone
         analyzes the HTTP headers included with the request, and passes them
         on to the appropriate Host (virtual host).
         Documentation at /docs/config/engine.html -->

    <!-- You should set jvmRoute to support load-balancing via AJP ie :
    <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
    -->
    <Engine defaultHost="localhost" name="Catalina">

      <!--For clustering, please take a look at documentation at:
          /docs/cluster-howto.html  (simple how to)
          /docs/config/cluster.html (reference documentation) -->
      <!--
      <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
      -->

      <!-- Use the LockOutRealm to prevent attempts to guess user passwords
           via a brute-force attack -->
      <Realm className="org.apache.catalina.realm.LockOutRealm">
        <!-- This Realm uses the UserDatabase configured in the global JNDI
             resources under the key "UserDatabase".  Any edits
             that are performed against this UserDatabase are immediately
             available for use by the Realm.  -->
        <Realm className="org.apache.catalina.realm.UserDatabaseRealm" resourceName="UserDatabase"/>
      </Realm>

      <Host appBase="/data/tomcat/webapps" autoDeploy="true" name="localhost" unpackWARs="true">

        <!-- SingleSignOn valve, share authentication between web applications
             Documentation at: /docs/config/valve.html -->
        <!--
        <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
        -->

        <!-- Access log processes all example.
             Documentation at: /docs/config/valve.html
             Note: The pattern used is equivalent to using pattern="common" -->
        <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs" pattern="%h %l %u %t &quot;%r&quot; %s %b" prefix="localhost_access_log" suffix=".txt"/>

        <Context docBase="dubboadmin" path="/" reloadable="true" source="org.eclipse.jst.jee.server:dubboadmin"/>

        </Host>
    </Engine>
  </Service>
</Server>
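Note that the Host appBase (/data/tomcat/webapps) matches the directory where the Dockerfile unpacks dubboadmin.war, and the Context element mounts the app at the root path, so the console answers at / rather than /dubboadmin. To syntax-check the customized server.xml before shipping, the configtest branch of catalina.sh shown above can be run against the image; a sketch, assuming the tomcat-base image provides a JDK:

#docker run --rm registry.cn-hangzhou.aliyuncs.com/birkhoff/dubboadmin:v1 /apps/tomcat/bin/catalina.sh configtest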

#bash build-command.sh
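Because the Deployment below uses imagePullPolicy: IfNotPresent, the freshly built image also has to be pushed so that other nodes can pull it; a sketch, assuming a prior docker login to the Aliyun registry:

#docker push registry.cn-hangzhou.aliyuncs.com/birkhoff/dubboadmin:v1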

Configure the dubboadmin.yaml file

[root@xianchaomaster1 dubboadmin]# vim dubboadmin.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: myserver-dubboadmin
  name: myserver-dubboadmin-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myserver-dubboadmin
  template:
    metadata:
      labels:
        app: myserver-dubboadmin
    spec:
      containers:
      - name: myserver-dubboadmin-container
        image: registry.cn-hangzhou.aliyuncs.com/birkhoff/dubboadmin:v1
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: myserver-dubboadmin
  name: myserver-dubboadmin-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30080
  selector:
    app: myserver-dubboadmin
[root@xianchaomaster1 dubboadmin]# kubectl apply -f dubboadmin.yaml
deployment.apps/myserver-dubboadmin-deployment created
service/myserver-dubboadmin-service created

[root@xianchaomaster1 dubboadmin]# kubectl get svc
NAME                          TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
myserver-consumer-server      NodePort    10.111.221.137   <none>        80:30001/TCP                                   101m
myserver-dubboadmin-service   NodePort    10.98.71.151     <none>        80:30080/TCP                                   7s
myserver-provider-spec        NodePort    10.110.182.162   <none>        8080:30800/TCP                                 117m
zookeeper                     ClusterIP   10.106.6.117     <none>        2181/TCP                                       3h22m
zookeeper1                    NodePort    10.103.59.98     <none>        2181:32181/TCP,2888:30830/TCP,3888:31244/TCP   3h22m
zookeeper2                    NodePort    10.96.144.29     <none>        2181:32182/TCP,2888:32695/TCP,3888:30422/TCP   3h22m
zookeeper3                    NodePort    10.96.3.83       <none>        2181:32183/TCP,2888:30400/TCP,3888:30712/TCP   3h22m

[root@xianchaomaster1 dubboadmin]# kubectl get pods
NAME                                             READY   STATUS                  RESTARTS   AGE
myserver-dubboadmin-deployment-fb59448b8-rhrjt   1/1     Running                 0          12s
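kubectl logs on this pod mainly shows the tail -f /etc/hosts stream from run_tomcat.sh, so to confirm that Tomcat really started, read catalina.out from inside the container; the path below assumes the default CATALINA_OUT location from catalina.sh above:

#kubectl exec myserver-dubboadmin-deployment-fb59448b8-rhrjt -- tail -n 5 /apps/tomcat/logs/catalina.out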

[root@xianchaomaster1 dubboadmin]# netstat -nltp | grep 30080
tcp        0      0 0.0.0.0:30080           0.0.0.0:*               LISTEN      15751/kube-proxy

#Access http://<node-IP>:30080/ , username/password: root/root
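For example, from any machine that can reach a node (node IP assumed; dubboadmin answers with an HTTP Basic auth challenge):

#curl -u root:root -I http://192.168.40.180:30080/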
