kubernetes test environment deployment plan s01 - basic functionality implementation

All rights reserved. Please do not repost.

1.1 Create the namespace (the u01 environment uses a single ns)

cat k01-ns.yml 

apiVersion: v1
kind: Namespace
metadata:
  name: ns-k01

kubectl apply -f k01-ns.yml

kubectl get ns

 

1.2 Add a label to the node(s) to be used

kubectl label node node1 cslcenv=no-k01

kubectl get node --show-labels 
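As a quick check, the labelled node can also be listed with a selector:

kubectl get node -l cslcenv=no-k01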

 

2. Create the applications and Services (svc)

Taking the U01 environment as an example:

Each pod gets 1 Deployment + 2 Services (one ClusterIP with a fixed IP, one NodePort).

cat k01-sie1.yml 
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: sie1
  namespace: ns-k01
spec:
#  replicas: 3
  template:
    metadata:
      annotations:
        security.alpha.kubernetes.io/unsafe-sysctls: kernel.msgmnb=13107200,kernel.msgmni=512,kernel.msgmax=65536,kernel.shmmax=69719476736,kernel.sem=500 256000 250 1024
      labels:
        k01: sie1
    spec:
      nodeSelector:
        cslcenv: no-k01
      containers:
      - name: sie1-ssh
        image: 172.28.2.2:4000/sie:20180316
        command: 
        - '/usr/sbin/sshd'
        - -D
        ports:
        - name: ssh
          containerPort: 22
        - name: mq4
          containerPort: 1414
        - name: mq5
          containerPort: 1415
        - name: mq6
          containerPort: 1416
        - name: mq7
          containerPort: 1417
        - name: mq8
          containerPort: 1418
        - name: mq9
          containerPort: 1419
        - name: mq10
          containerPort: 1420
        - name: mq11
          containerPort: 1421
        - name: cas
          containerPort: 20050

---
apiVersion: v1
kind: Service
metadata: 
  name: sie1svc
  namespace: ns-k01
spec:
  type: ClusterIP
  selector:
    k01: sie1
  clusterIP: 10.233.9.35
  ports:
  - name: ssh
    protocol: TCP
    port: 22
    targetPort: 22
  - name: mq4
    port: 1414
    targetPort: 1414
  - name: mq5
    port: 1415
    targetPort: 1415
  - name: mq6
    port: 1416
    targetPort: 1416
  - name: mq7
    port: 1417
    targetPort: 1417
  - name: mq8
    port: 1418
    targetPort: 1418
  - name: mq9
    port: 1419
    targetPort: 1419
  - name: mq10
    port: 1420
    targetPort: 1420
  - name: mq11
    port: 1421
    targetPort: 1421
  - name: cas
    port: 20050
    targetPort: 20050
---
apiVersion: v1
kind: Service
metadata:
  name: sie1nodeport
  namespace: ns-k01
spec:
  type: NodePort
  selector:
    k01: sie1
#  clusterIP: 10.233.9.211
  ports:
  - name: ssh
    protocol: TCP
    port: 22
    targetPort: 22
    nodePort: 30035
  - name: cas
    port: 20050
    targetPort: 20050
    nodePort: 30050
cat k01-ctp1.yml 
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: ctp1
  namespace: ns-k01
spec:
#  replicas: 3
  template:
    metadata:
      annotations:
        security.alpha.kubernetes.io/unsafe-sysctls: kernel.msgmnb=13107200,kernel.msgmni=512,kernel.msgmax=65536,kernel.shmmax=69719476736,kernel.sem=500 256000 250 1024
      labels:
        k01: ctp1
    spec:
      nodeSelector:
        cslcenv: no-k01
      containers:
      - name: ctp1-ssh
        image: 172.28.2.2:4000/ctp:20180316
        command: 
        - '/usr/sbin/sshd'
        - -D
        ports:
        - name: ssh
          containerPort: 22
        - name: qcs1
          containerPort: 13101
        - name: qcs2
          containerPort: 13201
        - name: qrs
          containerPort: 14101
        - name: tts1
          containerPort: 15101
        - name: tts2
          containerPort: 15401
        - name: ocs1
          containerPort: 16201
        - name: ocs2
          containerPort: 16401
        - name: lcs1
          containerPort: 18101
        - name: lcs2
          containerPort: 18201
        - name: mcs1
          containerPort: 19101
        - name: mcs2
          containerPort: 19201

---
apiVersion: v1
kind: Service
metadata: 
  name: ctp1svc
  namespace: ns-k01
spec:
  type: ClusterIP
  selector:
    k01: ctp1
  clusterIP: 10.233.9.31
  ports:
  - name: ssh
    protocol: TCP
    port: 22
    targetPort: 22
  - name: qcs1
    port: 13101
    targetPort: 13101       
  - name: qcs2
    port: 13201
    targetPort: 13201
  - name: qrs
    port: 14101
    targetPort: 14101
  - name: tts1
    port: 15101
    targetPort: 15101
  - name: tts2
    port: 15401
    targetPort: 15401
  - name: ocs1
    port: 16201
    targetPort: 16201
  - name: ocs2
    port: 16401
    targetPort: 16401
  - name: lcs1
    port: 18101
    targetPort: 18101
  - name: lcs2
    port: 18201
    targetPort: 18201
  - name: mcs1
    port: 19101
    targetPort: 19101
  - name: mcs2
    port: 19201
    targetPort: 19201
---
apiVersion: v1
kind: Service
metadata:
  name: ctp1nodeport
  namespace: ns-k01
spec:
  type: NodePort
  selector:
    k01: ctp1
#  clusterIP: 10.233.9.211
  ports:
  - name: ssh
    protocol: TCP
    port: 22
    targetPort: 22
    nodePort: 30031
cat k01-ctp2.yml 
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: ctp2
  namespace: ns-k01
spec:
#  replicas: 3
  template:
    metadata:
      annotations:
        security.alpha.kubernetes.io/unsafe-sysctls: kernel.msgmnb=13107200,kernel.msgmni=512,kernel.msgmax=65536,kernel.shmmax=69719476736,kernel.sem=500 256000 250 1024
      labels:
        k01: ctp2
    spec:
      nodeSelector:
        cslcenv: no-k01
      containers:
      - name: ctp2-ssh
        image: 172.28.2.2:4000/ctp:20180316
        command: 
        - '/usr/sbin/sshd'
        - -D
        ports:
        - name: ssh
          containerPort: 22
        - name: qcs1
          containerPort: 13101
        - name: qcs2
          containerPort: 13201
        - name: qrs
          containerPort: 14101
        - name: tts1
          containerPort: 15101
        - name: tts2
          containerPort: 15401
        - name: ocs1
          containerPort: 16201
        - name: ocs2
          containerPort: 16401
        - name: lcs1
          containerPort: 18101
        - name: lcs2
          containerPort: 18201
        - name: mcs1
          containerPort: 19101
        - name: mcs2
          containerPort: 19201

---
apiVersion: v1
kind: Service
metadata: 
  name: ctp2svc
  namespace: ns-k01
spec:
  type: ClusterIP
  selector:
    k01: ctp2
  clusterIP: 10.233.9.32
  ports:
  - name: ssh
    protocol: TCP
    port: 22
    targetPort: 22
  - name: qcs1
    port: 13101
    targetPort: 13101       
  - name: qcs2
    port: 13201
    targetPort: 13201
  - name: qrs
    port: 14101
    targetPort: 14101
  - name: tts1
    port: 15101
    targetPort: 15101
  - name: tts2
    port: 15401
    targetPort: 15401
  - name: ocs1
    port: 16201
    targetPort: 16201
  - name: ocs2
    port: 16401
    targetPort: 16401
  - name: lcs1
    port: 18101
    targetPort: 18101
  - name: lcs2
    port: 18201
    targetPort: 18201
  - name: mcs1
    port: 19101
    targetPort: 19101
  - name: mcs2
    port: 19201
    targetPort: 19201
---
apiVersion: v1
kind: Service
metadata:
  name: ctp2nodeport
  namespace: ns-k01
spec:
  type: NodePort
  selector:
    k01: ctp2
#  clusterIP: 10.233.9.211
  ports:
  - name: ssh
    protocol: TCP
    port: 22
    targetPort: 22
    nodePort: 30032
cat k01-ctp3.yml 
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: ctp3
  namespace: ns-k01
spec:
#  replicas: 3
  template:
    metadata:
      annotations:
        security.alpha.kubernetes.io/unsafe-sysctls: kernel.msgmnb=13107200,kernel.msgmni=512,kernel.msgmax=65536,kernel.shmmax=69719476736,kernel.sem=500 256000 250 1024
      labels:
        k01: ctp3
    spec:
      nodeSelector:
        cslcenv: no-k01
      containers:
      - name: ctp3-ssh
        image: 172.28.2.2:4000/ctp:20180316
        command: 
        - '/usr/sbin/sshd'
        - -D
        ports:
        - name: ssh
          containerPort: 22
        - name: qcs1
          containerPort: 13101
        - name: qcs2
          containerPort: 13201
        - name: qrs
          containerPort: 14101
        - name: tts1
          containerPort: 15101
        - name: tts2
          containerPort: 15401
        - name: ocs1
          containerPort: 16201
        - name: ocs2
          containerPort: 16401
        - name: lcs1
          containerPort: 18101
        - name: lcs2
          containerPort: 18201
        - name: mcs1
          containerPort: 19101
        - name: mcs2
          containerPort: 19201

---
apiVersion: v1
kind: Service
metadata: 
  name: ctp3svc
  namespace: ns-k01
spec:
  type: ClusterIP
  selector:
    k01: ctp3
  clusterIP: 10.233.9.33
  ports:
  - name: ssh
    protocol: TCP
    port: 22
    targetPort: 22
  - name: qcs1
    port: 13101
    targetPort: 13101       
  - name: qcs2
    port: 13201
    targetPort: 13201
  - name: qrs
    port: 14101
    targetPort: 14101
  - name: tts1
    port: 15101
    targetPort: 15101
  - name: tts2
    port: 15401
    targetPort: 15401
  - name: ocs1
    port: 16201
    targetPort: 16201
  - name: ocs2
    port: 16401
    targetPort: 16401
  - name: lcs1
    port: 18101
    targetPort: 18101
  - name: lcs2
    port: 18201
    targetPort: 18201
  - name: mcs1
    port: 19101
    targetPort: 19101
  - name: mcs2
    port: 19201
    targetPort: 19201
---
apiVersion: v1
kind: Service
metadata:
  name: ctp3nodeport
  namespace: ns-k01
spec:
  type: NodePort
  selector:
    k01: ctp3
#  clusterIP: 10.233.9.211
  ports:
  - name: ssh
    protocol: TCP
    port: 22
    targetPort: 22
    nodePort: 30033
cat k01-ttdb1.yml 
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: ttdb1
  namespace: ns-k01
spec:
#  replicas: 3
  template:
    metadata:
      annotations:
        security.alpha.kubernetes.io/unsafe-sysctls: kernel.msgmnb=13107200,kernel.msgmni=512,kernel.msgmax=65536,kernel.shmmax=69719476736,kernel.sem=500 256000 250 1024
      labels:
        k01: ttdb1
    spec:
      nodeSelector:
        cslcenv: no-k01
      containers:
      - name: ttdb1-ssh
        image: 172.28.2.2:4000/rh6-db1:20180316
        command: 
        - '/usr/sbin/sshd'
        - -D
        ports:
        - name: ssh
          containerPort: 22
        - name: oracle
          containerPort: 1521

#---
#apiVersion: v1
#kind: Service
#metadata: 
#  name: k01ttdb1-svc
#  namespace: k01
#spec:
#  type: ClusterIP
#  selector:
#    k01: ttdb1
#  clusterIP: 10.233.9.111
#  ports:
#  - name: ssh
#    protocol: TCP
#    port: 22
#    targetPort: 22
#  - name: oracle
#    port: 1521
#    targetPort: 1521

---
apiVersion: v1
kind: Service
metadata:
  name: ttdb1svc
  namespace: ns-k01
spec:
  type: NodePort
  selector:
    k01: ttdb1
  clusterIP: 10.233.9.11
  ports:
  - name: ssh
    protocol: TCP
    port: 22
    targetPort: 22
    nodePort: 30011
  - name: oracle
    port: 1521
    targetPort: 1521
    nodePort: 30021

 

For reference, the yml of a kube-system system application can be inspected with:

kubectl -n kube-system edit deployment kubernetes-dashboard 

 

Port summary:

NodePort mappings (ssh, cas, oracle):

ttdb1    22:30011/TCP, 1521:30021/TCP
ttdb2    22:30012/TCP, 1521:30022/TCP
ttdb3    22:30013/TCP, 1521:30023/TCP
umdb     22:30014/TCP, 1521:30024/TCP
clctdb   22:30015/TCP, 1521:30025/TCP
sie      22:30035/TCP, 20050:30050/TCP
ctp1     22:30031/TCP
ctp2     22:30032/TCP
ctp3     22:30033/TCP
ump      22:30020, 8080:30080, 21

 

 

kubectl apply -f k01-sie1.yml 
kubectl apply -f k01-ctp1.yml 
kubectl apply -f k01-ctp2.yml 
kubectl apply -f k01-ctp3.yml

 

kubectl get svc --namespace=ns-k01
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                                                                                                                AGE
ctp1nodeport   NodePort    10.233.47.160   <none>        22:30031/TCP                                                                                                           2h
ctp1svc        ClusterIP   10.233.9.31     <none>        22/TCP,13101/TCP,13201/TCP,14101/TCP,15101/TCP,15401/TCP,16201/TCP,16401/TCP,18101/TCP,18201/TCP,19101/TCP,19201/TCP   2h
ctp2nodeport   NodePort    10.233.11.111   <none>        22:30032/TCP                                                                                                           2h
ctp2svc        ClusterIP   10.233.9.32     <none>        22/TCP,13101/TCP,13201/TCP,14101/TCP,15101/TCP,15401/TCP,16201/TCP,16401/TCP,18101/TCP,18201/TCP,19101/TCP,19201/TCP   2h
ctp3nodeport   NodePort    10.233.37.171   <none>        22:30033/TCP                                                                                                           2h
ctp3svc        ClusterIP   10.233.9.33     <none>        22/TCP,13101/TCP,13201/TCP,14101/TCP,15101/TCP,15401/TCP,16201/TCP,16401/TCP,18101/TCP,18201/TCP,19101/TCP,19201/TCP   2h
sie1nodeport   NodePort    10.233.11.55    <none>        22:30035/TCP,20050:30050/TCP                                                                                           2h
sie1svc        ClusterIP   10.233.9.35     <none>        22/TCP,1414/TCP,1415/TCP,1416/TCP,1417/TCP,1418/TCP,1419/TCP,1420/TCP,1421/TCP,20050/TCP                               2h
ttdb1svc       NodePort    10.233.9.11     <none>        22:30011/TCP,1521:30021/TCP                                                                                            2h

 

kubectl apply -f xx.yml

Or write the apply commands into an sh file:

ls | xargs -n 1 | grep -v 'ns' | grep 'yml' |(while read line; do echo "kubectl apply -f $line"; done) > k01-applyall.sh
chmod a+x k01-applyall.sh
./k01-applyall.sh

  

Or: kubectl apply -f k01-ttdb1.yml -f k01-ttdb2.yml -f k01-ttdb3.yml .....
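Since all the manifests sit in one directory (~/k01 in the session below), pointing kubectl at the directory is another option; this would also re-apply the namespace yml, which is harmless once it exists:

kubectl apply -f ~/k01/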

 

daweij@master:~/k01$ ls | xargs -n 1 | grep -v 'ns' | grep 'yml' |(while read line; do echo "kubectl apply -f $line"; done) > k01-applyall.sh
daweij@master:~/k01$ cat k01-applyall.sh 
kubectl apply -f k01-clctdb.yml
kubectl apply -f k01-ctp1.yml
kubectl apply -f k01-ctp2.yml
kubectl apply -f k01-ctp3.yml
kubectl apply -f k01-sie1.yml
kubectl apply -f k01-ttdb1.yml
kubectl apply -f k01-ttdb2.yml
kubectl apply -f k01-ttdb3.yml
kubectl apply -f k01-umdb.yml
kubectl apply -f k01-ump1.yml
daweij@master:~/k01$ ./k01-applyall.sh 
deployment "clctdb" created
service "clctdbsvc" created
deployment "ctp1" unchanged
service "ctp1svc" unchanged
service "ctp1nodeport" unchanged
deployment "ctp2" created
service "ctp2svc" created
service "ctp2nodeport" created
deployment "ctp3" created
service "ctp3svc" created
service "ctp3nodeport" created
deployment "sie1" created
service "sie1svc" created
service "sie1nodeport" created
deployment "ttdb1" created
service "ttdb1svc" created
deployment "ttdb2" created
service "ttdb2svc" created
deployment "ttdb3" created
service "ttdb3svc" created
deployment "umdb" created
service "umdbsvc" created
deployment "ump1" created
service "ump1svc" created
daweij@master:~/k01$ kubectl get service -ns ns-k01
Error from server (NotFound): namespaces "s" not found
daweij@master:~/k01$ kubectl get service -n ns-k01
NAME           TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                                                                                                                AGE
clctdbsvc      NodePort    10.233.49.114   <none>        22:30015/TCP,1521:30025/TCP                                                                                            40s
ctp1nodeport   NodePort    10.233.55.23    <none>        22:30031/TCP                                                                                                           7m
ctp1svc        ClusterIP   10.233.9.31     <none>        22/TCP,13101/TCP,13201/TCP,14101/TCP,15101/TCP,15401/TCP,16201/TCP,16401/TCP,18101/TCP,18201/TCP,19101/TCP,19201/TCP   7m
ctp2nodeport   NodePort    10.233.28.45    <none>        22:30032/TCP                                                                                                           38s
ctp2svc        ClusterIP   10.233.9.32     <none>        22/TCP,13101/TCP,13201/TCP,14101/TCP,15101/TCP,15401/TCP,16201/TCP,16401/TCP,18101/TCP,18201/TCP,19101/TCP,19201/TCP   38s
ctp3nodeport   NodePort    10.233.31.145   <none>        22:30033/TCP                                                                                                           38s
ctp3svc        ClusterIP   10.233.9.33     <none>        22/TCP,13101/TCP,13201/TCP,14101/TCP,15101/TCP,15401/TCP,16201/TCP,16401/TCP,18101/TCP,18201/TCP,19101/TCP,19201/TCP   38s
sie1nodeport   NodePort    10.233.1.163    <none>        22:30035/TCP,20050:30050/TCP                                                                                           37s
sie1svc        ClusterIP   10.233.9.35     <none>        22/TCP,1414/TCP,1415/TCP,1416/TCP,1417/TCP,1418/TCP,1419/TCP,1420/TCP,1421/TCP,20050/TCP                               37s
ttdb1svc       NodePort    10.233.31.6     <none>        22:30011/TCP,1521:30021/TCP                                                                                            36s
ttdb2svc       NodePort    10.233.14.174   <none>        22:30012/TCP,1521:30022/TCP                                                                                            35s
ttdb3svc       NodePort    10.233.31.154   <none>        22:30013/TCP,1521:30023/TCP                                                                                            34s
umdbsvc        NodePort    10.233.44.128   <none>        22:30014/TCP,1521:30024/TCP                                                                                            33s
ump1svc        NodePort    10.233.15.30    <none>        22:30120/TCP,8080:30180/TCP                                                                                            32s
daweij@master:~/k01$ kubectl get pod -n ns-ko1 -o wide
No resources found.
daweij@master:~/k01$ kubectl get pod -n ns-k01 -o wide
NAME                      READY     STATUS    RESTARTS   AGE       IP               NODE
clctdb-8576d9bd55-cdjj9   1/1       Running   0          2m        10.233.102.141   node1
ctp1-679bd695c4-qfdsk     1/1       Running   0          9m        10.233.102.142   node1
ctp2-768db7655b-86r9j     1/1       Running   0          2m        10.233.102.152   node1
ctp3-78c4cc476b-gv7c5     1/1       Running   0          2m        10.233.102.151   node1
sie1-6ff4db7756-jxkdl     1/1       Running   0          2m        10.233.102.153   node1
ttdb1-5497974d9-mzpkq     1/1       Running   0          2m        10.233.102.154   node1
ttdb2-646d47fd4d-tt6kt    1/1       Running   0          2m        10.233.102.155   node1
ttdb3-74996dc8f9-lx7kq    1/1       Running   0          2m        10.233.102.156   node1
umdb-5dfcd4fb4-dphp7      1/1       Running   0          2m        10.233.102.157   node1
ump1-6f76fb974c-j8m6r     1/1       Running   0          2m        10.233.102.158   node1

 

 

3. Connection methods

Between pods:

Via DNS: a pod is reached through its Service as service_name.namespace_name.

Within the same namespace the service_name alone is enough, e.g. sie1svc, ctp1svc, ctp2svc, ctp3svc.
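For example, from any pod in ns-k01 (the fully-qualified form assumes the default cluster domain cluster.local):

ssh root@ctp1svc                            # same namespace, short name
ssh root@ctp1svc.ns-k01                     # from another namespace
ssh root@ctp1svc.ns-k01.svc.cluster.local   # fully-qualified service name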

External connections go through the NodePort:

ssh root@node1 -p 30011

The authenticity of host '[node1]:30011 ([172.28.2.211]:30011)' can't be established.
RSA key fingerprint is SHA256:/0Fn6F1/Lr1dwJWtsOyE/E8Z1iUwDagQsCuYU/AvIQg.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '[node1]:30011,[172.28.2.211]:30011' (RSA) to the list of known hosts.
root@node1's password: 
Last login: Tue Mar 20 14:22:55 2018 from 10.233.97.128

[root@ttdb1-95bbb97d8-hkt6x ~]# ssh ctp1svc

The authenticity of host 'ctp1svc (10.233.9.31)' can't be established.
RSA key fingerprint is 64:ae:7b:18:0c:0a:37:0d:78:f4:a4:12:1d:53:27:95.
Are you sure you want to continue connecting (yes/no)? o
Please type 'yes' or 'no': no
Host key verification failed.

[root@ttdb1-95bbb97d8-hkt6x ~]# ssh ctp2svc

The authenticity of host 'ctp2svc (10.233.9.32)' can't be established.
RSA key fingerprint is 64:ae:7b:18:0c:0a:37:0d:78:f4:a4:12:1d:53:27:95.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'ctp2svc,10.233.9.32' (RSA) to the list of known hosts.
root@ctp2svc's password: 
Last login: Mon Dec  5 13:30:40 from 172.20.13.176
-bash: ulimit: open files: cannot modify limit: Operation not permitted
[root@ctp2-567c864d4-4h6hf ~]# 

 

 

4. Configuration approach:

Configure every IP-related setting as the corresponding DNS name, e.g. sie1svc (fixed; within the same namespace it forwards to the corresponding pod).

Minimize the parts that still need manual configuration.

 

5. Service control

Deploy an ansible pod in the same namespace and use it to start the individual services.

The resource type can be a Job, which is torn down once it has run.
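A minimal sketch of such a Job (the image name and playbook path are assumptions, not existing artifacts):

apiVersion: batch/v1
kind: Job
metadata:
  name: ansible-start
  namespace: ns-k01
spec:
  backoffLimit: 1
  template:
    spec:
      nodeSelector:
        cslcenv: no-k01
      containers:
      - name: ansible
        # hypothetical ansible image in the local registry
        image: 172.28.2.2:4000/ansible:latest
        # hypothetical playbook that starts the services in order
        command: ['ansible-playbook', '/root/playbooks/start-services.yml']
      restartPolicy: Never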

 

6. Troubleshooting and open items

Oracle configuration: listener.ora needs the pod IP configured.

In tnsnames.ora the DNS name (service_name) can be used.
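A rough illustration of the two files (the listener HOST below uses the pod hostname from the pod listing above; the Oracle SERVICE_NAME is a placeholder):

# listener.ora -- HOST must point at the pod itself (pod IP or container hostname)
LISTENER =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = ttdb1-5497974d9-mzpkq)(PORT = 1521))
  )

# tnsnames.ora -- HOST can stay on the stable Kubernetes service name
TTDB1 =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = ttdb1svc)(PORT = 1521))
    (CONNECT_DATA = (SERVICE_NAME = ttdb1))
  )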

 

20180411 new plan:

Add a startup script to every image, at /root/docker/run.sh (a sketch of such a script follows the numbered list below).

This addresses issues 6.1, 6.2 and 6.3 by implementing the following:

1. Start sshd.

2. Fix the base configuration. Principle: anything referring to the local machine uses the hostname and local IP; anything referring to another container uses its service_name.

  tnsnames.ora (no change needed where the service name is already used as the host)

  listener.ora: change host to the container hostname

  ggs configuration: change host to the service_name (each database's ClusterIP Service needs a range of dynamic ports added, e.g. clctdb 7850~7880, the other databases 7850~7860); each push process takes one dynamic port on the target side. Done.

  /etc/hosts: write "ip service_name" entries (for all services) into the file (is this necessary, and does it interfere with the namespace defaults once written?)

  ctp subsystem configuration

  sie subsystem configuration

  ump system configuration

3. Start the services.

  db: the Oracle service, lsnrctl, ggs (depends on the db; started last)

  mq: ump and sie

  sie: its subsystems

  ctp: its subsystems

  ump: jboss (depends on umdb)

4. DB configuration fix-up (after the databases start, before ggs starts).

  After the other DBs are up, the management DB starts last and runs the configuration fix-up script.

  Executed only on the management DB: packaged into the management-DB image, it calls the SQL and, once finished, calls the ggs batch startup script.

  Wrap this in a script so the oracle user invokes the SQL.

 

5. Write the above startup steps to a designated log file, e.g. /root/docker/run.log.

 

6. When startup finishes, write a text file for the pod health check, e.g. /root/docker/status.

 

7. After all of the above, run a command that blocks forever, e.g. tail -f /dev/null.
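A minimal sketch of such a run.sh, assuming only the commands already mentioned in this plan (the application start commands are placeholders):

#!/bin/bash
# /root/docker/run.sh -- startup wrapper (sketch)
LOG=/root/docker/run.log
STATUS=/root/docker/status

{
  echo "$(date) starting sshd"
  /usr/sbin/sshd

  echo "$(date) fixing base configuration"
  /root/config.sh            # hostname / service_name substitutions, see 6.1 below

  echo "$(date) starting application services"
  # ... start db / mq / sie / ctp / ump services here, in dependency order ...

  echo "$(date) startup finished"
} >> "$LOG" 2>&1

# point 6: status file for the pod health check
echo "started at $(date)" > "$STATUS"

# point 7: keep the container in the foreground
tail -f /dev/null

The status file can then back an exec-based probe in each Deployment, for example (probe timings are arbitrary):

        readinessProbe:
          exec:
            command: ['cat', '/root/docker/status']
          initialDelaySeconds: 60
          periodSeconds: 30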

 

The per-image scripts are still being put together; planned order: db -- ump -- sie -- ctp.

 

6.1 How can the pod IP / hostname be passed into the various configuration files,

such as listener.ora?

##grep '' /etc/hosts

Besides using service_name, do other configurations have the same need?

For example lcs.ubb, tts.ubb, mcs.ubb, ocsa.ubb, qcs.ubb and qrs.ubb all contain hostname and local-IP settings.

Solution: write a script, package it into the images, and roll it out uniformly via ansible??
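If the pod IP itself is needed rather than the hostname, the Downward API can expose it to such a script as an environment variable; a minimal sketch for the container section of a Deployment (the variable names POD_IP / POD_NAME are illustrative):

        env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name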

 

The deployment applies this configuration as soon as it starts, via /root/config.sh:

 

#!/bin/bash
# /root/config.sh -- replace the HNAME01 / LOCALIP01 placeholders in the ctp
# configuration files with this container's hostname.
hostname01=$(uname -n)

# hostname placeholders
sed -i "/HNAME01/s/HNAME01/$hostname01/" /cslc/ctp/lcs/config/lcs.ubb
sed -i "/HNAME01/s/HNAME01/$hostname01/" /cslc/ctp/mcs/config/mcs.ubb
sed -i "/HNAME01/s/HNAME01/$hostname01/" /cslc/ctp/qcs/config/qcs.ubb
sed -i "/HNAME01/s/HNAME01/$hostname01/" /cslc/ctp/ocs/config/ocsa.ubb
sed -i "/HNAME01/s/HNAME01/$hostname01/" /cslc/ctp/tts/config/tts.ubb
sed -i "/HNAME01/s/HNAME01/$hostname01/" /cslc/ctp/qrs/config/qrs.ubb

# local-IP placeholders (note: also filled with the hostname, not the raw IP)
sed -i "/LOCALIP01/s/LOCALIP01/$hostname01/" /cslc/ctp/lcs/config/lcs.ubb
sed -i "/LOCALIP01/s/LOCALIP01/$hostname01/" /cslc/ctp/ocs/config/ocsa.ubb
sed -i "/LOCALIP01/s/LOCALIP01/$hostname01/" /cslc/ctp/tts/config/tts.ubb
sed -i "/LOCALIP01/s/LOCALIP01/$hostname01/" /cslc/ctp/mcs/config/mcs.ubb
sed -i "/LOCALIP01/s/LOCALIP01/$hostname01/" /cslc/ctp/qcs/config/qcs.ubb
sed -i "/LOCALIP01/s/LOCALIP01/$hostname01/" /cslc/ctp/qrs/config/qrs.ubb
sed -i "/LOCALIP01/s/LOCALIP01/$hostname01/" /cslc/ctp/ocs/config/ocsa.dm
sed -i "/LOCALIP01/s/LOCALIP01/$hostname01/" /cslc/ctp/tts/config/tts.dm

 

sed -i "/HNAME01/s/HNAME01/$(uname -n)/" /cslc/ctp/lcs/config/lcs.ubb
sed -i "/HNAME01/s/HNAME01/$(uname -n)/" /cslc/ctp/mcs/config/mcs.ubb
sed -i "/HNAME01/s/HNAME01/$(uname -n)/" /cslc/ctp/qcs/config/qcs.ubb
sed -i "/HNAME01/s/HNAME01/$(uname -n)/" /cslc/ctp/ocs/config/ocsa.ubb
sed -i "/HNAME01/s/HNAME01/$(uname -n)/" /cslc/ctp/tts/config/tts.ubb
sed -i "/HNAME01/s/HNAME01/$(uname -n)/" /cslc/ctp/qrs/config/qrs.ubb

sed -i "/LOCALIP01/s/LOCALIP01/$(uname -n)/" /cslc/ctp/lcs/config/lcs.ubb
sed -i "/LOCALIP01/s/LOCALIP01/$(uname -n)/" /cslc/ctp/ocs/config/ocsa.ubb
sed -i "/LOCALIP01/s/LOCALIP01/$(uname -n)/" /cslc/ctp/tts/config/tts.ubb
sed -i "/LOCALIP01/s/LOCALIP01/$(uname -n)/" /cslc/ctp/mcs/config/mcs.ubb
sed -i "/LOCALIP01/s/LOCALIP01/$(uname -n)/" /cslc/ctp/qcs/config/qcs.ubb
sed -i "/LOCALIP01/s/LOCALIP01/$(uname -n)/" /cslc/ctp/qrs/config/qrs.ubb
sed -i "/LOCALIP01/s/LOCALIP01/$(uname -n)/" /cslc/ctp/ocs/config/ocsa.dm
sed -i "/LOCALIP01/s/LOCALIP01/$(uname -n)/" /cslc/ctp/tts/config/tts.dm

 

 

6.2 Before sie can start MQ the clock must be adjusted; how can the time on the corresponding node be adjusted automatically?

Solution: rebuild the sie image and, while installing MQ during the image build, move the clock forward, e.g. to the year 2030? Implemented as of 2018-03-30.

 

6.3 Write the ansible deployment scripts, build the ansible deployment image, and define the yaml for running the ansible container.

Build an ansible image that packages the relevant scripts and configuration templates, enabling one-click deployment.

 

6.4 Slimmer image builds

ctp, sie and ump share one base image: completed 2018-03-30, pending testing; an automated deployment plan and scripts still need to be produced.

The Oracle images share one base image: expected before 2018-04-15.

Approach (a build-chain sketch follows the steps):

Step 1: find a suitable base image on Docker Hub.

Step 2: install the third-party components (tuxedo, MQ, Oracle client, etc.) on top of the base image and commit a new image.

Step 3: install the applications and the databases respectively and commit those images.

Step 4: write the configuration scripts that ansible will invoke and commit the images.

Step 5: after testing, publish the usable images.
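A hedged sketch of the resulting build chain (Dockerfile names and tags are placeholders; the registry address is the local one already used above):

# Step 2: middleware layer on top of the public base image
docker build -t 172.28.2.2:4000/base-mw:20180330 -f Dockerfile.middleware .
docker push 172.28.2.2:4000/base-mw:20180330

# Step 3: application layers built FROM the middleware image
docker build -t 172.28.2.2:4000/ctp:20180330 -f Dockerfile.ctp .
docker build -t 172.28.2.2:4000/sie:20180330 -f Dockerfile.sie .
docker push 172.28.2.2:4000/ctp:20180330
docker push 172.28.2.2:4000/sie:20180330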

 

6.5 Monitoring, not yet implemented (Dashboard + EFK + Prometheus...)

Status:

Dashboard and Prometheus are deployed; how to use them is still being explored.

EFK has yet to be deployed.

 

7. Hybrid management of virtual machines and containers based on Terraform

 
