K8S 搭建 Pinpoint (一、配置 zookeeper 集群)
环境准备
3台机器,操作系统为 CentOS Linux release 7.8.2003

下面无特殊说明,都是在 master 机器上操作
创建目录和专用的命名空间
mkdir -p /data/yaml/pinpoint && cd /data/yaml/pinpoint
kubectl create ns pinpoint
部署 zookeepers
mkdir -p /data/yaml/pinpoint/zookeepers && cd /data/yaml/pinpoint/zookeepers
cat zoo1.yaml
# zoo1: Service (ports 2181 client / 2888 peer / 3888 election) + single-replica
# Deployment. Indentation restored — the flattened original was not valid YAML.
# NOTE(review): one Deployment per ZooKeeper node works, but a StatefulSet is the
# more conventional way to run a ZK ensemble on K8s — consider migrating.
---
apiVersion: v1
kind: Service
metadata:
  namespace: pinpoint
  name: zoo1
  labels:
    app: zoo1
spec:
  ports:
    - name: httpa          # client connections
      port: 2181
      targetPort: 2181
    - name: httpb          # peer-to-peer (follower sync)
      port: 2888
      targetPort: 2888
    - name: httpc          # leader election
      port: 3888
      targetPort: 3888
  selector:
    app: zoo1
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: pinpoint
  name: zoo1
  labels:
    app: zoo1
spec:
  replicas: 1              # exactly one pod per ZK server id
  minReadySeconds: 120
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  selector:
    matchLabels:
      app: zoo1
  template:
    metadata:
      labels:
        app: zoo1
    spec:
      terminationGracePeriodSeconds: 60
      hostname: zoo1
      containers:
        - name: zoo1
          image: harbor.junengcloud.com/pinpoint/zookeeper:3.4
          resources:
            requests:
              cpu: 100m
              memory: 204Mi
            limits:
              cpu: 2000m
              memory: 2048Mi
          ports:
            - containerPort: 2181
              name: httpa
            - containerPort: 2888
              name: httpb
            - containerPort: 3888
              name: httpc
          livenessProbe:
            tcpSocket:
              port: 2181
            initialDelaySeconds: 60
            periodSeconds: 180
          env:
            - name: ZOO_MY_ID      # unique server id within the ensemble
              value: "1"
            # this server binds 0.0.0.0; peers are reached via their Services
            - name: ZOO_SERVERS
              value: "server.1=0.0.0.0:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888"
cat zoo2.yaml
# zoo2: Service (ports 2181 client / 2888 peer / 3888 election) + single-replica
# Deployment. Indentation restored — the flattened original was not valid YAML.
# NOTE(review): one Deployment per ZooKeeper node works, but a StatefulSet is the
# more conventional way to run a ZK ensemble on K8s — consider migrating.
---
apiVersion: v1
kind: Service
metadata:
  namespace: pinpoint
  name: zoo2
  labels:
    app: zoo2
spec:
  ports:
    - name: httpa          # client connections
      port: 2181
      targetPort: 2181
    - name: httpb          # peer-to-peer (follower sync)
      port: 2888
      targetPort: 2888
    - name: httpc          # leader election
      port: 3888
      targetPort: 3888
  selector:
    app: zoo2
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: pinpoint
  name: zoo2
  labels:
    app: zoo2
spec:
  replicas: 1              # exactly one pod per ZK server id
  minReadySeconds: 120
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  selector:
    matchLabels:
      app: zoo2
  template:
    metadata:
      labels:
        app: zoo2
    spec:
      terminationGracePeriodSeconds: 60
      hostname: zoo2
      containers:
        - name: zoo2
          image: harbor.junengcloud.com/pinpoint/zookeeper:3.4
          resources:
            requests:
              cpu: 100m
              memory: 204Mi
            limits:
              cpu: 2000m
              memory: 2048Mi
          ports:
            - containerPort: 2181
              name: httpa
            - containerPort: 2888
              name: httpb
            - containerPort: 3888
              name: httpc
          livenessProbe:
            tcpSocket:
              port: 2181
            initialDelaySeconds: 60
            periodSeconds: 180
          env:
            - name: ZOO_MY_ID      # unique server id within the ensemble
              value: "2"
            # this server binds 0.0.0.0; peers are reached via their Services
            - name: ZOO_SERVERS
              value: "server.1=zoo1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zoo3:2888:3888"
cat zoo3.yaml
# zoo3: Service (ports 2181 client / 2888 peer / 3888 election) + single-replica
# Deployment. Indentation restored — the flattened original was not valid YAML.
# NOTE(review): one Deployment per ZooKeeper node works, but a StatefulSet is the
# more conventional way to run a ZK ensemble on K8s — consider migrating.
---
apiVersion: v1
kind: Service
metadata:
  namespace: pinpoint
  name: zoo3
  labels:
    app: zoo3
spec:
  ports:
    - name: httpa          # client connections
      port: 2181
      targetPort: 2181
    - name: httpb          # peer-to-peer (follower sync)
      port: 2888
      targetPort: 2888
    - name: httpc          # leader election
      port: 3888
      targetPort: 3888
  selector:
    app: zoo3
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: pinpoint
  name: zoo3
  labels:
    app: zoo3
spec:
  replicas: 1              # exactly one pod per ZK server id
  minReadySeconds: 120
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  selector:
    matchLabels:
      app: zoo3
  template:
    metadata:
      labels:
        app: zoo3
    spec:
      terminationGracePeriodSeconds: 60
      hostname: zoo3
      containers:
        - name: zoo3
          image: harbor.junengcloud.com/pinpoint/zookeeper:3.4
          resources:
            requests:
              cpu: 100m
              memory: 204Mi
            limits:
              cpu: 2000m
              memory: 2048Mi
          ports:
            - containerPort: 2181
              name: httpa
            - containerPort: 2888
              name: httpb
            - containerPort: 3888
              name: httpc
          livenessProbe:
            tcpSocket:
              port: 2181
            initialDelaySeconds: 60
            periodSeconds: 180
          env:
            - name: ZOO_MY_ID      # unique server id within the ensemble
              value: "3"
            # this server binds 0.0.0.0; peers are reached via their Services
            - name: ZOO_SERVERS
              value: "server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=0.0.0.0:2888:3888"
kubectl apply -f zoo1.yaml
kubectl apply -f zoo2.yaml
kubectl apply -f zoo3.yaml
kubectl get pod -n pinpoint
zoo1-749d8cc498-54tff 1/1 Running 2 6d22h
zoo2-695c9f8755-zftqf 1/1 Running 2 6d22h
zoo3-6bb94d5568-tlnlh 1/1 Running 2 6d22h
kubectl get svc -n pinpoint
zoo1 ClusterIP 10.111.190.15 <none> 2181/TCP,2888/TCP,3888/TCP 6d22h
zoo2 ClusterIP 10.98.15.48 <none> 2181/TCP,2888/TCP,3888/TCP 6d22h
zoo3 ClusterIP 10.100.184.127 <none> 2181/TCP,2888/TCP,3888/TCP 6d22h
# 测试 zookeeper 集群
kubectl -n pinpoint exec -it zoo1-749d8cc498-54tff -- zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Mode: follower
kubectl -n pinpoint exec -it zoo2-695c9f8755-zftqf -- zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Mode: follower
kubectl -n pinpoint exec -it zoo3-6bb94d5568-tlnlh -- zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Mode: leader

浙公网安备 33010602011771号