1. Nexus as a private yum and maven repository
2. Building a Kubernetes cluster with kubeadm
3. Deploying nginx + tomcat on k8s for static/dynamic separation

#==============================================================================

1 Nexus as a private yum and maven repository

1.1 Install nexus-3.21.2

#Install JDK 1.8 first

# cd /usr/local/src
#Upload the package
#Unpack it
# tar xf /usr/local/src/nexus-3.21.2-03-unix.tar.gz -C /usr/local
# ln -s /usr/local/nexus-3.21.2-03 /usr/local/nexus

#Listening address and port
# vi /usr/local/nexus/etc/nexus-default.properties
 application-port=8081
 application-host=0.0.0.0
#JVM startup options, cap the memory
# vi /usr/local/nexus/bin/nexus.vmoptions
 -Xms2703m
 -Xmx2703m
 
#Start the service and enable it at boot
# /usr/local/nexus/bin/nexus start
# echo '/usr/local/nexus/bin/nexus start' >> /etc/rc.d/rc.local
# [ -x /etc/rc.d/rc.local ] || chmod +x /etc/rc.d/rc.local

#Browse to http://10.0.0.47:8081
#You are browsing as the anonymous user by default
 Log in as admin; the initial password is in this file:
# cat /usr/local/sonatype-work/nexus3/admin.password 
 e23743d3-6040-4f43-b05a-5b57d16a3be6

#First-login wizard
1. Set the new password to admin
2. Configure anonymous access: enable it (anonymous users may download packages)

1.2 Build an internal yum repository

#Configure the data storage path
#Server administration and configuration->Repository->Blob Stores->Create blob store
            ->type: File
              name: nexus-data
              State: do not enable the soft quota
              Path: /data/nexus-data      #usually a mounted volume; the directory does not need to be created by hand

#Create the yum repository
#Server administration and configuration->Repository->Repositories->Create repository->yum(proxy)
            ->name: nexus-zabbix5.0            #the name distinguishes repositories and is what clients reference
              Online: checked, so the repository accepts requests
              Proxy->Remote storage: http://mirrors.aliyun.com/zabbix/zabbix/5.0/rhel/7/x86_64
              Storage->Blob store->nexus-data

#Create the repo file on a CentOS 7 client
# vi /etc/yum.repos.d/CentOS7.repo
 [nexus-zabbix5.0]
 name=nexus-zabbix5.0
 baseurl=http://10.0.0.47:8081/repository/nexus-zabbix5.0/
 gpgcheck=0
 
# yum list zabbix-agent
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
Available Packages
zabbix-agent.x86_64                             5.0.35-1.el7                              nexus-zabbix5.0

1.3 Build an internal maven repository

#Create the maven repository
#Server administration and configuration->Repository->Repositories->Create repository->maven2(proxy)
            ->name: nexus-maven2        #the name distinguishes repositories and is what clients reference
              Online: checked, so the repository accepts requests
              Proxy->Remote storage: https://repo.maven.apache.org/maven2
              Storage->Blob store->nexus-data
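
#The steps above only create the proxy repository; a Maven client still has to be pointed at it. A minimal sketch of a mirror entry for ~/.m2/settings.xml, assuming the Nexus host and repository name used above:
# vi ~/.m2/settings.xml
<settings>
  <mirrors>
    <mirror>
      <id>nexus-maven2</id>
      <mirrorOf>*</mirrorOf>                <!-- route all artifact requests through the internal proxy -->
      <url>http://10.0.0.47:8081/repository/nexus-maven2/</url>
    </mirror>
  </mirrors>
</settings>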

2 Building the cluster with kubeadm

2.1 Environment

haproxy1+keepalived1    #10.0.0.77
haproxy2+keepalived2    #10.0.0.17
master node 1        #10.0.0.7
master node 2        #10.0.0.27
master node 3        #10.0.0.37
master VIP           #10.0.0.248
node 1               #10.0.0.47
node 2               #10.0.0.57
node 3               #10.0.0.67
harbor         #10.0.0.77

2.2 Preparation

#On every node: enable IPv4 forwarding, tune kernel parameters and resource limits, disable swap, disable SELinux, disable the firewall, and configure time synchronization
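
#The exact kernel settings are not listed above; a minimal sketch of the usual kubeadm prerequisites:
# cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1                       # route pod traffic
net.bridge.bridge-nf-call-iptables = 1        # let iptables see bridged traffic
net.bridge.bridge-nf-call-ip6tables = 1
EOF
# modprobe br_netfilter
# sysctl --system
# swapoff -a && sed -ri 's/.*swap.*/#&/' /etc/fstab    # disable swap now and across reboots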

#Install haproxy + keepalived; the VIP is 10.0.0.248:6443 and haproxy balances across the masters in mode tcp
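
#The haproxy configuration itself is not shown; a sketch of the apiserver listen section in its final state (all three masters online), following the same pattern used later for the dashboard:
# tail -n7 /etc/haproxy/haproxy.cfg
listen k8s-apiserver
 bind 10.0.0.248:6443
 mode tcp
 balance roundrobin
 server 10.0.0.7 10.0.0.7:6443 weight 1 check inter 3s fall 3 rise 5
 server 10.0.0.27 10.0.0.27:6443 weight 1 check inter 3s fall 3 rise 5
 server 10.0.0.37 10.0.0.37:6443 weight 1 check inter 3s fall 3 rise 5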

#Install harbor and pull the k8s images
#Create a project in harbor: k8s_base_images
#The upstream registry k8s.gcr.io is unreachable from mainland China, so pull the images from the Aliyun mirror on the harbor host
# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.17.2
# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.17.2
# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.17.2
# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.17.2
# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0
# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.5
#Retag the images
# docker images | awk  -F' +|/' '/aliyun/{print "docker tag",$1"/"$2"/"$3":"$4,"10.0.0.77/k8s_base_images/"$3":"$4}' | bash
#Log in from the client (with a public project, pulls need no login but pushes do)
# docker login --username=admin --password=harbor 10.0.0.77
#Push the images to harbor
# docker images | awk '/k8s_base_images/{print "docker push "$1":"$2}' | bash

2.3 Master node setup

#On haproxy1 and haproxy2, take master2 and master3 offline so the real server points only at master1; once all masters are up, put all three back behind haproxy

#Install docker-19.03.15 on every master and node
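
#The docker daemon configuration is not shown above; if harbor at 10.0.0.77 is served over plain HTTP, each docker host needs it listed as an insecure registry, roughly:
# cat <<EOF > /etc/docker/daemon.json
{
  "insecure-registries": ["10.0.0.77"]
}
EOF
# systemctl restart docker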

#Add the k8s yum repository on every master and node
# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

#Install kubeadm and kubelet on every master and node; kubectl is installed alongside as a kubeadm dependency
# yum -y install kubeadm-1.17.2-0 kubelet-1.17.2-0 kubectl-1.17.2-0

#Start kubelet and enable it at boot on every master and node; until the node joins a cluster the log complains that /var/lib/kubelet/config.yaml cannot be found
# systemctl enable --now kubelet

#Enable kubeadm command completion on every master and node
# yum -y install bash-completion        #completion depends on the bash-completion package
# kubeadm completion bash > /etc/profile.d/kubeadm_completion.sh
# source /etc/profile.d/kubeadm_completion.sh

#List the images required by the chosen k8s version; if the init points at harbor and harbor is missing any of them, initialization will fail
# kubeadm config images list --kubernetes-version v1.17.2
W0608 12:01:40.387013   23073 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0608 12:01:40.387097   23073 validation.go:28] Cannot validate kubelet config - no validator is available
k8s.gcr.io/kube-apiserver:v1.17.2
k8s.gcr.io/kube-controller-manager:v1.17.2
k8s.gcr.io/kube-scheduler:v1.17.2
k8s.gcr.io/kube-proxy:v1.17.2
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.5
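
#Optionally, pre-pull the images on master1 to confirm harbor serves all of them before running the init; a sketch, assuming the images were pushed to harbor as in 2.2:
# kubeadm config images pull --image-repository 10.0.0.77/k8s_base_images --kubernetes-version v1.17.2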

#Initialize the control plane; run this once, on master1 only
#Initialization by passing flags to the kubeadm command
# kubeadm init \
    --apiserver-advertise-address 10.0.0.7 \    #address the apiserver listens on, default 0.0.0.0
    --apiserver-bind-port 6443 \            #port the apiserver listens on, default 6443
    --control-plane-endpoint=10.0.0.248 \    #stable address of the control plane, the master VIP
    --ignore-preflight-errors all \            #preflight checks (kernel params, swap, ...) can abort the install; this skips them
    --image-repository 10.0.0.77/k8s_base_images \    #image registry, default k8s.gcr.io; point it at harbor or the Aliyun mirror
                                                    #Aliyun mirror: registry.cn-hangzhou.aliyuncs.com/google_containers
    --kubernetes-version v1.17.2 \        #k8s version to install, default stable-1
    --pod-network-cidr 172.31.0.0/16 \    #pod network, the larger address consumer
    --service-cidr 192.168.0.0/20 \        #service network, a smaller address consumer
    --service-dns-domain testou.com        #service DNS suffix, default cluster.local; coredns resolves service A records under it

#After a successful init:
1) On the initialized master, copy the cluster admin config
# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# chown $(id -u):$(id -g) $HOME/.kube/config

2) Join the remaining masters
#Before joining, generate the certificate key on the initialized master
# kubeadm init phase upload-certs --upload-certs
.....
6fd4dfb89179880fe2be629fd49b3d54a3fd1c01a0f8d2be886c738706b10925
#Run the join command on each remaining master
# kubeadm join 10.0.0.248:6443 --token unv8k6.a5qe2ikpgjvh37fl \
    --discovery-token-ca-cert-hash sha256:be23a4bc442e07aa2a021e0513dca0b0fa79972cf35fb5ca5417ffc7993f92f5 \
    --control-plane \            #join as a control-plane (master) node
    --certificate-key 4475abbd9c0625d5cdd8be4f4bb9a6a4bf8af3c69ab3a8b19e7c067d74959442
#After joining, copy the cluster admin config on each new master
# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# chown $(id -u):$(id -g) $HOME/.kube/config
        
#Check node status
# kubectl get nodes        #nodes show NotReady; they only become Ready after the network add-on is installed and kubernetes-cni is downgraded
 ......
 master3   NotReady   master   4m24s   v1.17.2
#Downgrade kubernetes-cni
# rpm -e kubernetes-cni --nodeps
# yum -y install kubernetes-cni-0.7.5-0
#Install the flannel network add-on; apply it from a single master
# vi kube-flannel.yml
......
---
kind: ConfigMap
......
  net-conf.json: |
    {
      "Network": "172.31.0.0/16",        #flannel需和pod在同一网段
      "Backend": {
        "Type": "vxlan"
      }
    }
......
# kubectl apply -f kube-flannel.yml        #this pulls the image from quay.io/coreos/flannel
#Check node status again
# kubectl get nodes            #nodes now show Ready

#Bring master2 and master3 back online on haproxy1 and haproxy2

2.4 Node setup

#Install docker-19.03.15 on every master and node

#Add the k8s yum repository on every master and node
# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

#Install kubeadm and kubelet on the node machines; either command below works, since kubeadm pulls in kubectl as a dependency anyway
# yum -y install kubeadm-1.17.2-0 kubelet-1.17.2-0
# yum -y install kubeadm-1.17.2-0 kubelet-1.17.2-0 kubectl-1.17.2-0
#kubectl can manage the whole cluster, so for security it can be removed from the worker nodes
# rpm -e kubectl --nodeps
#Downgrade kubernetes-cni
# rpm -e kubernetes-cni --nodeps
# yum -y install kubernetes-cni-0.7.5-0

#Start kubelet and enable it at boot on every master and node; until the node joins a cluster the log complains that /var/lib/kubelet/config.yaml cannot be found
# systemctl enable --now kubelet

#Join the node to the cluster
#The master pushes the required pods automatically; once the flannel container is running, the node also shows Ready
# kubeadm join 10.0.0.248:6443 --token unv8k6.a5qe2ikpgjvh37fl \
    --discovery-token-ca-cert-hash sha256:be23a4bc442e07aa2a021e0513dca0b0fa79972cf35fb5ca5417ffc7993f92f5
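
#If the original bootstrap token has expired, a fresh join command can be generated on any master (standard kubeadm, not part of the original notes):
# kubeadm token create --print-join-command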

2.5 Deploy the dashboard

#Upstream releases: v2.0.0-rc6 is fully compatible with k8s 1.17
                    v1.10.1 is fully compatible with k8s 1.8-1.10

#Deploy dashboard v2.0.0-rc6

#On the harbor host, pull the dashboard and metrics-scraper images
# docker pull kubernetesui/dashboard:v2.0.0-rc6
# docker pull kubernetesui/metrics-scraper:v1.0.3
#Retag the images
# docker tag kubernetesui/dashboard:v2.0.0-rc6 10.0.0.77/k8s_base_images/dashboard:v2.0.0-rc6
# docker tag kubernetesui/metrics-scraper:v1.0.3 10.0.0.77/k8s_base_images/metrics-scraper:v1.0.3
#Log in from the client (with a public project, pulls need no login but pushes do)
# docker login --username=admin --password=harbor 10.0.0.77
#Push the images to harbor
# docker push 10.0.0.77/k8s_base_images/dashboard:v2.0.0-rc6
# docker push 10.0.0.77/k8s_base_images/metrics-scraper:v1.0.3

# cd /usr/local/src
#Upload the manifests
# ls /usr/local/src
 admin-user.yml  dashboard-2.0.0-rc6.yml
#Create the pods
# kubectl apply -f dashboard-2.0.0-rc6.yml
# kubectl apply -f admin-user.yml
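
#The content of admin-user.yml is not shown above; a minimal sketch of what such a file typically contains (a ServiceAccount bound to cluster-admin; the names are assumptions consistent with the token lookup below):
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard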

#Add a new listen section on the haproxy hosts
# tail -n7 /etc/haproxy/haproxy.cfg 
listen k8s-dashboard
 bind 10.0.0.248:30002
 mode tcp
 balance roundrobin
 server 10.0.0.7 10.0.0.7:30002 weight 1 check inter 3s fall 3 rise 5
 server 10.0.0.27 10.0.0.27:30002 weight 1 check inter 3s fall 3 rise 5
 server 10.0.0.37 10.0.0.37:30002 weight 1 check inter 3s fall 3 rise 5
# systemctl reload haproxy

#Browse to https://10.0.0.248:30002        #plain http is not accepted
#Login method 1: paste the token manually
# kubectl get secret -A | grep admin    #find the secret name
# kubectl describe secret admin-user-token-7s5wd -n kubernetes-dashboard
#Copy the token, paste it in, and log in
#Login method 2: Kubeconfig        #log in with a kubeconfig file that embeds the token
# cp /root/.kube/config /usr/local/src/kubeconfig
# sed -ri "/user:$/a\    token: $(kubectl describe secret $(kubectl get secret -A | awk '/admin/{print $2}') -n kubernetes-dashboard | awk 'END{print $2}')" /usr/local/src/kubeconfig
# sz /usr/local/src/kubeconfig
#Select the kubeconfig file and log in

3 Deploying nginx + tomcat on k8s for static/dynamic separation

3.1 Image preparation

#Create a project in harbor: nginx_tomcat_base_images

#Pull the base images on the harbor host
# docker pull nginx:1.22.1
# docker pull centos:7.9.2009

#Retag the nginx image
# docker tag nginx:1.22.1 10.0.0.77/nginx_tomcat_base_images/nginx:1.22.1

#Log in from the client (with a public project, pulls need no login but pushes do)
# docker login --username=admin --password=harbor 10.0.0.77

#Push the nginx image to harbor
# docker push 10.0.0.77/nginx_tomcat_base_images/nginx:1.22.1

#=====================================================================
#Build the JDK base image
# ls /usr/local/src/jdk-dockerfile/
build-command.sh  Dockerfile  jdk-8u241-linux-x64.tar.gz  profile

#Dockerfile
# cat /usr/local/src/jdk-dockerfile/Dockerfile 
FROM centos:7.9.2009
LABEL maintainer "Marko.Ou <oxz@qq.com>"
ADD jdk-8u241-linux-x64.tar.gz /usr/local
RUN ln -s /usr/local/jdk1.8.0_241 /usr/local/jdk; \
    chown -R root.root /usr/local/jdk/
ADD profile /etc/profile
ENV JAVA_HOME /usr/local/jdk
ENV JRE_HOME $JAVA_HOME/jre
ENV CLASSPATH $JAVA_HOME/lib:$JRE_HOME/lib
ENV PATH $PATH:$JAVA_HOME/bin

#Environment variables
# tail -n3 /usr/local/src/jdk-dockerfile/profile 
export JAVA_HOME=/usr/local/jdk
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin

#Build script
# cat /usr/local/src/jdk-dockerfile/build-command.sh 
#!/bin/bash
docker build -t centos-jdk:8u241 .

#Build the JDK base image
# bash /usr/local/src/jdk-dockerfile/build-command.sh
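
#Optional sanity check of the resulting image (assumed, not part of the original steps):
# docker run --rm centos-jdk:8u241 /usr/local/jdk/bin/java -version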

#===========================================================
#Build the tomcat base image
# ls /usr/local/src/tomcat-dockerfile/
apache-tomcat-8.5.85.tar.gz  build-command.sh  Dockerfile  linux39.tar.gz  run_tomcat.sh  server.xml  showhost.jsp

#Dockerfile
# cat /usr/local/src/tomcat-dockerfile/Dockerfile 
FROM centos-jdk:8u241
LABEL maintainer "Marko.Ou <oxz@qq.com>"
ADD apache-tomcat-8.5.85.tar.gz /usr/local
RUN ln -s /usr/local/apache-tomcat-8.5.85/ /usr/local/tomcat; \
    rm -f /usr/local/apache-tomcat-8.5.85.tar.gz
ADD server.xml /usr/local/tomcat/conf
RUN    useradd java; \
    chown -R java.java /usr/local/tomcat/; \
    mkdir -p /data/tomcat/webapps/ROOT
ADD linux39.tar.gz /data/tomcat/webapps
ADD run_tomcat.sh /usr/local/tomcat/bin
EXPOSE 8080 8005 8009 
CMD ["/usr/local/tomcat/bin/run_tomcat.sh"]

#Build script
# cat /usr/local/src/tomcat-dockerfile/build-command.sh 
#!/bin/bash
docker build -t tomcat_base:8.5.85 .
# chmod +x /usr/local/src/tomcat-dockerfile/run_tomcat.sh

#Startup script
# cat /usr/local/src/tomcat-dockerfile/run_tomcat.sh 
#!/bin/bash
su - java -c "/usr/local/tomcat/bin/catalina.sh start"
tail -f /etc/hosts

#Tomcat configuration (appBase points at /data/tomcat/webapps)
# grep appBase= server.xml 
      <Host name="localhost"  appBase="/data/tomcat/webapps"

#Build the tomcat base image
# bash /usr/local/src/tomcat-dockerfile/build-command.sh
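
#Optional local sanity check before pushing (assumed step; the URL path comes from the test in 3.5):
# docker run -d --name tomcat-test -p 8080:8080 tomcat_base:8.5.85
# curl -I http://127.0.0.1:8080/linux39/app1/index.html
# docker rm -f tomcat-test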

#Retag the tomcat image
# docker tag tomcat_base:8.5.85 10.0.0.77/nginx_tomcat_base_images/tomcat_base:8.5.85

#Log in from the client (with a public project, pulls need no login but pushes do)
# docker login --username=admin --password=harbor 10.0.0.77

#Push the tomcat image to harbor
# docker push 10.0.0.77/nginx_tomcat_base_images/tomcat_base:8.5.85

3.2 Create the nginx pod

#Create the nginx pod manifest
# cd /usr/local/src
# vi nginx.yml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 1        #number of pod replicas
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: 10.0.0.77/nginx_tomcat_base_images/nginx:1.22.1
        ports:
        - containerPort: 80

---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
  labels:
    app: nginx-service-label
  namespace: default
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    nodePort: 30004
  selector:
    app: nginx

#Create the two resources: the Deployment and the Service
# kubectl apply -f nginx.yml

#Look up the service and pod names
# kubectl get svc
# kubectl get pod -o wide

#Exec into the nginx pod
# kubectl exec -it nginx-deployment-559c796bd5-7tg9n bash
#Replace the nginx index page
# echo nginx-pod > /usr/share/nginx/html/index.html

#Browse to http://10.0.0.7:30004

3.3 Create the tomcat pod

#Create the tomcat pod manifest
# cd /usr/local/src
# vi tomcat.yml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tomcat-deployment
  labels:
    app: tomcat
spec:
  replicas: 1        #number of pod replicas
  selector:
    matchLabels:
      app: tomcat
  template:
    metadata:
      labels:
        app: tomcat
    spec:
      containers:
      - name: tomcat
        image: 10.0.0.77/nginx_tomcat_base_images/tomcat_base:8.5.85
        ports:
        - containerPort: 8080

---
apiVersion: v1
kind: Service
metadata:
  name: tomcat-service
  labels:
    app: tomcat-service-label
  namespace: default
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8080
    nodePort: 30005            #comment this out once the pod is verified
  selector:
    app: tomcat

#Create the two resources: the Deployment and the Service
# kubectl apply -f tomcat.yml

#Look up the service and pod names
# kubectl get svc
# kubectl get pod -o wide

#Browse to http://10.0.0.7:30005/linux39/app1/index.html

#Comment out the nodePort
# vi tomcat.yml
......
    #nodePort: 30005
    
#Recreate the resources
# kubectl delete -f tomcat.yml
# kubectl apply -f tomcat.yml

3.4 Configure the nginx pod as a reverse proxy

#Exec into the nginx pod and add the proxy configuration
# kubectl exec -it nginx-deployment-559c796bd5-7tg9n bash
# sed -ri '/#error_page/i\location /linux39 {\n  proxy_pass http://tomcat-service;\n}' /etc/nginx/conf.d/default.conf
# nginx -t && nginx -s reload
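
#For readability, the location block the sed command inserts into /etc/nginx/conf.d/default.conf looks like this; tomcat-service is resolved by coredns under the service DNS domain:
location /linux39 {
  proxy_pass http://tomcat-service;
}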

3.5 haproxy configuration and test

#Add a new listen section on the haproxy hosts
# tail -n7 /etc/haproxy/haproxy.cfg 
listen nginx_tomcat
 bind 10.0.0.248:30004
 mode tcp
 balance roundrobin
 server 10.0.0.7 10.0.0.7:30004 weight 1 check inter 3s fall 3 rise 5
 server 10.0.0.27 10.0.0.27:30004 weight 1 check inter 3s fall 3 rise 5
 server 10.0.0.37 10.0.0.37:30004 weight 1 check inter 3s fall 3 rise 5
# systemctl reload haproxy

# kubectl describe svc tomcat-service
 ......
 Endpoints:                172.31.5.12:8080            #the endpoint is the tomcat pod's address and port

#Client tests
# curl http://10.0.0.248:30004
 nginx-pod
# curl http://10.0.0.248:30004/linux39/app1/index.html
 volume page for app1