Ultra-Detailed Comprehensive Lab: LANMP + ELK + MHA + Jumpserver + Zabbix + Redis + DNS
Project Goals and Planning
Collaboration
- Xingxing
- 1149950698@qq.com
 
- Daxiong
- lxj647500@163.com
 
Goals
- Use LVS + Keepalived for load balancing and high availability
- Use nginx/HAProxy as the reverse proxy
- Use nginx and Apache to separate static and dynamic content
- Use MHA to build the MySQL cluster
- Use CephFS on the Ceph cluster to keep website content consistent across the web servers
- Deploy a Discuz forum
- Set up DNS to resolve the site's domain name
- Build an ELK + Kafka/Redis cluster to collect website logs
- Use Zabbix to monitor each server's hardware metrics and service ports
- Back up the MySQL databases to the Ceph cluster
- Use Ansible to batch-deploy nginx, Apache, PHP, zabbix-agent, and Logstash; nginx, Apache, PHP, and MySQL must be installed from source.
- Set up a Jumpserver bastion to manage all servers; every server may only be logged into through the bastion
- Write MySQL backup and restore scripts
Planned Topology Diagram

Project Topology Table
| Hostname | Internal IP | Public IP | Services | 
|---|---|---|---|
| keep1 | 192.168.186.179 | 192.168.45.88 | Keepalived | 
| keep2 | 192.168.186.180 | 192.168.45.88 | Keepalived | 
| nginx | 192.168.186.181 | none | nginx+php | 
| apache | 192.168.186.182 | none | httpd | 
| DB1 | 192.168.186.183 | none | LAMP | 
| DB2 | 192.168.186.184 | none | LNMP | 
| DB3 | 192.168.186.185 | none | LNMP | 
| ceph1 | 192.168.186.186 | none | ceph-MGR | 
| ceph2 | 192.168.186.187 | none | ceph | 
| ceph3 | 192.168.186.188 | none | ceph | 
| Zabbix | 192.168.186.189 | none | Zabbix | 
| Jumpserver | 192.168.186.200 | none | Jumpserver | 
| DELK1 | 192.168.186.190 | none | primary DNS + ELK+Kafka+Redis | 
| DELK2 | 192.168.186.191 | none | secondary DNS + ELK+Kafka+Redis | 
| DELK3 | 192.168.186.192 | none | secondary DNS + ELK+Kafka+Redis | 
Project Plan
- Fix IPs and hostnames according to the topology diagram and complete initialization.
- Prepare Ansible playbooks and the deployment scripts for each service
- Build upward from the bottom layer (the databases)
- Once the services are up, build the operations platform
Basic Initialization
Hardware Preparation
Add a disk to each Ceph node

MySQL cluster servers: 2 GB RAM + 4-core CPU

Network Connectivity
- Verify network connectivity

Initial Setup
- Initialize with the initialization script
- Fix the hostname and IP at the same time

- Prepare the full local hosts resolution file
vim /etc/hosts
192.168.186.180 keep
192.168.186.181 nginx
192.168.186.182 apache
192.168.186.183 DB1
192.168.186.184 DB2
192.168.186.185 DB3
192.168.186.186 CELK1
192.168.186.187 CELK2
192.168.186.188 CELK3
192.168.186.189 zabbix
- Add a new NIC to Jumpserver in bridged mode and assign it a static IP
  
cd /etc/sysconfig/network-scripts && cp ifcfg-ens33 ifcfg-ens38
vim ifcfg-ens38
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="static"
DEFROUTE="yes"
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="ens38"
DEVICE="ens38"
ONBOOT="yes"
IPADDR="192.168.186.200"
PREFIX="24"
#GATEWAY="192.168.45.2"
DNS1="114.114.114.114"
IPV6_PRIVACY="no"
systemctl restart network
Initialization Snapshots
- Take an initialization snapshot of every machine
  
Deploying Services with Ansible
Install Ansible
- Install
yum install -y ansible 
- Adjust the configuration file
vim /etc/ansible/hosts
[all]
192.168.186.181
192.168.186.182
192.168.186.183
192.168.186.184
192.168.186.185
192.168.186.186
192.168.186.187
192.168.186.188
192.168.186.189
192.168.186.190
192.168.186.191
192.168.186.192
192.168.186.200
[nginx]
192.168.186.181
192.168.186.184
192.168.186.185
[apache]
192.168.186.182
192.168.186.183
[DB]
192.168.186.183
192.168.186.184
192.168.186.185
[ceph]
192.168.186.186
192.168.186.187
192.168.186.188
[zabbix]
192.168.186.189
[jumpserver]
192.168.186.200
[ELK]
192.168.186.190
192.168.186.191
192.168.186.192
- Push keys with the script
sh SSH-COPY.sh
- The key-push script
#!/bin/bash
#By:lihang 2023.3.6
#Automatically push the SSH key to every live host on the current subnet
#Known issues: none tracked yet
yum install -y expect yum-utils
reset=$(tput sgr0)
red() {
    redb=$(tput setab 1)
    echo -e " ${redb} $1 ${reset}"
    echo "==========================================================="
}
green() {
    greenb=$(tput setab 2)
    echo -e " ${greenb} $1 ${reset}"
    echo "==========================================================="
}
blue() {
    blueb=$(tput setab 4)
    echo -e " ${blueb} $1 ${reset}"
    echo "==========================================================="
}
change_netyum() {
    base_yum='https://mirrors.aliyun.com/repo/Centos-7.repo'
    epel_yum='http://mirrors.aliyun.com/repo/epel-7.repo'
    if wget -O /etc/yum.repos.d/CentOS-Base.repo "$base_yum" &>/dev/null; then
        green "yum base repo replaced"
    else
        red "failed to replace the yum base repo, please check; exiting"
        return 1
    fi
    if wget -O /etc/yum.repos.d/epel.repo "$epel_yum" &>/dev/null; then
        green "yum epel repo replaced"
    else
        red "failed to replace the yum epel repo, please check; exiting"
        return 1
    fi
}
test_net() {
    if ! ping -c2 -i1 "$1" >/dev/null; then
        red "ping $1 failed, exiting in 1 second!"
        sleep 1
        exit 1
    else
        green "ping $1 succeeded, network is up!"
        wait
    fi
}
check_expect() {
    if ! rpm -q expect &>/dev/null; then
        if yum install -y expect; then
            green "expect installed successfully"
        else
            red "expect installation failed"
        fi
    fi
}
check_ssh_key() {
    if [ ! -f ~/.ssh/id_rsa ]; then
        ssh-keygen -P "" -f ~/.ssh/id_rsa >/dev/null 2>&1
    fi
}
push_ssh_key() {
    local ip=$1
    /usr/bin/expect <<-EOF >/dev/null 2>&1
        set timeout 5 
        spawn ssh-copy-id root@$ip
        expect {
            "yes/no" {
                send "yes\n"
                exp_continue
            }
            "password:" {
                send "$2\n"
                exp_continue
            }
            eof
        }
EOF
    if [ $? -eq 0 ]; then
        green "SSH key pushed successfully to $ip" &
    fi
}
stop_yumepel() {
    if yum install -y yum-utils &>/dev/null; then
        green "epel repo temporarily disabled"
        yum-config-manager --disable epel
    else
        red "yum test install failed, please check; configuring the yum repos"
        change_netyum
        yum-config-manager --disable epel
    fi
}
start_yumepel() {
    if yum-config-manager --enable epel; then
        blue "epel repo re-enabled"
    fi
}
# ---------------------------------------------------------------------------- #
#                                Test the network                               #
# ---------------------------------------------------------------------------- #
test_net qq.com
stop_yumepel
# ---------------------------------------------------------------------------- #
#                             Check the environment                             #
# ---------------------------------------------------------------------------- #
check_expect
# ---------------------------------------------------------------------------- #
#                               Check the SSH key                               #
# ---------------------------------------------------------------------------- #
check_ssh_key
# ---------------------------------------------------------------------------- #
#                                 Push the key                                  #
# ---------------------------------------------------------------------------- #
subnet=$(hostname -I | cut -d '.' -f 1-3)
ip_list=($(nmap -sP "${subnet}".0/24 | awk '/for/{print $NF}' | tr -d '()'))
password="123456"
# ip_list=("192.168.186.180" "192.168.186.181" "192.168.186.182" "192.168.186.200")
# password="123"
for ip in "${ip_list[@]}"; do
    push_ssh_key "$ip" $password
done
start_yumepel
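- After the push finishes, it is worth confirming passwordless access from the control node before any playbooks run; a minimal check against the inventory defined above:
ansible all -m ping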
Deploying Nginx with a Playbook
- The playbook
- name: wget nginx
  get_url:
    url: http://nginx.org/download/nginx-1.22.0.tar.gz
    dest: /root/
    force: yes
- name: tar
  unarchive:
    src: /root/nginx-1.22.0.tar.gz
    dest: /usr/local/src
    remote_src: yes
- name: copy nginx.h
  template:
    src:  config1.j2
    dest: /usr/local/src/nginx-1.22.0/src/core/nginx.h
- name: copy module.c
  template:
    src: config2.j2
    dest: /usr/local/src/nginx-1.22.0/src/http/ngx_http_header_filter_module.c
- name: copy response.c
  template:
    src: config3.j2
    dest: /usr/local/src/nginx-1.22.0/src/http/ngx_http_special_response.c
- name: yum dependence
  yum: name={{item}} state=latest
  with_items:
      - gcc 
      - gcc-c++ 
      - autoconf 
      - automake 
      - zlib 
      - zlib-devel 
      - openssl 
      - openssl-devel  
      - pcre 
      - pcre-devel
- name: configure
  shell: "cd /usr/local/src/nginx-1.22.0/ && ./configure --prefix=/usr/local/nginx --with-http_dav_module --with-http_stub_status_module --with-http_addition_module --with-http_sub_module --with-http_flv_module --with-http_mp4_module --with-pcre --with-http_ssl_module --with-http_gzip_static_module && make && make install"
- name: link
  file:
    src: /usr/local/nginx/sbin/nginx
    dest: /usr/local/sbin/nginx
    state: link
- name: start
  shell: nginx
- Entry playbook
- name: nginx build
  remote_user: root
  hosts: nginx
  roles:
    - nginx
  tags: nginx
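- With the role and entry play in place, a run limited to the nginx group looks like this (assuming the same LNMP_CentOS7/site.yml layout used for MySQL below):
ansible-playbook LNMP_CentOS7/site.yml -l nginx -f8 -t nginx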
- Deployment result
- Note: the deployment initially used the .1 subnet and was adjusted later; this screenshot was taken on the .1 subnet
  
 
Test nginx
curl -I 192.168.186.181

Deploying MySQL with a Playbook
Preparing the Ansible Playbook
- The MySQL source-install playbook; only the core part is shown
---
# ---------------------------------------------------------------------------- #
#                           install for mysql                                  #
# ---------------------------------------------------------------------------- #
- name: Gather Node Info
  gather_facts:
    filter: ansible_processor_vcpus,memory_mb
  delegate_to: "{{delegate_host}}"
- name: Determine Memory Size
  fail:
    msg: "Not enough memory available,Exiting!"
  when: ansible_facts['memory_mb']['real'] < 2048
- name: Determine core number
  fail:
    msg: "Not enough cpu core available,Exiting!"
  when: ansible_processor_vcpus|int < 2
# Cannot evaluate this reliably; commented out for now
# - name: Find Mysql package
#   find:
#     path: "{{mysql_package_path}}"
#     patterns: "{{mysql_rpmpackage_name}}"
#   register: mysql_local_pack_exist
# - name: Download Mysql package
#   get_url:
#     url: "{{mysql_package_url}}"
#     dest: /root/"{{mysql_rpmpackage_name}}"
#   when: not mysql_local_pack_exist
- name: Rpm mysql
  shell: rpm -ivh "{{mysql_rpmpackage_name}}"
- name: Tar Mysqlrpm
  unarchive:
    src: "/root/rpmbuild/SOURCES/{{mysql_tarpackage_name}}"
    dest: "/usr/local/src/"
    remote_src: yes
- name: Tar boost
  unarchive:
    src: "/root/rpmbuild/SOURCES/boost_1_59_0.tar.bz2"
    dest: "/usr/local/src/"
    remote_src: yes
- name: Dependent package install
  yum:
    name:
      - gcc
      - ncurses
      - openssl-devel
      - bison
      - make
      - gcc-c++
      - cmake
      - automake
      - zlib
      - openssl
      - openssl-devel
      - pcre
      - ncurses-devel
    state: present
- name: Groupadd mysql
  group:
    name: mysql
- name: Useradd mysql
  user:
    name: mysql
    group: mysql
    create_home: no
- name: Create mysql data directory
  file:
    path: "{{mysql_datadir}}"
    state: directory
    owner: mysql
    group: mysql
- name: Create mysql log directory
  file:
    path: "{{mysql_logdir}}"
    state: directory
    owner: mysql
    group: mysql
- name: Cmake and make
  shell: cd /usr/local/src/ && \
    cmake mysql-5.7.38/ \
    -DWITH_BOOST={{ boost_dir }} \
    -DCMAKE_INSTALL_PREFIX={{ mysql_prefix }} \
    -DSYSCONFDIR={{ sysconf_dir }} \
    -DMYSQL_DATADIR={{ mysql_datadir }} \
    -DINSTALL_MANDIR={{ install_mandir }} \
    -DMYSQL_TCP_PORT={{ mysql_tcp_port }} \
    -DMYSQL_UNIX_ADDR={{ mysql_unix_addr }} \
    -DDEFAULT_CHARSET={{  mysql_charset  }} \
    -DDEFAULT_COLLATION=utf8_general_ci \
    -DEXTRA_CHARSETS=all \
    -DWITH_SSL=system \
    -DWITH_READLINE=1 \
    -DWITH_INNOBASE_STORAGE_ENGINE=1 \
    -DWITH_EMBEDDED_SERVER=1 \
    -DENABLED_LOCAL_INFILE=1 && \
    make -j{{  ansible_processor_vcpus  }}
  register: make_result
- name: Make install
  shell: cd /usr/local/src/ && \
    make install
  register: install_result
- name: Check make
  fail:
    msg: "make failed to run"
  when: make_result.rc != 0
- name: Check Make result
  debug:
    msg: "Mysql Make succeeded."
  when: make_result.rc == 0
- name: Copy mysql.server
  copy:
    src: "{{  mysql_prefix  }}/support-files/mysql.server"
    dest: /etc/init.d/mysqld
    remote_src: yes
    mode: 0755
- name: Add mysqld to chkconfig
  command: chkconfig --add mysqld
- name: Set mysqld to start on boot
  command: chkconfig mysqld on
- name: Check mysqld status in chkconfig
  command: chkconfig --list mysqld
  register: chkconfig_result
- name: Do something if the previous command was successful
  debug:
    msg: "The command was successful"
  when: chkconfig_result.rc == 0
- name: Do something else if the previous command failed
  debug:
    msg: "The command failed"
  when: chkconfig_result.rc != 0
# --------------------------------------------------------------  #
#    The file module errored on the symlinks; use shell instead    #
# -------------------------------------------------------------- #
# - name: Soft link all files under mysql bin directory
#   file:
#     src: "{{ item }}"
#     dest: /usr/local/bin
#     state: link
#     mode: 0755
#   with_fileglob:
#     - /usr/local/mysql/bin/*
#   tags: aa
- name: Soft link all files under mysql bin directory
  shell: 'ln -sf "{{  mysql_prefix  }}"/bin/* /usr/local/bin/'
- name: render my.cnf.j2 to /etc/my.cnf
  template:
    src: my.cnf.j2
    dest: /etc/my.cnf
    owner: root
    group: root
    mode: "0644"
- name: Change ownership
  file:
    path: /usr/local/mysql
    owner: mysql
    group: mysql
    recurse: yes
- name: Initialize mysql
  command: "{{ mysql_prefix }}/bin/mysqld --initialize-insecure --user=mysql --basedir={{ mysql_prefix }} --datadir={{ mysql_datadir }}"
  register: initialize_result
# This check is unreliable; commented out!
# - name: Do something if the initialize msyql successful
#   debug:
#     msg: "The mysql initialized successful"
#   when: initialize_result == 0
# - name: Do something if the initialize mysql failed
#   fail:
#     msg: "The mysql initialize failed"
#   when: initialize_result != 0
- name: Start mysqld
  service:
    name: mysqld
    enabled: yes
    state: started
- name: Changing the mysql password
  command: mysqladmin -uroot password "{{  mysql_password  }}"
Preparing Local Packages
- The playbook expects packages staged locally in advance to cut download waits; stage the MySQL 5.7.38 package on all nodes.
  
Running the Playbook to Deploy MySQL
- Since this runs on virtual machines and compilation is resource-heavy, the nodes were not all installed at once
- Install node by node; in production, once the playbook is validated, you can install in batches.
ansible-playbook LNMP_CentOS7/site.yml -l 192.168.1.184 -f8 -t mysql
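- Once a node finishes, a quick sanity check (paths and the 123456 password follow the playbook variables used throughout this document):
/usr/local/mysql/bin/mysql -V
chkconfig --list mysqld
mysql -uroot -p123456 -e "select version();"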

Deploying Apache with Ansible
ansible apache -m yum -a "name=httpd"

ansible apache -m service -a "name=httpd enabled=yes state=started"




Deploying PHP with a Playbook
- The playbook
- name: yum php dependence
  yum: name={{item}}
  with_items:
    - autoconf  
    - freetype 
    - gd 
    - libpng 
    - libpng-devel 
    - libjpeg 
    - libxml2 
    - libxml2-devel 
    - zlib 
    - curl 
    - curl-devel  
    - bzip2-devel  
    - freetype-devel 
    - libjpeg-devel 
    - sqlite-devel 
    - libwebp 
- name: get oniguru
  get_url: 
    url: https://rpms.remirepo.net/enterprise/7/remi/x86_64/oniguruma5php-6.9.7.1-1.el7.remi.x86_64.rpm
    dest: /root
    validate_certs: false
- name: get oniguru-devel
  get_url:
    url: https://rpms.remirepo.net/enterprise/7/remi/x86_64/oniguruma5php-devel-6.9.7.1-1.el7.remi.x86_64.rpm
    dest: /root
    validate_certs: false
- name: yum oniguru
  shell: yum install -y oniguruma5php-*
- name: tar php
  unarchive:
    src: /root/php-8.1.9.tar.gz
    dest: /usr/local/src
- name: install php
  shell: "cd /usr/local/src/php-8.1.9/ &&  ./configure --prefix=/usr/local/php8.1.9 --enable-mysqlnd  --with-mysqli=mysqlnd --with-pdo-mysql=mysqlnd --with-openssl --enable-fpm --enable-sockets --with-gd --with-zlib --enable-sysvshm --enable-mbstring --with-freetype-dir --with-libxml-dir=/usr --enable-xml --with-mhash --with-mcrypt=/usr/local/libmcrypt --with-config-file-path=/etc --with-config-file-scan-dir=/usr/local/php8.1.9/etc/ --with-bz2 --with-iconv-dir=/usr/local/php8.1.9 && make && make install"
- name: fpm
  copy:
    src: /usr/local/php8.1.9/etc/php-fpm.conf.default
    dest: /usr/local/php8.1.9/etc/php-fpm.conf
    remote_src: yes
- name: ini
  copy:
    src: /usr/local/src/php-8.1.9/php.ini-production
    dest: /usr/local/php8.1.9/etc/php.ini
    remote_src: yes
- name: www.conf
  copy:
    src: /usr/local/php8.1.9/etc/php-fpm.d/www.conf.default
    dest: /usr/local/php8.1.9/etc/php-fpm.d/www.conf
    remote_src: yes
- name: init.d
  shell: "cp /usr/local/src/php-8.1.9/sapi/fpm/init.d.php-fpm /etc/init.d/php-fpm"
- name: enable
  shell: "chmod +x /etc/init.d/php-fpm && chkconfig --add php-fpm && chkconfig php-fpm on &&  /etc/init.d/php-fpm start"
- name: change nginx.conf
  copy:
    src: /usr/local/nginx/conf/nginx.conf
    dest: /usr/local/nginx/conf/nginx.conf
    remote_src: no
    force: yes
- name: reload nginx
  shell: nginx -s reload
- name: create index.php
  template:
    src: config.j2
    dest: /usr/local/nginx/html/index.php
  tags:
    - bn
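- A quick check that php-fpm is up and nginx hands .php requests to it (paths per the playbook above):
/usr/local/php8.1.9/bin/php -v
netstat -antp | grep 9000
curl http://192.168.186.181/index.php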
Installing Redis
ansible ELK -m yum -a "name=epel-release"
ansible ELK -m yum -a "name=redis"
Deploying filebeat with a Shell Script
- The script
#!/bin/bash
# ------------------------------ deploy filebeat ------------------------------ #
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-8.4.0-linux-x86_64.tar.gz
tar -zxvf filebeat-8.4.0-linux-x86_64.tar.gz -C /usr/local/
cd /usr/local/filebeat-8.4.0-linux-x86_64/ || exit
cp /usr/local/filebeat-8.4.0-linux-x86_64/filebeat.yml /usr/local/filebeat-8.4.0-linux-x86_64/filebeat.yml.bak
sed -i "25s/.*/  id: $(hostname)/" /usr/local/filebeat-8.4.0-linux-x86_64/filebeat.yml
sed -i "28s/false/true/" /usr/local/filebeat-8.4.0-linux-x86_64/filebeat.yml
sed -i '32s#.*#     - /var/log/message/*#' /usr/local/filebeat-8.4.0-linux-x86_64/filebeat.yml
sed -i '139s/^/#/' /usr/local/filebeat-8.4.0-linux-x86_64/filebeat.yml
sed -i '141s/^/#/' /usr/local/filebeat-8.4.0-linux-x86_64/filebeat.yml
sed -i '165i\
#----------------------------- KAFKA output --------------------------------\n\
output.kafka:\n\
  enabled: true\n\
  hosts: ["192.168.186.190:9092"]\n\
  topic: system_log' /usr/local/filebeat-8.4.0-linux-x86_64/filebeat.yml
echo "configuration file updated"
./filebeat -e -c filebeat.yml >/dev/null 2>&1 &
sleep 2
if kill -0 $! 2>/dev/null; then
  echo "Start successful"
else
  echo "Start failed, exiting"
  exit 1
fi
echo "enabling start on boot"
echo " cd /usr/local/filebeat-8.4.0-linux-x86_64/ && ./filebeat -e -c filebeat.yml & " >>/etc/rc.local
chmod +x /etc/rc.local
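- To confirm events are actually arriving, the topic can be tailed from the Kafka host; a sketch assuming the stock console consumer from the Kafka distribution is on the PATH:
kafka-console-consumer.sh --bootstrap-server 192.168.186.190:9092 --topic system_log --from-beginning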
Building Jumpserver
Service Deployment
Package Preparation
Upload the following packages
- jumpserver-master.zip
- koko-master-6d4e69b-linux-amd64.tar.gz
- Python-3.6.8.tgz
- luna.tar.gz
- Prepare dependency packages
yum install -y gcc zlib-devel bzip2-devel openssl-devel ncurses-devel sqlite-devel readline-devel tk-devel gdbm-devel db4-devel libpcap-devel xz-devel libffi-devel
Setting Up the Python Environment
tar xf Python-3.6.8.tgz -C /usr/local/src && cd /usr/local/src/Python-3.6.8/
./configure --prefix=/usr/local/python
make -j$(nproc) && make install 
ln -s /usr/local/python/bin/* /usr/local/bin/
python3 -V && pip3 -V

Enter the Python virtual environment
python3.6 -m venv /opt/py3 && source /opt/py3/bin/activate
echo "source /opt/py3/bin/activate" >> /root/.bashrc

Installing Jumpserver
- Unzip and install the required dependencies
yum install -y unzip
unzip jumpserver-master.zip -d /opt/ && cd /opt/ && mv jumpserver-master jumpserver
cd /opt/jumpserver/requirements/
yum -y install $(cat rpm_requirements.txt)

Installing Python Library Dependencies
- Configure the Tsinghua pip3 mirror
mkdir ~/.pip
cat<<EOF>~/.pip/pip.conf
[global]
index-url = https://pypi.tuna.tsinghua.edu.cn/simple
trusted-host = pypi.tuna.tsinghua.edu.cn
EOF
- Install a few packages first, then the rest of the dependencies
pip install --upgrade pip
pip3 install urllib3  pyasn1 six cffi pytest-runner requests jms-storage elasticsearch
pip3 install -r requirements.txt

- Error fixes
  
- Adjust the requirements file
vim requirements.txt
python-gssapi>=0.6.4
python-keycloak-client==0.1.3
requests>=2.22.0
jms-storage>=0.0.23
- Installation complete
  
Installing Redis
yum -y install redis
systemctl start redis
systemctl enable redis

Deploying MySQL
- Install MariaDB
yum -y install mariadb mariadb-devel mariadb-server
systemctl start mariadb
systemctl enable mariadb
- Enter MariaDB
mysql
- Grant privileges in MariaDB
create database jumpserver default charset 'utf8';
grant all on jumpserver.* to 'jumpserver'@'127.0.0.1' identified by 'jumpserver';
flush privileges;
-   grant all: grants the user all privileges
-   on jumpserver.*: the grant covers every table in the 'jumpserver' database
-   to 'jumpserver'@'127.0.0.1': the target user is 'jumpserver', allowed to connect only from the local address '127.0.0.1' (deliberately not a wildcard user)
-   identified by 'jumpserver': the user 'jumpserver' authenticates with the password 'jumpserver'.
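- A quick connection test with the new account (database and credentials per the grant above):
mysql -ujumpserver -pjumpserver -h127.0.0.1 -e "show databases like 'jumpserver';"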

Adjusting the Jumpserver Configuration File
- Generate a secret key
  
- Adjust the configuration file
cd /opt/jumpserver/ && cp config_example.yml config.yml
vim config.yml
# SECURITY WARNING: keep the secret key used in production secret!
# Secret key: change it to a random string in production and never leak it; it can be generated with
# $ cat /dev/urandom | tr -dc A-Za-z0-9 | head -c 49;echo
SECRET_KEY: cUBbafUeVjpsO9txGxKbYQhagezAzYaPfVNGQnI2AGYZIaNFL
# SECURITY WARNING: keep the bootstrap token used in production secret!
# Pre-shared token: coco and guacamole use it to register service accounts, replacing the old register-and-accept mechanism
BOOTSTRAP_TOKEN: h8X7RRmkczrV3Dts
# Development env open this, when error occur display the full process track, Production disable it
# DEBUG mode: with DEBUG on you see much more log detail when errors occur
# DEBUG: true
# DEBUG, INFO, WARNING, ERROR, CRITICAL can set. See https://docs.djangoproject.com/en/1.10/topics/logging/
# Log level
# LOG_LEVEL: DEBUG
# LOG_DIR: 
# Session expiration setting, Default 24 hour, Also set expired on on browser close
# Browser session lifetime, 24 hours by default; can also be set to expire when the browser closes
# SESSION_COOKIE_AGE: 86400
# SESSION_EXPIRE_AT_BROWSER_CLOSE: false
# Database setting, Support sqlite3, mysql, postgres ....
# Database settings
# See https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# SQLite setting:
# Use a single-file SQLite database
# DB_ENGINE: sqlite3
# DB_NAME: 
# MySQL or postgres setting like:
# Use MySQL as the database
DB_ENGINE: mysql
DB_HOST: 127.0.0.1
DB_PORT: 3306
DB_USER: jumpserver
DB_PASSWORD: jumpserver
DB_NAME: jumpserver
# When Django start it will bind this host and port
# ./manage.py runserver 127.0.0.1:8080
# Host and port bound at runtime
HTTP_BIND_HOST: 0.0.0.0
HTTP_LISTEN_PORT: 8080
# Use Redis as broker for celery and web socket
# Redis settings
REDIS_HOST: 127.0.0.1
REDIS_PORT: 6379
Generate the database schema and seed data
cd /opt/jumpserver/utils/ && sh make_migrations.sh

Starting Jumpserver
- Run Jumpserver
cd /opt/jumpserver/
./jms start all -d
- Write a systemd unit
vim /usr/lib/systemd/system/jms.service
[Unit]
Description=jms
After=network.target mariadb.service redis.service docker.service
Wants=mariadb.service redis.service docker.service
[Service]
Type=forking
Environment="PATH=/opt/py3/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin"
ExecStart=/opt/jumpserver/jms start all -d
ExecRestart=/opt/jumpserver/jms restart all -d
ExecStop=/opt/jumpserver/jms stop
[Install]
WantedBy=multi-user.target
- Reload and restart the service
systemctl daemon-reload
systemctl restart jms
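- A quick status check after the restart (8080 per HTTP_LISTEN_PORT above):
systemctl status jms
netstat -antp | grep 8080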
- Log in and check
  
Installing the Web Terminal (koko) Component
Install
tar -zxvf koko-master-6d4e69b-linux-amd64.tar.gz -C /opt/
chown -R root:root /opt/kokodir/
cd /opt/kokodir/ && cp config_example.yml config.yml
vim config.yml

Start & Autostart
- Start
cd /opt/kokodir/ && ./koko &
netstat -antup | grep 2222
- Autostart
echo " cd /opt/kokodir && ./koko & " >> /etc/rc.local
chmod +x /etc/rc.local
- Test SSH
ssh admin@192.168.1.189 -p 2222


Deploying luna
tar -zxvf luna.tar.gz -C /opt/
chown -R root:root /opt/luna/
Deploying nginx
yum install -y nginx && vim /etc/nginx/conf.d/jumpserver.conf

systemctl start nginx
systemctl enable nginx
Access test

Using Jumpserver
- 
System settings 
  
- 
Create users and groups 
  
  
- 
Configure assets 
  
  
Building Zabbix
Fetching the Source Package
- Fetch the Zabbix package
wget https://cdn.zabbix.com/zabbix/sources/stable/6.2/zabbix-6.2.1.tar.gz
tar -zxvf zabbix-6.2.1.tar.gz -C /usr/local/src/ && cd /usr/local/src/zabbix-6.2.1/
- Prepare dependency packages
yum -y install net-snmp net-snmp-devel curl-devel java-1.8.0-openjdk java-1.8.0-openjdk-devel OpenIPMI-devel libssh2-devel libevent libevent-devel mysql-devel openldap-devel libxml2 libxml2-devel
Compile and Install
- Pre-compile preparation
useradd -s /sbin/nologin zabbix
- Run configure
./configure --prefix=/usr/local/zabbix --enable-server --enable-agent --enable-java --with-mysql --with-net-snmp --with-libcurl --with-libxml2 --with-openipmi --enable-ipv6 --with-ldap

- Compile and install
make install
echo $?

- Add the binaries to the PATH
ln -s /usr/local/nginx/sbin/* /usr/local/bin/
ln -s /usr/local/zabbix/sbin/* /usr/local/sbin/
Initializing Zabbix
- Adjust the configuration file
vim /usr/local/zabbix/etc/zabbix_server.conf
DBHost=192.168.186.190
DBName=zabbix
DBUser=zabbix
DBPassword=123456
DBPort=3306
AllowUnsupportedDBVersions=1
- Set ownership
chown -R zabbix:zabbix /usr/local/zabbix/
- Add the Zabbix init scripts
cp /usr/local/src/zabbix-6.2.1/misc/init.d/fedora/core/* /etc/init.d/
vim /etc/init.d/zabbix_server
 BASEDIR=/usr/local/zabbix
- Configure zabbix_agentd
vim /etc/init.d/zabbix_agentd
 BASEDIR=/usr/local/zabbix
- Enable at boot
chkconfig --add zabbix_server
chkconfig --add zabbix_agentd
chkconfig zabbix_server on
chkconfig zabbix_agentd on
- Deploy the Zabbix web UI
cp -r /usr/local/src/zabbix-6.2.1/ui/* /usr/local/nginx/html/
chown -R nginx:nginx /usr/local/nginx/html/
- Create the database and grant privileges on the DB cluster
create database zabbix;
grant all privileges on zabbix.* to zabbix@'192.168.186.%' identified by '123456';
- Import the database files
cd /usr/local/src/zabbix-6.2.1/
yum install -y mariadb
mysql -uroot -p123456 zabbix -h192.168.186.183< database/mysql/schema.sql
mysql -uroot -p123456 zabbix -h192.168.186.183< database/mysql/images.sql
mysql -uroot -p123456 zabbix -h192.168.186.183< database/mysql/data.sql
mysql -uroot -p123456 zabbix -h192.168.186.183< database/mysql/double.sql
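- A quick check that the import landed (host and credentials as above; a fully imported schema should report a large table count):
mysql -uroot -p123456 -h192.168.186.183 zabbix -e "show tables;" | wc -l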
- Deploy the Zabbix web UI (adjusted path)
cp -r /usr/local/src/zabbix-6.2.1/ui/* /tmp/nginx-1.22.0/html/
chown -R nginx:nginx /tmp/nginx-1.22.0/html/
- Log in
  
MySQL + MHA Setup
MHA Setup
Basic Environment Preparation
Basic environment
- Mutual passwordless SSH, set up with the key-push script
- NTP sync, already done by the initialization script
- Upload the packages
  
- Configure the MHA repo
vim /etc/yum.repos.d/mhapath.repo
[mha]
name=mhapath
baseurl=file:///root/mhapath
enabled=1
gpgcheck=0

Adjust the full hosts file on all nodes
vim /etc/hosts
192.168.186.180 keep
192.168.186.181 nginx
192.168.186.182 apache
192.168.186.183 DB1
192.168.186.184 DB2
192.168.186.185 DB3
192.168.186.186 CELK1
192.168.186.187 CELK2
192.168.186.188 CELK3
192.168.186.189 zabbix
192.168.186.200 jumpserver

Copy the packages to all nodes
for ip in 184 185  ; do scp -r /etc/yum.repos.d/mhapath.repo 192.168.186.$ip:/etc/yum.repos.d/ ; done
for ip in 184 185; do scp -r /root/mhapath 192.168.186.$ip:/root ; done
for ip in 184 185; do scp /root/mha4mysql-node-0.57-0.el7.noarch.rpm 192.168.186.$ip:/root ; done

Base packages on all nodes
yum -y install perl-DBD-MySQL perl-Config-Tiny perl-Log-Dispatch perl-Parallel-ForkManager --skip-broken --nogpgcheck;rpm -ivh mha4mysql-node-0.57-0.el7.noarch.rpm

Manager packages
yum install -y  perl-DBD-MySQL perl-Config-Tiny perl-Log-Dispatch perl-Parallel-ForkManager perl-Time-HiRes perl-ExtUtils-CBuilder perl-ExtUtils-MakeMaker perl-CPAN
rpm -ivh mha4mysql-manager-0.58-0.el7.centos.noarch.rpm 
- Check the generated manager files
  
Semi-sync Replication Configuration
Check plugin support
mysql -uroot -p123456 -e "show variables like '%have_dynamic_loading%';"

Install the semi-sync plugins
- Required on every node
mysql -uroot -p123456  -e "install plugin rpl_semi_sync_master soname 'semisync_master.so';install plugin rpl_semi_sync_slave soname 'semisync_slave.so';"
- Verify the installation
mysql -uroot -p123456  -e "show plugins;" | grep rpl_semi_sync*
mysql -uroot -p123456  -e "select * from information_schema.plugins;" | grep rpl_semi_sync*

Master Node Configuration
Adjust my.cnf on the master
vim /etc/my.cnf
server-id=1
log-bin=/data/mysql/log/mysql-bin
log-bin-index=/data/mysql/log/mysql-bin.index
binlog_format=mixed
rpl_semi_sync_master_enabled=1
rpl_semi_sync_master_timeout=10000
rpl_semi_sync_slave_enabled=1
relay_log_purge=0
relay-log = /data/mysql/log/relay-bin
relay-log-index = /data/mysql/log/slave-relay-bin.index
binlog-do-db=HA
log_slave_updates=1

- Restart the service
systemctl restart mysqld
- Create the test database referenced by the config (binlog-do-db=HA)
mysql -uroot -p123456
create database HA;
use HA;
create table test(id int,name varchar(20));
insert into test values(1,'tom1');

Grant the users
grant replication slave on *.* to slave@'192.168.186.%' identified by '123456';
grant all privileges on *.* to root@'192.168.186.%' identified by '123456';
flush privileges;
Check the master's binlog status
show master status;

Export the HA database to the slaves
mysqldump -uroot -p123456 -B HA>HA.sql
scp HA.sql root@192.168.186.184:~
scp HA.sql root@192.168.186.185:~

Configure the Slaves and Verify
Import the database
mysql -uroot -p123456 <HA.sql
Adjust the configuration file
- Note: the server-id here corresponds to the host
vim /etc/my.cnf
server-id=2
log-bin=/data/mysql/log/mysql-bin
log-bin-index=/data/mysql/log/mysql-bin.index
binlog_format=mixed
rpl_semi_sync_master_enabled=1
rpl_semi_sync_master_timeout=10000
rpl_semi_sync_slave_enabled=1
relay_log_purge=0
relay-log=/data/mysql/log/relay-bin
relay-log-index=/data/mysql/log/slave-relay-bin.index
binlog-do-db=HA
log_slave_updates=1

Create the accounts granted earlier
grant replication slave on *.* to slave@'192.168.186.%' identified by '123456';
grant all privileges on *.* to root@'192.168.186.%' identified  by '123456';
flush privileges;

Point the slaves at the master
stop slave;
change master to master_host='192.168.186.183',master_user='slave',master_password='123456',master_log_file='mysql-bin.000001',master_log_pos=1504;
start slave;
show slave status\G

Set the slaves to read-only
mysql -uroot -p123456 -e 'set global read_only=1'
Back on the master, verify
mysql -uroot -p123456 -e "show variables like '%rpl_semi_sync%';"
mysql -uroot -p123456 -e "show status like '%rpl_semi_sync%';"
Deploying MHA
Prepare the MHA working directory (manager node)
mkdir /etc/masterha
mkdir -p /var/log/masterha/app1
vim /etc/masterha/app1.cnf
[server default]
manager_workdir=/var/log/masterha/app1  
manager_log=/var/log/masterha/app1/manager.log
master_binlog_dir=/data/mysql/log
user=root
password=123456
ping_interval=1
remote_workdir=/tmp
repl_user=slave
repl_password=123456
report_script=/usr/local/send_report
shutdown_script=""
ssh_user=root
[server1]
hostname=DB2
port=3306
[server2]
hostname=DB3
port=3306
Check the Configuration
- SSH check: passwordless SSH was originally configured by IP, so log in once by hostname first for the test to pass.
masterha_check_ssh --conf=/etc/masterha/app1.cnf

- Check replication
masterha_check_repl --conf=/etc/masterha/app1.cnf

- Check the manager status
masterha_check_status --conf=/etc/masterha/app1.cnf

Start and Verify
- Start
nohup masterha_manager --conf=/etc/masterha/app1.cnf \
--remove_dead_master_conf  --ignore_last_failover < /dev/null > \
/var/log/masterha/app1/manager.log 2>&1 &
- Check the status
masterha_check_status --conf=/etc/masterha/app1.cnf
- Check the log
tail /var/log/masterha/app1/manager.log
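- A failover drill is worth running before relying on this; a minimal sketch (note the manager exits after a failover, so restart it afterwards):
systemctl stop mysqld                              # on the current master, simulate a crash
tail -f /var/log/masterha/app1/manager.log         # on the manager, watch the switchover
masterha_check_status --conf=/etc/masterha/app1.cnf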

Configuring the MHA VIP
Configure the virtual IP
ifconfig ens37:1 192.168.186.100 netmask 255.255.255.0 up
Set up the failover script
vim /etc/masterha/app1.cnf
master_ip_failover_script=/usr/bin/master_ip_failover

vim /usr/bin/master_ip_failover
#!/usr/bin/env perl
use strict;
use warnings FATAL => 'all';
use Getopt::Long;
my (
    $command,          $ssh_user,        $orig_master_host, $orig_master_ip,
    $orig_master_port, $new_master_host, $new_master_ip,    $new_master_port
);
my $vip = '192.168.186.100/24';
my $key = '1';
my $ssh_start_vip = "/sbin/ifconfig ens33:$key $vip";
my $ssh_stop_vip = "/sbin/ifconfig ens33:$key down";
GetOptions(
    'command=s'          => \$command,
    'ssh_user=s'         => \$ssh_user,
    'orig_master_host=s' => \$orig_master_host,
    'orig_master_ip=s'   => \$orig_master_ip,
    'orig_master_port=i' => \$orig_master_port,
    'new_master_host=s'  => \$new_master_host,
    'new_master_ip=s'    => \$new_master_ip,
    'new_master_port=i'  => \$new_master_port,
);
exit &main();
sub main {
    print "\n\nIN SCRIPT TEST====$ssh_stop_vip==$ssh_start_vip===\n\n";
    if ( $command eq "stop" || $command eq "stopssh" ) {
        my $exit_code = 1;
        eval {
            print "Disabling the VIP on old master: $orig_master_host \n";
            &stop_vip();
            $exit_code = 0;
        };
        if ($@) {
            warn "Got Error: $@\n";
            exit $exit_code;
        }
        exit $exit_code;
    }
    elsif ( $command eq "start" ) {
        my $exit_code = 10;
        eval {
            print "Enabling the VIP - $vip on the new master - $new_master_host \n";
            &start_vip();
            $exit_code = 0;
        };
        if ($@) {
            warn $@;
            exit $exit_code;
        }
        exit $exit_code;
    }
    elsif ( $command eq "status" ) {
        print "Checking the Status of the script.. OK \n";
        #`ssh $ssh_user\@cluster1 \" $ssh_start_vip \"`;
        exit 0;
    }
    else {
        &usage();
        exit 1;
    }
}
# A simple system call that enable the VIP on the new master
sub start_vip() {
    `ssh $ssh_user\@$new_master_host \" $ssh_start_vip \"`;
}
# A simple system call that disable the VIP on the old_master
sub stop_vip() {
    `ssh $ssh_user\@$orig_master_host \" $ssh_stop_vip \"`;
}
sub usage {
    print
    "Usage: master_ip_failover --command=start|stop|stopssh|status --orig_master_host=host --orig_master_ip=ip --orig_master_port=port --new_master_host=host --new_master_ip=ip --new_master_port=port\n";
}
chmod +x /usr/bin/master_ip_failover
Environment Test
masterha_check_repl --conf=/etc/masterha/app1.cnf
masterha_check_ssh --conf=/etc/masterha/app1.cnf
masterha_check_status --conf=/etc/masterha/app1.cnf


- Setup complete
Xtrabackup Backups
The Script
#!/bin/bash
de="--defaults-file=/etc/my.cnf"
so="--socket=/usr/local/mysql/mysql.sock"
us="--user=root"
pa="--password=123456"
po="--port=3306"
xb="/opt/mysqlbackup/xback/"
ad="/opt/mysqlbackup/add/"
da="/data/mysql/data"
date1=$(date +%F)
t2=/root/t2
t3=/root/t3
if [ $(grep -c "datadir=$da" /etc/my.cnf) -eq 0 ];then
sed -i "\$a datadir=$da" /etc/my.cnf
systemctl restart mysqld
fi
#full backup
qb (){
    if [ ! -d $xb ];then
    mkdir -p $xb
    fi
    tar -zcf /tmp/mysql_$date1.tar $xb --remove-files &> /dev/null
    if [ $? -eq 0 ];then
        echo -e "\e[32;1m previous backups archived successfully\e[0m"
    else 
        echo -e "\e[31;1m archiving previous backups failed\e[0m"
    fi
    if [ ! -d $xb ];then
    mkdir -p $xb
    fi
    innobackupex $de $so $us $pa  $xb &> /dev/null
    if [ $? -eq 0 ];then
           echo -e "\e[32;1mfull backup succeeded\e[0m"
        else 
            echo -e "\e[31;1mfull backup failed\e[0m"
    fi
 rm -rf $ad*
}
#incremental backup
zb (){
    if [ ! -d $ad ];then
    mkdir -p $ad
    fi
    if [ $(ls -A $ad | wc -l) -eq 0 ];then
    find $xb -maxdepth 1 -type d > $t2
    a1=$(find $xb -maxdepth 1 -type d | wc -l)
    innobackupex $de $so $us $pa $po --incremental $ad --incremental-basedir=$(sed -n "${a1}p" $t2) &> /dev/null
     if [ $? -eq 0 ];then
           echo -e "\e[32;1mincremental backup succeeded\e[0m"
        else 
            echo -e "\e[31;1mincremental backup failed\e[0m"
    fi
    else
    find $ad -maxdepth 1 -type d > $t3
    a2=$(find $ad -maxdepth 1 -type d | wc -l)
    innobackupex $de $so $us $pa $po --incremental $ad --incremental-basedir=$(sed -n "${a2}p" $t3) &> /dev/null
     if [ $? -eq 0 ];then
           echo -e "\e[32;1mincremental backup succeeded\e[0m"
        else 
            echo -e "\e[31;1mincremental backup failed\e[0m"
    fi
    fi
}
#restore
hf (){
    if [ $(ls -A $ad | wc -l) -eq 0 ];then
    find $xb -maxdepth 1 -type d > $t2
    b1=$(find $xb -maxdepth 1 -type d | wc -l)
    innobackupex --apply-log $(sed -n "${b1}p" $t2) &> /dev/null
    systemctl stop mysqld
    rm -rf $da/*
    innobackupex --copy-back $(sed -n "${b1}p" $t2) &> /dev/null
    chown -R mysql:mysql $da
    systemctl restart mysqld
    else
    find $xb -maxdepth 1 -type d > $t2
    b1=$(find $xb -maxdepth 1 -type d | wc -l)
    innobackupex --apply-log --redo-only $(sed -n "${b1}p" $t2) &> /dev/null
    find $ad -maxdepth 1 -type d > $t3
    b2=$(find $ad -maxdepth 1 -type d | wc -l)
    cc=2
    while [ $b2 -gt 2 ]
    do
    innobackupex --apply-log --redo-only $(sed -n "${b1}p" $t2) --incremental-dir=$(sed -n "${cc}p" $t3) &> /dev/null
    b2=$((b2-1))
    cc=$((cc+1))
    done
    b3=$(find $ad -maxdepth 1 -type d | wc -l)
    innobackupex --apply-log $(sed -n "${b1}p" $t2) --incremental-dir=$(sed -n "${b3}p" $t3) &> /dev/null
    innobackupex --apply-log $(sed -n "${b1}p" $t2) &> /dev/null
    systemctl stop mysqld
    rm -rf $da
    mkdir $da
    innobackupex --copy-back $(sed -n "${b1}p" $t2) &> /dev/null
     if [ $? -eq 0 ];then
           echo -e "\e[32;1mrestore succeeded\e[0m"
        else 
            echo -e "\e[31;1mrestore failed\e[0m"
    fi
    chown -R mysql:mysql $da
    systemctl start mysqld
    fi
}
case "$1" in
    all)
        qb
        ;;
    add)
        zb
        ;;
    rec)
        hf
        ;;
    *)
        echo $"Usage: $0 {all(全备)|add(增量)|rec(恢复)}"
        ;;
esac
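- To run the backups unattended, a cron schedule along these lines would work (xtra.sh is the script name used in the tests below; the times are only an example):
0 2 * * 0   /bin/bash /root/xtra.sh all
0 2 * * 1-6 /bin/bash /root/xtra.sh add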
Environment Preparation
Directories
- Create the full-backup directory
- Create the incremental-backup directory
mkdir -p /opt/mysqlbackup/xback
mkdir -p /opt/mysqlbackup/add
Dependencies
- Upload the dependency packages locally
percona-xtrabackup-2.3.6-1.el7.x86_64.rpm
libev-4.03-3.el6.x86_64.rpm
- Copy the dependencies to each DB host and install them
ansible DB -m copy -a "src=/root/libev-4.03-3.el6.x86_64.rpm dest=/root remote_src=no"
ansible DB -m unarchive -a "src=Percona-XtraBackup-2.4.14-ref675d4-el7-x86_64-bundle.tar dest=/root"
ansible DB -m command -a "rpm -ivh libev-4.03-3.el6.x86_64.rpm"

- Install xtrabackup
ansible DB -m yum -a "name=percona-xtrabackup-24-2.4.14-1.el7.x86_64.rpm"
Script Tests
Full-backup test
- Prepare a test table
mysql> select * from t1;

- Run the full-backup test
  
- Delete the contents
mysql> delete from t1;
select * from t1;
- Test the restore
sh -x xtra.sh rec
- Check the result
mysql> select * from t1;    

Incremental test
- Add data
mysql> insert into t1 values(2,"b");
- First incremental backup
 sh -x xtra.sh add        
- Delete data
mysql> delete from t1 where id=1;
- Check the table
mysql> select * from t1;
- Second incremental backup
sh -x xtra.sh add
- Restore from the incremental backups
 sh -x xtra.sh rec
- Check the result
mysql> select * from t1;

Real-time Sync of Backups to Ceph with rsync
- Install rsync and adjust the configuration file
[root@keep ~]# ansible DB -m yum -a "name=rsync"    
[root@keep ~]# ansible ceph -m yum -a "name=rsync"   
[root@ceph1 ~]# useradd backup      #add the backup user
[root@ceph1 ~]#  echo "backup:123456" | chpasswd
[root@ceph1 ~]# mkdir /webbackup    #create the backup directory
[root@ceph1 ~]# chown -R backup:backup /webbackup/
[root@ceph1 ~]# cat /etc/rsyncd.conf |grep -v ^#       
[wwwroot]                             
path = /webbackup                   
comment = used for web-data root    
read only = false                    
list = yes                                     
auth users = backupuser                      
secrets file = /etc/rsync.passwd
- Initialize
[root@ceph1 ~]# echo "backupuser:123456" >> /etc/rsync.passwd
[root@ceph1 ~]# chmod 600 /etc/rsync.passwd    #create the virtual user and restrict permissions
[root@ceph1 ~]# rsync --daemon --config=/etc/rsyncd.conf  #start the daemon
[root@ceph1 ~]# echo "rsync --daemon --config=/etc/rsyncd.conf " >> /etc/rc.d/rc.local
- Upload sersync to keep
[root@keep ~]# ansible DB -m unarchive -a "src=sersync2.5.4_64bit_binary_stable_final.tar.gz dest=/usr/local"   
[root@keep ~]# ansible DB -m command -a "ln -s /usr/local/GNU-Linux-x86/sersync2 /usr/local/bin/"    
[root@DB1 ~]# vim /usr/local/GNU-Linux-x86/confxml.xml  

            <remote ip="192.168.186.187" name="wwwroot"/>
            <!--<remote ip="192.168.8.39" name="tongbu"/>-->
            <!--<remote ip="192.168.8.40" name="tongbu"/>-->
        </localpath>
        <rsync>
            <commonParams params="-artuz"/>
            <auth start="true" users="backupuser" passwordfile="/etc/rsync.passwd"/>
            <userDefinedPort start="false" port="874"/><!-- port=874 -->
            <timeout start="false" time="100"/><!-- timeout=100 -->
            <ssh start="false"/>
        </rsync>
        <failLog path="/tmp/rsync_fail_log.sh" timeToExecute="60"/><!--default every 60mins execute once-->
        <crontab start="false" schedule="600"><!--600mins-->
            <crontabfilter start="false">
                <exclude expression="*.php"></exclude>
                <exclude expression="info/*"></exclude>
            </crontabfilter>
        </crontab>
        <plugin start="false" name="command"/>
    </sersync>
    <plugin name="command">
        <param prefix="/bin/sh" suffix="" ignoreError="true"/>  <!--prefix /opt/tongbu/mmm.sh suffix-->
        <filter start="false">
            <include expression="(.*)\.php"/>
            <include expression="(.*)\.sh"/>
        </filter>
    </plugin>
    <plugin name="socket">
        <localpath watch="/opt/tongbu">
            <deshost ip="192.168.138.20" port="8009"/>
        </localpath>
    </plugin>
    <plugin name="refreshCDN">
        <localpath watch="/data0/htdocs/cms.xoyo.com/site/">
            <cdninfo domainname="ccms.chinacache.com" port="80" username="xxxx" passwd="xxxx"/>
            <sendurl base="http://pic.xoyo.com/cms"/>
            <regexurl regex="false" match="cms.xoyo.com/site([/a-zA-Z0-9]*).xoyo.com/images"/>
        </localpath>
    </plugin>
</head>
- Start sersync
sersync2 -d -r -o /usr/local/GNU-Linux-x86/confxml.xml
- Check whether the backup succeeded
- Backup succeeded
 
[root@ceph1 webbackup]# ls
aa.txt  add  xback
- Add data in the database and test an incremental backup
 mysql> insert into t1 values(3,"c");         
 [root@DB1 ~]# sh xtra.sh add
 [root@ceph1 webbackup]# ls add/

Backing Up Data to the Ceph Mount
[root@ceph2 ceph]# mount -t ceph 192.168.186.186:6789:/ /mnt/ceph -o name=admin,secret=AQAsTCRkub/SLhAAvTEnC4/ZlRiY6nyV1XI+Rg==
[root@ceph2 ceph]# cat /etc/rsyncd.conf |grep -v ^#|grep -v ^$
uid = root                          
gid = root                          
address = 192.168.186.187                
port = 873                              
hosts allow =192.168.186.0/24                   
use chroot = yes         
max connections = 5                        
pid file = /var/run/rsyncd.pid       
lock file = /var/run/rsync.lock       
log file = /var/log/rsyncd.log  
motd file = /etc/rsyncd.motd
[wwwroot]                             
path = /mnt/ceph                   
comment = used for web-data root    
read only = false                    
list = yes                                     
auth users = backupuser                      
secrets file = /etc/rsync.passwd
- Check the process
[root@ceph2 ceph]# ps -ef|grep rsync    
root       10689       1  0 09:17 ?        00:00:00 rsync --daemon --config=/etc/rsyncd.conf
root       10708   10651  0 09:24 pts/1    00:00:00 grep --color=auto rsync
- Kill the process, restart it, and change the remote host address on the database side
[root@ceph2 ceph]# kill 10689
[root@ceph2 ceph]# rsync --daemon --config=/etc/rsyncd.conf
[root@DB1 ~]# vim /usr/local/GNU-Linux-x86/confxml.xml
    <sersync>
        <localpath watch="/opt/mysqlbackup/">
            <remote ip="192.168.186.187" name="wwwroot"/>
            <!--<remote ip="192.168.8.39" name="tongbu"/>-->
            <!--<remote ip="192.168.8.40" name="tongbu"/>-->
        </localpath>
        <rsync>
            <commonParams params="-artuz"/>
            <auth start="true" users="backupuser" passwordfile="/etc/rsync.passwd"/>
            <userDefinedPort start="false" port="874"/><!-- port=874 -->
            <timeout start="false" time="100"/><!-- timeout=100 -->
            <ssh start="false"/>
        </rsync>
        <failLog path="/tmp/rsync_fail_log.sh" timeToExecute="60"/><!--default every 60mins execute once-->
        <crontab start="false" schedule="600"><!--600mins-->
            <crontabfilter start="false">
                <exclude expression="*.php"></exclude>
                <exclude expression="info/*"></exclude>
            </crontabfilter>
        </crontab>
        <plugin start="false" name="command"/>
    </sersync>
- Restart to apply
sersync2  -d -r -o  /usr/local/GNU-Linux-x86/confxml.xml
- Verify the sync
  
Building Ceph
Basic Environment
Already done during initialization
- NTP time sync
- Passwordless login
- Firewall disabled
- One extra 20 GB disk added
- Hosts resolution
Still to do
- Yum repo
- Ceph repo
Software Preparation
Ceph repo
vim /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.aliyun.com/ceph/rpm-15.2.9/el7/$basearch
enabled=1
gpgcheck=0
type=rpm-md
#gpgkey=https://download.ceph.com/keys/release.asc
[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-15.2.9/el7/noarch
enabled=1
gpgcheck=0
type=rpm-md
#gpgkey=https://download.ceph.com/keys/release.asc
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-15.2.9/el7/SRPMS
enabled=1
gpgcheck=0
type=rpm-md
#gpgkey=https://download.ceph.com/keys/release.asc
EPEL repo
yum -y install epel-release && yum repolist

Install Ceph and Its Dependencies
yum install -y ceph-deploy ceph ceph-radosgw snappy leveldb gdisk python-argparse gperftools-libs python-setuptools yum-plugin-priorities yum-utils ntpdate python-pip

- Environment ready; start deploying Ceph
Deploying the Management Node
1. Create the monitor service
cd /etc/ceph
ceph-deploy new ceph1
2. Change the replica count
Since only three machines are used, the replica count needs to be set to two.
vim ceph.conf
Edit the configuration file as follows:
[global]
fsid = 085c7b61-cac0-494b-9bfc-18fe2cc2deb0
mon_initial_members = ceph1
mon_host = 192.168.186.186
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd_pool_default_size = 2

3. Deploy the initial monitor
ceph-deploy mon create ceph1

4. Create the keyring files
- Gather the key files
ceph-deploy gatherkeys ceph1

- Confirm they were collected
  
- Check the node permissions
vim ceph.client.admin.keyring

5. Deploy the MGR management service
Deploy MGR on the master node; it can also be deployed on other machines for high availability.
ceph-deploy mgr create ceph1

6. Deploy the OSD service
- This prepares the disks: it wipes any previous contents and confirms each disk is usable.
cd /etc/ceph/
ceph-deploy disk zap ceph1  /dev/sdb
ceph-deploy disk zap ceph2 /dev/sdb
ceph-deploy disk zap ceph3 /dev/sdb

- Add the OSD nodes
ceph-deploy osd create ceph1 --data /dev/sdb
ceph-deploy osd create ceph2 --data /dev/sdb
ceph-deploy osd create ceph3 --data /dev/sdb
- Check OSD status
ceph-deploy osd list ceph1 ceph2 ceph3

 
 
7. Unify the cluster configuration
- Distribute the keys (run on the management node)
ceph-deploy admin ceph1 ceph2 ceph3

- Adjust the keyring permissions (run on all nodes)
chmod +r /etc/ceph/ceph.client.admin.keyring

8. Deploy MDS
- Install
ceph-deploy mds create ceph1 ceph2 ceph3

- Check MDS status
ceph mds stat

9. Check Ceph status
ceph -s

10. Install missing pip modules
- Configure a domestic pip mirror and install the missing modules
mkdir ~/.pip
cat<<EOF>~/.pip/pip.conf
[global]
index-url = https://pypi.tuna.tsinghua.edu.cn/simple
trusted-host = pypi.tuna.tsinghua.edu.cn
EOF
pip3 install pecan werkzeug 

- Reboot and check the module status again
reboot
ceph -s

Deploying the Ceph File System
Create the file system on the management node
- Create the placement groups (PGs)
- Create the file system
ceph osd pool create cephfs_data 128
ceph osd pool create cephfs_metadata 128
ceph fs new cephfs cephfs_metadata cephfs_data
ceph fs ls
ceph mds stat

Mount the file system on the other nodes
- Prepare the mount point
mkdir /mnt/ceph
- Read the key on the management node
cat /etc/ceph/ceph.client.admin.keyring
- Mount with the key on the two other nodes (the management node handles no reads/writes, so it is not mounted)
mount -t ceph 192.168.186.186:6789:/ /mnt/ceph -o name=admin,secret=AQBgLyBk+IjQJxAAkTaPx32hR9A0+yqG/+SqmA==
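- A quick check that the mount is live:
df -hT /mnt/ceph
ceph -s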

Setup Complete
At this point the Ceph build is done. The process demands a clear grasp of the underlying logic and concepts; this write-up emphasizes operations over theory.
Static/Dynamic Separation
Prepare Test Pages
Static pages
- Place test content on the nginx hosts
- Two more nginx instances are installed on DB2 and DB3
[root@nginx ~]# echo "nginx1" > /usr/local/nginx/html/index.html   
[root@DB2 ~]# echo "nginx2" > /usr/local/nginx/html/index.html     
[root@DB3 ~]# echo "nginx3" > /usr/local/nginx/html/index.html     
Dynamic pages
- Configure on the Apache side
 vim /var/www/html/test.php
<?php
        $link= new mysqli('192.168.186.183','test','123456');
        if ($link)echo "connection success......";
        $link->close();
?>
- DB here is the other Apache host
<?php
        $link= new mysqli('192.168.186.183','test','123456');
        if ($link)echo "DB connection success......";
        $link->close();
?>
Configure nginx Reverse Proxy, Load Balancing, and Static/Dynamic Separation
- Adjust the reverse-proxy configuration
vim /usr/local/nginx/conf/nginx.conf
    upstream apache {
        server 192.168.186.183:80;
        server 192.168.186.182:80;
}
        upstream nginx {
        server 192.168.186.181:808;
        server 192.168.186.184:808;
        server 192.168.186.185:808;
}
- Add port 808 in nginx to serve the static pages
       server{
         listen 808;
         server_name  localhost;
         location / {
            root   html;
            index  index.html;
         }
}
- Configure static/dynamic separation on the main port
- The catch-all location is handled by nginx
- Requests whose URI matches .php are proxied to Apache
 
    server {
        listen       80;
        server_name  localhost;
        #charset koi8-r;
        #access_log  logs/host.access.log  main;
        location / {
                proxy_pass http://nginx;
        }
        location ~ .*\.(php|php5)?$
            {
                # fastcgi_pass would conflict with proxy_pass here;
                # .php requests are handed to the Apache pool instead
                proxy_pass http://apache;
            }
    }
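- After a reload, both paths can be exercised from the command line (test.php per the page created above):
nginx -s reload
curl http://192.168.186.181/            # static: round-robins across the nginx pool
curl http://192.168.186.181/test.php    # dynamic: proxied to the Apache pool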
Test Results
Testing 192.168.186.181




Testing 192.168.186.184





Dynamic Parsing with Tomcat
Environment Setup
- Note: with the external network down, the NAT subnet was used for testing; 192.168.45.116 corresponds to 192.168.186.180
- Upload the packages
apache-tomcat-8.5.42.tar.gz mysql-connector-java-5.1.47.tar.gz jdk-8u171-linux-x64.tar.gz
- Install the JDK
 tar -zxvf jdk-8u171-linux-x64.tar.gz -C /usr/local/  
 vim /etc/profile
JAVA_HOME=/usr/local/jdk1.8.0_171 
PATH=$JAVA_HOME/bin:$PATH
CLASSPATH=$JAVA_HOME/jre/lib/ext:$JAVA_HOME/lib/tools.jar 
export PATH JAVA_HOME CLASSPATH
source !$
java -version
- Install Tomcat
tar -zxvf apache-tomcat-8.5.42.tar.gz -C /usr/local/ && cd /usr/local/
mv apache-tomcat-8.5.42/  tomcat
vim /lib/systemd/system/tomcat.service
[Unit]
Description=tomcat
After=network.target
[Service]
Type=forking
Environment=JAVA_HOME=/usr/local/jdk1.8.0_171/
Environment=CATALINA_HOME=/usr/local/tomcat
ExecStart=/usr/local/tomcat/bin/startup.sh start
ExecStop=/usr/local/tomcat/bin/shutdown.sh stop
PrivateTmp=true
[Install]
WantedBy=multi-user.target
systemctl daemon-reload
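- Then start Tomcat and confirm the listener (8080 is Tomcat's default HTTP port):
systemctl start tomcat && systemctl enable tomcat
ss -antp | grep 8080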
- Install the MySQL connector
tar -zxvf mysql-connector-java-5.1.47.tar.gz -C /usr/local/src/
cd /usr/local/src/mysql-connector-java-5.1.47/
- Put the connector jar into Tomcat's lib directory
cp mysql-connector-java-5.1.47-bin.jar /usr/local/tomcat/lib/
Configuring PHP
- Download link: http://php-java-bridge.sourceforge.net/pjb/download.php
- Download two jars, JavaBridge.jar and php-servlet.jar, and place them in Tomcat's lib directory.
  
  
- Add the following filter configuration inside the web-app element of Tomcat's web.xml
- This lets PHP files under the Tomcat webroot be parsed
 
vim /usr/local/tomcat/conf/web.xml
<web-app xmlns="http://xmlns.jcp.org/xml/ns/javaee"
  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
  xsi:schemaLocation="http://xmlns.jcp.org/xml/ns/javaee
                      http://xmlns.jcp.org/xml/ns/javaee/web-app_3_1.xsd"
  version="3.1">
           <filter>
        <filter-name>PhpCGIFilter</filter-name>
        <filter-class>php.java.servlet.PhpCGIFilter</filter-class>
    </filter>
    <filter-mapping>
        <filter-name>PhpCGIFilter</filter-name>
        <url-pattern>/*</url-pattern>
    </filter-mapping>
    <listener>
        <listener-class>php.java.servlet.ContextLoaderListener</listener-class>
    </listener>
    <servlet>
        <servlet-name>PhpJavaServlet</servlet-name>
        <servlet-class>php.java.servlet.PhpJavaServlet</servlet-class>
    </servlet>
    <servlet>
        <servlet-name>PhpCGIServlet</servlet-name>
        <servlet-class>php.java.servlet.fastcgi.FastCGIServlet</servlet-class>
        <load-on-startup>0</load-on-startup>
    </servlet>
    <servlet-mapping>
        <servlet-name>PhpJavaServlet</servlet-name>
        <url-pattern>*.phpjavabridge</url-pattern>
    </servlet-mapping>
    <servlet-mapping>
        <servlet-name>PhpCGIServlet</servlet-name>
        <url-pattern>*.php</url-pattern>
    </servlet-mapping>
- Add the following to the <welcome-file-list> element to enable the welcome page
    <welcome-file-list>
        <welcome-file>index.html</welcome-file>
        <welcome-file>index.htm</welcome-file>
        <welcome-file>index.jsp</welcome-file>
        <welcome-file>index.php</welcome-file>
    </welcome-file-list>
- Write a test page
vim /usr/local/tomcat/webapps/ROOT/index.php
<?php
        phpinfo();
?>
- Test parsing
  
Installing the Forum
- Upload the source package
  
- Deploy
unzip upload.zip 
cd upload
mv * /usr/local/tomcat/webapps/ROOT/
- Open the web installer to start the setup wizard
  
- Test on the port
  
- Add Tomcat's port to the nginx reverse-proxy pool
  
Keepalived Setup
Suppress ARP Responses for the VIP
vim /etc/sysctl.conf
- When editing, make sure the settings match the actual interface names
net.ipv4.conf.ens33.arp_ignore = 1
net.ipv4.conf.ens33.arp_announce = 2
Or use the following settings; either variant works.
net.ipv4.conf.ens33.arp_ignore = 1
net.ipv4.conf.ens33.arp_announce = 2
net.ipv4.conf.all.arp_ignore = 1
net.ipv4.conf.all.arp_announce = 2
net.ipv4.conf.lo.arp_ignore = 1
net.ipv4.conf.lo.arp_announce = 2
Master Node Configuration
- Install the software
yum install -y keepalived 
- Edit the configuration file
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
   router_id keep2
}
vrrp_instance lvs-dr {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.45.88
    }
}
virtual_server 192.168.45.88 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    #persistence_timeout 50
    protocol TCP
    real_server 192.168.45.27 80 {
        weight 1
        TCP_CHECK {    
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.45.28 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }   
    }
}
Backup Node Configuration
- Install Keepalived
yum install -y keepalived 
- Adjust the configuration file
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
   router_id LVS_DEVEL
}
vrrp_instance lvs-dr {
    state BACKUP
    interface ens33
    nopreempt
    virtual_router_id 51
    priority 99
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
       192.168.45.88 
    }
}
virtual_server 192.168.45.88 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 2
    protocol TCP
  real_server 192.168.186.181 80  {
      weight 1
      TCP_CHECK {    
          connect_timeout 3
          nb_get_retry 3
          delay_before_retry 3
          connect_port 80
      }
  }
  
  real_server 192.168.186.182 80  {
      weight 1
      TCP_CHECK {    
          connect_timeout 3
          nb_get_retry 3
          delay_before_retry 3
          connect_port 80
      }
  }
  
  real_server 192.168.186.183 80  {
      weight 1
      TCP_CHECK {    
          connect_timeout 3
          nb_get_retry 3
          delay_before_retry 3
          connect_port 80
      }
  }
}
- Restart to apply
systemctl start keepalived
Testing LVS
 [root@keep2 network-scripts]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.45.88:80 rr
  -> 192.168.45.27:80             Route   1      0          0         
  -> 192.168.45.28:80             Route   1      2          0
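- A simple VIP failover test: stop the master and confirm the backup picks up the VIP:
systemctl stop keepalived                    # on the master keepalived node
ip addr show ens33 | grep 192.168.45.88      # on the backup, the VIP should now appear
ipvsadm -ln                                  # the forwarding table should still list the real servers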




Building the Discuz Forum
Get the Source Package
Upload and Deploy
- Upload the source package
rz -E upload.zip
unzip upload.zip
- Distribute to the nginx and Apache hosts
vim discuz.sh
#!/bin/bash
for i in 1 4 5;
do 
 scp -r /root/upload 192.168.186.18$i:/usr/local/nginx/html/ 
done
for i in 2 3;
do
 scp -r /root/upload 192.168.186.18$i:/var/www/html/
done
sh discuz.sh
- Set ownership on the nginx side
chown -R nginx:nginx nginx
nginx -s reload
Set Up the Forum





Zabbix Usage and Monitoring
Deploying agent/agent2 on All Nodes
Batch-install the agents with Ansible
ansible all -a 'wget https://repo.zabbix.com/zabbix/6.2/rhel/7/x86_64/zabbix-release-6.2-1.el7.noarch.rpm --no-check-certificate'
ansible all -a 'yum install -y zabbix-agent2-6.2.8-release1.el7.x86_64.rpm'
ansible all -a 'yum install -y zabbix-agent-6.2.0-1.el7.x86_64.rpm'
Rendering the Agent Configuration Files
- Agent configuration file
egrep -v '^#|^$' /etc/zabbix/zabbix_agent.conf.j2
PidFile=/run/zabbix/zabbix_agent2.pid
LogFile=/var/log/zabbix/zabbix_agent2.log
LogFileSize=0
Server=192.168.186.189
ListenPort=10050
ServerActive=192.168.186.189:10051
Hostname={{ ansible_hostname }}
Timeout=10
Include=/etc/zabbix/zabbix_agent2.d/*.conf
PluginSocket=/run/zabbix/agent.plugin.sock
UnsafeUserParameters=1
ControlSocket=/run/zabbix/agent.sock
Include=./zabbix_agent2.d/plugins.d/*.conf
- Agent2 configuration file
egrep -v '^#|^$' /etc/zabbix/zabbix_agent2.conf.j2
PidFile=/run/zabbix/zabbix_agent2.pid
LogFile=/var/log/zabbix/zabbix_agent2.log
LogFileSize=0
Server=192.168.186.189
ListenPort=10052
ServerActive=192.168.186.189:10051
Hostname={{  ansible_hostname  }}
Timeout=10
Include=/etc/zabbix/zabbix_agent2.d/*.conf
PluginSocket=/run/zabbix/agent.plugin.sock
UnsafeUserParameters=1
ControlSocket=/run/zabbix/agent.sock
Include=./zabbix_agent2.d/plugins.d/*.conf
Distribute the Configuration Files
- Playbook
- name: copy_conf
  hosts: all
  tasks:
  - name: 'Distribute the configuration files'
    template:
      src: "{{ item.src }}"
      dest: "{{ item.dest }}"
    with_items:
    - { src: 'zabbix_agent.conf.j2', dest: '/etc/zabbix/zabbix_agent.conf' }
    - { src: 'zabbix_agent2.conf.j2', dest: '/etc/zabbix/zabbix_agent2.conf' }
- Push them
ansible-playbook copy.yml -f8
Batch-start agent & agent2
ansible all -a 'systemctl restart zabbix-agent2'
ansible all -a 'systemctl restart zabbix-agent'
Write a Test Script
vim zabbix_test.sh
#!/bin/bash
# version 2023-03-31
# test zabbix-agent and zabbix-agent2
######################################################
bridge=("180" "181" "182" "183" "184" "185" "186" "187" "188" "189" "190" "191" "192" "200")
test_agent (){
for i in ${bridge[*]};
do
        zabbix_get -s 192.168.186.$i -p10050 -k system.uname
        zabbix_get -s 192.168.186.$i -p10052 -k system.uname
done
}
######################################################
test_agent >a.txt
- Check
cat a.txt
Linux keep 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux nginx 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux nginx 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux Apache 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux Apache 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux DB1 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux DB1 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux DB2 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux DB2 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux DB3 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux DB3 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux ceph1 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux ceph1 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux ceph2 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux ceph2 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux ceph3 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux ceph3 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux Zabbix 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux Zabbix 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux delk1 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux delk2 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux delk3 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Linux jumpserver 3.10.0-1160.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64
Configuring Zabbix Auto-discovery
Configure the discovery rule


Configure the discovery action




- View the discovered hosts
  
Auto-discovery for agent2
- Repeat the steps above
  
Monitoring Ceph with Zabbix
Add an agent2 host separately

Configure the Ceph API
ceph mgr module enable restful
ceph restful create-self-signed-cert
ceph restful create-key zabbix
ceph restful list-keys

Test the API connection
curl -k https://zabbix:7d593747-1029-4cd6-865c-9d397b971cef@192.168.186.186:8003/server

Add the key in the web UI

Skip SSL verification
echo "Plugins.Ceph.InsecureSkipVerify=true">> /etc/zabbix/zabbix_agent2.conf
systemctl restart zabbix-agent2
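- Before building the template, the agent 2 Ceph plugin can be exercised directly from the Zabbix server. A minimal check, assuming the plugin's built-in ceph.ping key and the API key generated above (parameter order: connString, user, apikey):
```sh
# Should return 1 when agent 2 can reach the Ceph restful module
zabbix_get -s 192.168.186.186 -p 10052 \
  -k 'ceph.ping["https://192.168.186.186:8003","zabbix","7d593747-1029-4cd6-865c-9d397b971cef"]'
```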
Zabbix monitoring for Nginx
Define monitoring parameters in the config file
vim /usr/local/nginx/conf/nginx.conf
location /status {
             stub_status on;
             access_log off;
             allow 127.0.0.1;
             allow 192.168.1.11;
             deny all;
        }
- Test the configuration
nginx -s reload
curl http://192.168.186.181/status 
Active connections: 1 
server accepts handled requests
 13035 13035 682 
Reading: 0 Writing: 1 Waiting: 0
mkdir /etc/zabbix/scripts  
vim /etc/zabbix/scripts/nginx_status.sh
cat /etc/zabbix/scripts/nginx_status.sh
#!/bin/bash
# Description: Zabbix monitoring of Nginx status
# Note: this script runs on the monitored host
HOST="127.0.0.1"
PORT="80"
# Check whether the nginx process is running
function ping {
    /sbin/pidof nginx | wc -l
}
# Collect nginx performance counters
function active {
    /usr/bin/curl "http://$HOST:$PORT/status/" 2>/dev/null| grep 'Active' | awk '{print $NF}'
}
function reading {
    /usr/bin/curl "http://$HOST:$PORT/status/" 2>/dev/null| grep 'Reading' | awk '{print $2}'
}
function writing {
    /usr/bin/curl "http://$HOST:$PORT/status/" 2>/dev/null| grep 'Writing' | awk '{print $4}'
}
function waiting {
    /usr/bin/curl "http://$HOST:$PORT/status/" 2>/dev/null| grep 'Waiting' | awk '{print $6}'
}
function accepts {
    /usr/bin/curl "http://$HOST:$PORT/status/" 2>/dev/null| awk NR==3 | awk '{print $1}'
}
function handled {
    /usr/bin/curl "http://$HOST:$PORT/status/" 2>/dev/null| awk NR==3 | awk '{print $2}'
}
function requests {
    /usr/bin/curl "http://$HOST:$PORT/status/" 2>/dev/null| awk NR==3 | awk '{print $3}'
}
# Run the function named by the first argument
$1
- Make it executable and test
chmod +x /etc/zabbix/scripts/nginx_status.sh
sh scripts/nginx_status.sh active
- Define the monitoring parameter
vim /etc/zabbix/zabbix_agentd.d/nginx_status.conf
UserParameter=nginx.status[*],/etc/zabbix/scripts/nginx_status.sh $1
- Test
systemctl restart zabbix-agent 
zabbix_get -s 192.168.186.181 -p 10050 -k nginx.status[handled]
Web UI configuration
Create an Nginx template
- Configuration -> Templates -> Create template
  
- Create items in the template
  
- When creating items, choose names that make the purpose obvious. The critical field is the key, which must exactly match the key defined in the agent-side configuration file.
  
  
- Create a graph
  
- Link the template
  
- View the monitoring graphs
  
Zabbix monitoring for MySQL
Agent monitoring
- Locate the agent module directory
find / -name userparameter_mysql.conf
cd /etc/zabbix
- Set up the connection between the database and Zabbix
vim /etc/zabbix/etc/.my.cnf
[client]
host=localhost
user='root'
password='123456'
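- The HOME= prefix used in the parameters below works because the MySQL client tools read $HOME/.my.cnf; pointing HOME at /etc/zabbix/etc makes them pick up this credentials file. A quick manual sanity check, under that assumption:
```sh
# mysqladmin should answer without prompting for a password
# (assumes /etc/zabbix/etc/.my.cnf from the step above exists)
HOME=/etc/zabbix/etc /usr/local/mysql/bin/mysqladmin ping
```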
- Point the parameters at the MySQL credentials file and give each command an absolute path
vim /etc/zabbix/zabbix_agentd.d/userparameter_mysql.conf
UserParameter=mysql.ping[*],HOME=/etc/zabbix/etc  /usr/local/mysql/bin/mysqladmin -h"$1" -P"$2" ping
UserParameter=mysql.get_status_variables[*],HOME=/etc/zabbix/etc /usr/local/mysql/bin/mysql -h"$1" -P"$2" -sNX -e "show global status"
UserParameter=mysql.version[*],HOME=/etc/zabbix/etc /usr/local/mysql/bin/mysqladmin -s -h"$1" -P"$2" version
UserParameter=mysql.db.discovery[*],HOME=/etc/zabbix/etc /usr/local/mysql/bin/mysql -h"$1" -P"$2" -sN -e "show databases"
UserParameter=mysql.dbsize[*],HOME=/etc/zabbix/etc /usr/local/mysql/bin/mysql -h"$1" -P"$2" -sN -e "SELECT SUM(DATA_LENGTH + INDEX_LENGTH) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA='$3'"
UserParameter=mysql.replication.discovery[*],HOME=/etc/zabbix/etc /usr/local/mysql/bin/mysql -h"$1" -P"$2" -sNX -e "show slave status"
UserParameter=mysql.slave_status[*],HOME=/etc/zabbix/etc /usr/local/mysql/bin/mysql -h"$1" -P"$2" -sNX -e "show slave status"
systemctl restart zabbix-agent
- Test the connection from the Zabbix server
zabbix_get -s 192.168.186.183 -p 10050 -k "mysql.ping[192.168.186.183,3306]"
mysqld is alive
Zabbix monitoring for Redis
Write a script and define the parameter
- Redis is deployed as the middleware on the DELK hosts, so write the script there.
- Create a script directory on the Redis host
mkdir /etc/zabbix/scripts
cd !$
- Write a simple script that reports the Redis status
vim redis_status.sh
#!/bin/bash
systemctl status redis|grep Active|awk '{print $3}'
- Make it executable and define the monitoring parameter
chmod +x redis_status.sh
vim /etc/zabbix/zabbix_agentd.d/redis_status.conf
UserParameter=redis.status,/etc/zabbix/scripts/redis_status.sh
- Restart the agent to apply
systemctl restart zabbix-agent
zabbix_get -s 192.168.186.191 -p 10050 -k redis.status
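- Parsing the third field of the "Active:" line yields "(running)" or "(dead)", which is fragile across systemd versions. A slightly more robust sketch, assuming redis-cli is in PATH and the password configured for Redis later in this document:
```sh
#!/bin/bash
# Print 1 if Redis answers PONG, 0 otherwise
if redis-cli -a 123456 ping 2>/dev/null | grep -q PONG; then
    echo 1
else
    echo 0
fi
```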
Add the monitoring in the web UI
- Create a user-defined template group
  
- Create the redis_status template
  
- Add a trigger
  
- Build the trigger with the expression constructor
  
  
- Click to add the expression
  
- Link the Redis template to the host
  
Monitor ELK, Kafka, and ZooKeeper
Write the monitoring script
- Create the script directory
- Count the jps output: it should list elasticsearch, logstash, kibana, kafka, zookeeper, and jps itself; if the count drops below 6, a service in the stack is down
[root@delk1 ~]# mkdir /etc/zabbix/scripts    
[root@delk1 ~]# cd !$
[root@delk1 scripts]# vim elk_status.sh
#!/bin/bash
jps|wc -l
- After making it executable, define the parameter
[root@delk1 scripts]# chmod +x elk_status.sh   
[root@delk1 scripts]# vim /etc/zabbix/zabbix_agentd.d/elk_status.conf
UserParameter=elk.status,/etc/zabbix/scripts/elk_status.sh
- Restart the agent to apply the config
[root@delk1 scripts]# systemctl restart zabbix-agent   
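- Counting jps lines says that something is down, but not what. A hypothetical per-service variant (note that jps only lists JVM processes: ZooKeeper shows up as QuorumPeerMain, and Kibana, a Node.js app, never appears in jps at all; the count of 6 on these hosts comes from the JVM processes actually present plus Jps itself):
```sh
#!/bin/bash
# Report each expected JVM main class that is missing; print the count of missing services
missing=0
for svc in Elasticsearch Logstash Kafka QuorumPeerMain; do
    jps | grep -q "$svc" || { echo "missing: $svc" >&2; missing=$((missing+1)); }
done
echo $missing
```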
Configure the monitoring in the web UI
- Add an elk status template to the custom template group
  
- Create the item
  
- Add a trigger: alert when the value drops below 6, meaning part of the ELK stack is down
  
  
- Link the ELK template to the hosts
  
ELK deployment
Deploy the Elasticsearch cluster
- Reference: official docs: Install Elasticsearch from archive on Linux or MacOS | Elasticsearch Guide [8.4] | Elastic
Install
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-8.4.0-linux-x86_64.tar.gz
tar -zxvf elasticsearch-8.4.0-linux-x86_64.tar.gz -C /usr/local/
useradd elk
chown -R elk:elk /usr/local/elasticsearch-8.4.0
Environment preparation
Kernel parameters and limits
echo "vm.max_map_count = 655360" >>/etc/sysctl.conf
sysctl -p
vim /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536
* soft nproc 65536
* hard nproc 65536
JDK preparation
- Note: although Elasticsearch ships with a bundled JDK, the bundled build proved unstable, so Oracle's JDK (the JDK 20 RPM below) is used instead.
- Upload the package
- Install the JDK
rpm -ivh jdk-20_linux-x64_bin.rpm
java -version

Start the Elasticsearch cluster
Configuration
- Full parameter reference: official docs: Discovery and cluster formation settings
- On every node, switch to the elk user and edit the config file
su - elk
cd /usr/local/elasticsearch-8.4.0/
vim config/elasticsearch.yml
- Master node config file
cluster.name: my-application
node.name: node-1
bootstrap.memory_lock: false
network.host: 192.168.186.190
http.port: 9200
discovery.seed_hosts: ["192.168.186.190:9300", "192.168.186.191:9300", "192.168.186.192:9300"]
cluster.initial_master_nodes: ["node-1"]
xpack.security.enabled: false   # disable security, otherwise ES cannot be reached over plain HTTP
xpack.security.http.ssl:
  enabled: false
- Config file for the other nodes
cluster.name: my-application
node.name: node-2
bootstrap.memory_lock: false
network.host: 192.168.186.191
http.port: 9200
discovery.seed_hosts: ["192.168.186.190:9300", "192.168.186.191:9300", "192.168.186.192:9300"]
cluster.initial_master_nodes: ["node-1"]
xpack.security.enabled: false   # disable security, otherwise ES cannot be reached over plain HTTP
xpack.security.http.ssl:
  enabled: false
Start and test
./bin/elasticsearch

- Check ports and processes
jps -m
ss -antup|grep 9300
ss -antup|grep 9200
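- The open ports only prove each process is listening; to confirm the three nodes actually formed one cluster, query the cluster health API (plain HTTP works because security is disabled above):
```sh
# Expect "number_of_nodes": 3 and "status": "green" once all nodes have joined
curl -s http://192.168.186.190:9200/_cluster/health?pretty
```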

Full startup (init script)
vim /etc/init.d/elasticsearch
#!/bin/sh
#chkconfig: 2345 80 05
#description: elasticsearch
#author: taft
export JAVA_HOME=/usr/local/elasticsearch-8.4.0/jdk
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export JAVA_HOME  PATH CLASSPATH
case "$1" in
start)
    su elk<<!
    cd /usr/local/elasticsearch-8.4.0/
    ./bin/elasticsearch -d
!
    echo "elasticsearch startup"
    ;;
stop)
    es_pid=`jps | grep Elasticsearch | awk '{print $1}'`
    kill -9 $es_pid
    echo "elasticsearch stopped"
    ;;
restart)
    es_pid=`jps | grep Elasticsearch | awk '{print $1}'`
    kill -9 $es_pid
    echo "elasticsearch stopped"
    su elk<<!
    cd /usr/local/elasticsearch-8.4.0/
    ./bin/elasticsearch -d
!
    echo "elasticsearch startup"
    ;;
*)
    echo "start|stop|restart"
    ;;
esac
exit $?
chmod +x /etc/init.d/elasticsearch
chkconfig --add elasticsearch
systemctl restart elasticsearch
ps -aux | grep elasticsearch
jps
cd && rm -rf elasticsearch-8.4.0-linux-x86_64.tar.gz 

Install Kibana & Logstash & Redis
- Unpack Kibana and Kafka on the ELK hosts
ansible  ELK -m unarchive -a "src=kibana-8.4.0-linux-x86_64.tar.gz dest=/usr/local/"
ansible ELK -m unarchive -a "src=kafka_2.13-3.2.1.tgz dest=/usr/local"
- Install Redis
ansible ELK -m yum -a "name=epel-release"
ansible ELK -m yum -a "name=redis"
Configure Kibana
- Adjust the config file
vim /usr/local/kibana-8.4.0/config/kibana.yml
server.port: 5601
server.host: "192.168.186.190"
elasticsearch.hosts: ["http://192.168.186.190:9200"]
i18n.locale: "zh-CN"

- Start
/usr/local/kibana-8.4.0/bin/kibana --allow-root
nohup /usr/local/kibana-8.4.0/bin/kibana --allow-root &
echo "nohup /usr/local/kibana-8.4.0-linux-x86_64/bin/kibana --allow-root &" >> /etc/rc.local
ss -antup|grep 5601
Configure ZooKeeper
- Extract the package
tar -zxvf apache-zookeeper-3.7.1-bin.tar.gz -C /usr/local/ 
- Prepare the data and log directories
mkdir -p /data/zk/data
mkdir -p /data/zk/datalog
- Prepare the config file
 cd /usr/local/apache-zookeeper-3.7.1-bin/conf/
 cp zoo_sample.cfg zoo.cfg
- Edit the config file
vim zoo.cfg
- Parameters to change
dataDir=/data/zk/data
dataLogDir=/data/zk/datalog
tickTime=2000
# heartbeat interval in milliseconds, i.e. one check every two seconds
initLimit=10
# maximum heartbeats for initial sync; after 10 unanswered heartbeats the peer is considered failed
syncLimit=5
# maximum interval between leader and follower messages: 5 heartbeat periods
clientPort=2181
# ZooKeeper client port
- Add a PATH environment variable (a symlink will not work here)
vim /etc/profile
export ZOOKEEPER_HOME=/usr/local/apache-zookeeper-3.7.1-bin
export PATH=$ZOOKEEPER_HOME/bin:$PATH
source /etc/profile
- Write a systemd unit to manage ZooKeeper
 [root@delk1 conf]# vim /lib/systemd/system/zookeeper.service  
[Unit]
Description=Zookeeper service
After=network.target
[Service]
Type=forking
Environment="JAVA_HOME=/usr/local/jdk1.8.0_171"
User=root
Group=root
ExecStart=/usr/local/apache-zookeeper-3.7.1-bin/bin/zkServer.sh start
ExecStop=/usr/local/apache-zookeeper-3.7.1-bin/bin/zkServer.sh stop
[Install]
WantedBy=multi-user.target
- Start the service
[root@delk1 conf]# systemctl daemon-reload
[root@delk1 conf]# systemctl start zookeeper
[root@delk1 conf]# systemctl enable zookeeper
[root@delk1 conf]# jps
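- Besides jps, ZooKeeper's own status command confirms the node is serving and reports its role:
```sh
# Shows Mode: standalone for a single node, or leader/follower in an ensemble
zkServer.sh status
```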
Configure Kafka
- Edit the config file
[root@delk1 ~]# vim /usr/local/kafka_2.13-3.2.1/config/server.properties  
[root@delk1 ~]# cat !$ |grep -v ^$|grep -v ^#
broker.id=0
listeners=PLAINTEXT://:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/data/kafka/log
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=localhost:2181
zookeeper.connection.timeout.ms=18000
group.initial.rebalance.delay.ms=0
- Create the Kafka log directory
mkdir -p /data/kafka/log
- Add a PATH environment variable
vim /etc/profile
export KAFKA_HOME=/usr/local/kafka_2.13-3.2.1
export PATH=$KAFKA_HOME/bin:$PATH
source !$
- Create the systemd unit
vim /lib/systemd/system/kafka.service
[Unit]
Description=Apache Kafka server (broker)
After=network.target  zookeeper.service
[Service]
Type=simple
Environment="PATH=/usr/local/jdk1.8.0_171/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin"
User=root
Group=root
ExecStart=/usr/local/kafka_2.13-3.2.1/bin/kafka-server-start.sh /usr/local/kafka_2.13-3.2.1/config/server.properties
ExecStop=/usr/local/kafka_2.13-3.2.1/bin/kafka-server-stop.sh
Restart=on-failure
[Install]
WantedBy=multi-user.target
- Reload and start the service
systemctl daemon-reload   # reload systemd units
systemctl start kafka
systemctl enable kafka
jps -m
12421 Kafka /usr/local/kafka_2.13-3.2.1/config/server.properties
4041 CliToolLauncher
12780 Jps -m
4109 Elasticsearch
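- Before wiring Filebeat in, the broker can be smoke-tested by creating the topic the pipeline will use (the Kafka 3.x tools take --bootstrap-server). A sketch, assuming the topic does not exist yet:
```sh
# Create the topic Filebeat and Logstash will share, then confirm it is listed
kafka-topics.sh --create --topic messages_logs --bootstrap-server localhost:9092 \
  --partitions 1 --replication-factor 1
kafka-topics.sh --list --bootstrap-server localhost:9092
```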
Configure Redis
- Adjust the config file
vim /etc/redis.conf
bind 0.0.0.0
requirepass 123456
- Start Redis
systemctl start redis
- Enter the Redis CLI and seed the list used as a cache
redis-cli
auth 123456
lpush httpd_log 0
Configure Filebeat
- Filebeat is the log-collection agent and must be deployed on every node.
Ship to Kafka
vim /usr/local/filebeat-8.4.0-linux-x86_64/filebeat.yml
filebeat.inputs:
- type: filestream
  id: my-filestream-id
  enabled: true
  paths:
    - /var/log/messages
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.kibana:
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
output.kafka:           
  enabled: true         
  hosts: ["192.168.186.190:9092"] 
  topic: messages_logs  
cd /usr/local/filebeat-8.4.0-linux-x86_64/
./filebeat -e -c filebeat.yml &
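- To confirm events are actually reaching Kafka, attach a console consumer to the topic; it should print JSON documents as new lines land in /var/log/messages:
```sh
# Read the topic from the beginning; Ctrl-C to stop
kafka-console-consumer.sh --bootstrap-server 192.168.186.190:9092 \
  --topic messages_logs --from-beginning
```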
Ship to Redis (on the Apache hosts)
vim /usr/local/filebeat-8.4.0-linux-x86_64/filebeat.yml 
filebeat.inputs:
- type: log
  id: Apache
  enabled: true
  paths:
     - /var/log/httpd/*
  json.keys_under_root: true
  json.overwrite_keys: true
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.kibana:
  host: "192.168.186.190:5601"
output.redis:
  hosts: ["192.168.186.192:6379", "192.168.186.191:6379"]
  password: 123456
  key: "httpd_log"
  db: 0
  timeout: 5
setup.template.name: "apache"
setup.template.pattern: "apache-*"
setup.template.enabled: false
setup.template.overwrite: true
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
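- A quick way to confirm events are landing in Redis is to watch the length of the list Filebeat writes to (the httpd_log key seeded earlier); the count should grow as Apache access-log lines arrive:
```sh
redis-cli -h 192.168.186.191 -a 123456 llen httpd_log
```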
Configure Logstash
- Adjust the config file to pull messages from Kafka
vim /usr/local/logstash-8.4.0/config/messages_logstash.conf
input{
    kafka {
        codec => plain{charset => "UTF-8"}
        bootstrap_servers => "192.168.186.190:9092"
        client_id => "messages_logs"  
        group_id => "messages_logs"
        consumer_threads => 5
        auto_offset_reset => "latest"  
        decorate_events => true  
        topics => "messages_logs"
    }
}
output {
  stdout {
      codec => "rubydebug"
  }
  elasticsearch {
      hosts => [ "192.168.186.190:9200" ]
      index => "messages-logs-%{+YYYY.MM.dd}"
  }
}
- Symlink the binaries and start
ln -s /usr/local/logstash-8.4.0/bin/* /usr/local/bin/
logstash -f /usr/local/logstash-8.4.0/config/messages_logstash.conf
- Check the data in Kibana
  
 
 
DNS deployment
Install DNS
- Install on both the master and the slave nodes
yum install -y bind bind-utils bind-chroot
Configure the master node
Edit the main config file
vim /etc/named.conf
        listen-on port 53 { any; };
        listen-on-v6 port 53 { any; };
        allow-query     { any; };
zone "discuz.com" IN {
        type master;
        file "discuz.zone";
    allow-transfer { 192.168.186.191; 192.168.186.192; 192.168.186.190; };
};
zone "discuz.arpa" IN {
        type master;
        file "discuz.arpa";
    allow-transfer { 192.168.186.191; 192.168.186.192; 192.168.186.190; };
};
Configure the forward zone file
cp /var/named/named.localhost /var/named/discuz.zone
vim /var/named/discuz.zone
$TTL 1D
@       IN SOA  discuz.com. rname.invalid. (
                                        0       ; serial
                                        1D      ; refresh
                                        1H      ; retry
                                        1W      ; expire
                                        3H )    ; minimum
discuz.com.     IN      NS      dns01.discuz.com.
discuz.com.     IN      NS      dns02.discuz.com.
discuz.com.     IN      NS      dns03.discuz.com.
dns01.discuz.com.       IN      A       192.168.186.191
dns02.discuz.com.       IN      A       192.168.186.192
dns03.discuz.com.       IN      A       192.168.186.190
www.discuz.com. IN      A       192.168.45.88
mail.discuz.com.        IN      A       192.168.186.180
discuz.com.     IN      MX      180     mail.discuz.com.
Configure the reverse zone file
cp /var/named/named.localhost /var/named/discuz.arpa
vim /var/named/discuz.arpa
$TTL 1D
@       IN SOA  discuz.com. rname.invalid. (
                                        0       ; serial
                                        1D      ; refresh
                                        1H      ; retry
                                        1W      ; expire
                                        3H )    ; minimum
        IN      NS      dns01.discuz.com.
        IN      NS      dns02.discuz.com.
        IN      NS      dns03.discuz.com.
191     IN      PTR     dns01.discuz.com.
192     IN      PTR     dns02.discuz.com.
190     IN      PTR     dns03.discuz.com.
88      IN      PTR     www.discuz.com.
180     IN      PTR     mail.discuz.com.
- Adjust file ownership
chown root:named /var/named/discuz.zone
chown root:named /var/named/discuz.arpa
- Check the config files
named-checkconf -z /etc/named.conf  
zone discuz.com/IN: loaded serial 0
zone discuz.arpa/IN: loaded serial 0
zone localhost.localdomain/IN: loaded serial 0
zone localhost/IN: loaded serial 0
zone 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa/IN: loaded serial 0
zone 1.0.0.127.in-addr.arpa/IN: loaded serial 0
zone 0.in-addr.arpa/IN: loaded serial 0
Start DNS
- Start
systemctl start named
systemctl enable named
- Point the resolver at the new DNS server
cat /etc/resolv.conf
# Generated by NetworkManager
#nameserver 8.8.8.8
nameserver 192.168.186.191
- Test
curl discuz.com
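- dig gives a more direct check than curl, querying the new server explicitly for the records defined above:
```sh
# Forward lookup and the NS set, straight from the master
dig @192.168.186.191 www.discuz.com +short
dig @192.168.186.191 discuz.com NS +short
```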

Configure the slave nodes
Adjust the zone configuration
- Main config file
vim /etc/named.conf
zone "discuz.com" IN {
        type slave;
        file "slave/discuz.zone";
    masters { 192.168.186.191; };
};
zone "discuz.arpa" IN {
        type slave;
        file "slave/discuz.arpa";
    masters { 192.168.186.191; };
};
named-checkconf /etc/named.conf
- Start the service
systemctl start named
systemctl enable named
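- The zone transfer can be verified on a slave: the transferred files should appear under /var/named/slave/, and the slave should answer queries itself:
```sh
ls /var/named/slave/
dig @192.168.186.192 www.discuz.com +short
```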
Project results
Web demo
Final Nginx configuration
worker_processes auto;   # number of worker processes; auto matches the CPU count
worker_cpu_affinity auto;  # distribute workers across CPU cores; auto lets the OS schedule them
#error_log  logs/error.log;    # error log path and name
#error_log  logs/error.log  notice;  # error log at the notice level
#error_log  logs/error.log  info;    # error log at the info level
#pid        logs/nginx.pid;   # pid file of the nginx master process
events {   # events module
    use epoll;   # event mechanism for handling IO (epoll / kqueue / etc.); the default is select
    worker_connections 51200;   # maximum connections per worker process
    multi_accept off;   # whether a worker accepts several connections at once; default off
    accept_mutex off;   # whether workers serialize accept(); off relies on atomic operations
}
http {    # HTTP module
    include       mime.types;   # MIME type definitions used to set Content-Type
    default_type  application/octet-stream;  # default Content-Type
    #log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
    #                  '$status $body_bytes_sent "$http_referer" '
    #                  '"$http_user_agent" "$http_x_forwarded_for"';
    #access_log  logs/access.log  main;  # access log path and format
    server_names_hash_bucket_size 128;   # bucket size for the server-name hash table
    client_header_buffer_size 32k;    # buffer size for client request headers
    large_client_header_buffers 4 32k;   # number and size of large header buffers per connection
    client_max_body_size 50m;    # maximum size of an upload request
    sendfile on;   # kernel-level file transfer
    sendfile_max_chunk 512k;   # limit on the data sent per sendfile() call
    tcp_nopush on;   # send the response headers and the start of the body together
    keepalive_timeout 60;   # keep-alive timeout after a response, in seconds
    tcp_nodelay on;    # disable Nagle's algorithm (no coalescing of small packets) for lower latency
    fastcgi_connect_timeout 300;   # timeout for connecting to the FastCGI process
    fastcgi_send_timeout 300;   # timeout for sending data to the FastCGI process
    fastcgi_read_timeout 300;   # timeout for reading the FastCGI response
    fastcgi_buffer_size 64k;   # buffer for the first part of the FastCGI response
    fastcgi_buffers 4 64k;   # number and size of buffers for the FastCGI response
    fastcgi_busy_buffers_size 128k;   # buffer size for data still being sent to the client
    fastcgi_temp_file_write_size 256k;   # write size when spooling FastCGI data to temp files
    gzip on;  # enable gzip compression
    gzip_min_length  1k;  # minimum response size to compress
    gzip_buffers     4 16k;   # number and size of compression buffers
    gzip_http_version 1.1;   # minimum HTTP version for compression
    gzip_comp_level 2;   # compression level
    gzip_types     text/plain application/javascript application/x-javascript text/javascript text/css application/xml application/xml+rss;   # MIME types to compress
    gzip_vary on;   # add Vary: Accept-Encoding so caches store per-encoding variants
    gzip_proxied   expired no-cache no-store private auth;   # which proxied responses to compress
    
    #tcp_nopush     on;
    
    
    upstream apache {
        server 192.168.186.183:80;
        server 192.168.186.182:80;
        server 192.168.186.180:80;
    }
    upstream nginx {
        server 192.168.186.181:808;
        server 192.168.186.184:808;
        server 192.168.186.185:808;
    }
    server{
        listen 808;
        server_name  localhost;
        charset utf-8;
        
        location  ~  .*\.(gif|jpg|jpeg|png|bmp|rar|zip|txt|flv|mid|doc|ppt|xls|mp3|wma|html|htm|css|js|ico|swf|pdf)$ {
            root html;
            expires 30d;
        }
    }
    server {
        listen       80;
        server_name  localhost;
        #charset koi8-r;
        #access_log  logs/host.access.log  main;
        client_max_body_size 100m;
        location / {
            root html;
            index  index.php index.html index.htm;
            proxy_pass http://apache;
            proxy_redirect off;
            proxy_set_header  Host  $host;
            proxy_set_header  X-Forwarded-For  $remote_addr;
            proxy_set_header  X-Forwarded-Proto $scheme;
            proxy_ignore_headers Set-Cookie;
            proxy_hide_header Set-Cookie;
            proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
        }
        location ~ .*\.(gif|jpg|jpeg|png|bmp|rar|zip|txt|flv|mid|doc|ppt|xls|mp3|wma|html|htm|css|js|ico|swf|pdf)$
        {
            proxy_pass http://nginx;
        }
        #error_page  404              /404.html;
        # redirect server error pages to the static page /50x.html
        #
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
        # proxy the PHP scripts to Apache listening on 127.0.0.1:80
        #
        #location ~ \.php$ {
        #    proxy_pass   http://127.0.0.1;
        #}
        # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
        #
        #location ~ \.php$ {
        #    root           html;
        #    fastcgi_pass   127.0.0.1:9000;
        #    fastcgi_index  index.php;
        #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
        #    include        fastcgi_params;
        #}
        # deny access to .htaccess files, if Apache's document root
        # concurs with nginx's one
        #
        #location ~ /\.ht {
        #    deny  all;
        #}
    }
    # another virtual host using mix of IP-, name-, and port-based configuration
    #
    #server {
    #    listen       8000;
    #    listen       somename:8080;
    #    server_name  somename  alias  another.alias;
    #    location / {
    #        root   html;
    #        index  index.php index.html index.htm;
    #    }
    #}
    # HTTPS server
    #
    #server {
    #    listen       443 ssl;
    #    server_name  localhost;
    #    ssl_certificate      cert.pem;
    #    ssl_certificate_key  cert.key;
    #    ssl_session_cache    shared:SSL:1m;
    #    ssl_session_timeout  5m;
    #    ssl_ciphers  HIGH:!aNULL:!MD5;
    #    ssl_prefer_server_ciphers  on;
    #    location / {
    #        root   html;
    #        index  index.php index.html index.htm;
    #    }
    #}
}
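- With this configuration, the dynamic/static split can be verified from a client: PHP requests should be answered by the apache upstream and static assets by the nginx upstream on port 808. A sketch against the site domain (logo.png is a hypothetical static file):
```sh
# Dynamic request: matched by location / and proxied to the apache pool
curl -I http://www.discuz.com/index.php
# Static request: matched by the extension regex and proxied to the nginx pool
curl -I http://www.discuz.com/logo.png
```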
Load-balancing packet capture
- Nginx reverse-proxying the backend Apache servers
  
  
Zabbix monitoring screenshots




 
                
            
         