Deploying a Standard TiDB Cluster

Cluster plan
Role            Host          
----            ----          
pd              192.168.11.141
pd              192.168.11.142
pd              192.168.11.143
tidb-dashboard  192.168.11.144
tikv            192.168.11.141
tikv            192.168.11.142
tikv            192.168.11.143
tikv            192.168.11.144
tikv            192.168.11.145
tikv            192.168.11.146
tidb            192.168.11.143
tidb            192.168.11.144
tidb            192.168.11.145
tidb            192.168.11.146
tiflash         192.168.11.143
tiflash         192.168.11.144
tiflash         192.168.11.145
tiflash         192.168.11.146
tikv-cdc        192.168.11.142
tikv-cdc        192.168.11.143
prometheus      192.168.11.141
grafana         192.168.11.142
alertmanager    192.168.11.143




Configure operating system parameters
ulimit -n 65536
ulimit -m unlimited
ulimit -s 8192
ulimit -c unlimited
cat >>/etc/security/limits.conf<<EOF
* soft nofile 65536
* hard nofile 65536
* soft nproc 65536
* hard nproc 65536
* soft rss unlimited
* hard rss unlimited
* soft stack 8192
* hard stack 8192
* soft core unlimited
* hard core unlimited
EOF
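
As a quick sanity check (my own addition, not part of the original steps), open a fresh login shell, since limits.conf is applied via PAM at login, and confirm the limits took effect:

ulimit -n   # expect 65536 (open files)
ulimit -u   # expect 65536 (max user processes)
ulimit -s   # expect 8192 (stack size, KB)
ulimit -c   # expect unlimited (core file size)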



Disable system swap
echo "vm.swappiness = 0">> /etc/sysctl.conf
swapoff -a && swapon -a
sysctl -p
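
To verify (an extra check, not in the original notes) that swap is off and the setting stuck:

free -h               # the Swap line should read 0B total
sysctl vm.swappiness  # should print vm.swappiness = 0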


Disable the firewall
systemctl stop firewalld.service
systemctl disable firewalld.service
systemctl status firewalld.service


Disable SELinux:
setenforce 0
sed -i 's/^SELINUX=.*$/SELINUX=disabled/g' /etc/selinux/config
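
To verify, getenforce should now report Permissive for the current session, and Disabled after the next reboot:

getenforce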
 
 

Configure NTP time synchronization
Remove the preinstalled chrony and install the NTP service
yum -y remove chrony
yum -y install ntp


Apply the following configuration on all nodes.
Note: 192.168.200.216 is the company's internal NTP server.
mv /etc/ntp.conf /etc/ntp.conf.bak

cat <<EOF > /etc/ntp.conf
server 192.168.200.216
fudge 192.168.200.216 stratum 10
EOF

Start the ntpd service on each server and enable it at boot
systemctl restart ntpd
systemctl enable ntpd.service
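
To confirm each node is actually syncing against 192.168.200.216 (a verification step I would add here): ntpstat exits 0 once the clock is synchronised, and ntpq -p should list the internal server with a '*' prefix:

ntpstat
ntpq -p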



Check whether THP (Transparent Huge Pages) is enabled (the output below means it is enabled)
cat /sys/kernel/mm/transparent_hugepage/defrag
[always] madvise never

cat /sys/kernel/mm/transparent_hugepage/enabled
[always] madvise never


Disable THP (takes effect immediately)
echo 'never' > /sys/kernel/mm/transparent_hugepage/enabled
echo 'never' > /sys/kernel/mm/transparent_hugepage/defrag
cat /sys/kernel/mm/transparent_hugepage/defrag
cat /sys/kernel/mm/transparent_hugepage/enabled
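
Both files should now show never selected:

always madvise [never]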


Disable THP (persists across reboots)
cat >> /etc/rc.d/rc.local << EOF
if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
echo 'never' > /sys/kernel/mm/transparent_hugepage/enabled
fi
if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
echo 'never' > /sys/kernel/mm/transparent_hugepage/defrag
fi
EOF

chmod +x /etc/rc.d/rc.local



Run grubby to check the default kernel version
grubby --default-kernel

Output:
/boot/vmlinuz-3.10.0-693.el7.x86_64


Run grubby --update-kernel to modify the kernel boot configuration
grubby --args="transparent_hugepage=never" --update-kernel /boot/vmlinuz-3.10.0-693.el7.x86_64


Run grubby --info to view the modified default kernel configuration
grubby --info /boot/vmlinuz-3.10.0-693.el7.x86_64
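
After the next reboot, the setting can be double-checked from the running kernel (my own check, not in the original steps):

grep transparent_hugepage /proc/cmdline           # should contain transparent_hugepage=never
cat /sys/kernel/mm/transparent_hugepage/enabled   # expect: always madvise [never]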



Run the following command to get the disk's unique identifier ID_SERIAL
udevadm info --name=/dev/sda | grep ID_SERIAL

When I ran this, the output was empty; I'm not sure why. (This is common on virtual machines, whose virtual disks often expose no serial number.)

Configure a udev rule to apply the I/O scheduler policy
vi /etc/udev/rules.d/60-tidb-schedulers.rules
ACTION=="add|change", SUBSYSTEM=="block", ENV{ID_SERIAL}=="36d0946606d79f90025f3e09a0c1fc035", ATTR{queue/scheduler}="noop"
ACTION=="add|change", SUBSYSTEM=="block", ENV{ID_SERIAL}=="36d0946606d79f90025f3e09a0c1f9e81", ATTR{queue/scheduler}="noop"


Apply the udev rule
udevadm control --reload-rules
udevadm trigger --type=devices --action=change

I skipped the two commands above, because no ID_SERIAL could be found.
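
Had the rules been applied, the scheduler can be verified per block device; a sketch, assuming the data disks are sda and sdb:

cat /sys/block/sda/queue/scheduler   # expect: [noop] deadline cfq
cat /sys/block/sdb/queue/scheduler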



Check the power policy selected by the cpufreq module; it should be set to performance.
On virtual machines or cloud hosts no adjustment is needed; the command usually prints "Unable to determine current policy".
cpupower frequency-info --policy


Create a systemd service that applies the performance CPU policy, then enable and start it
cat >> /etc/systemd/system/cpupower.service << EOF
[Unit]
Description=CPU performance
[Service]
Type=oneshot
ExecStart=/usr/bin/cpupower frequency-set --governor performance
[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable cpupower.service
systemctl start cpupower.service
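
On physical hosts, re-run the policy query to confirm; the output should now mention the performance governor:

cpupower frequency-info --policy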



Install the numactl tool
yum -y install numactl
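
numactl itself is only a prerequisite here: TiUP invokes it for an instance when that instance declares numa_node in the topology file. A sketch of what that could look like for one TiKV instance (the numa_node value "0" is an assumption for illustration):

tikv_servers:
  - host: 192.168.11.141
    port: 20160
    numa_node: "0"   # TiUP starts this instance under numactl, bound to NUMA node 0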



Install the TiUP tool on the control machine (online method; skip this if using the offline packages below)
curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh



Deploy the TiUP components for an offline environment and merge the offline packages
tar -xvf tidb-community-server-v6.1.2-linux-amd64.tar.gz
sh tidb-community-server-v6.1.2-linux-amd64/local_install.sh
source /root/.bash_profile

tar -xvf tidb-community-toolkit-v6.1.2-linux-amd64.tar.gz
ls -ld tidb-community-server-v6.1.2-linux-amd64  tidb-community-toolkit-v6.1.2-linux-amd64
cd tidb-community-server-v6.1.2-linux-amd64/
cp -rp keys ~/.tiup/
tiup mirror merge ../tidb-community-toolkit-v6.1.2-linux-amd64
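
To confirm the merged offline mirror is active and serving the expected packages (extra checks, not in the original notes):

tiup mirror show   # should print the path of the local mirror directory
tiup list tidb     # v6.1.2 should appear among the available versions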


Generate the cluster initialization configuration file
tiup cluster template --full > topology.yaml


Run vi topology.yaml and edit the configuration file as follows:
global:
  user: "tidb"
  ssh_port: 22
  deploy_dir: "/tidb-deploy"
  data_dir: "/tidb-data"
  arch: "amd64"

monitored:
  node_exporter_port: 9100
  blackbox_exporter_port: 9115

pd_servers:
  - host: 192.168.11.141
  - host: 192.168.11.142
  - host: 192.168.11.143

tidb_servers:
  - host: 192.168.11.143
    port: 4000
    status_port: 10080
    deploy_dir: "/tidb-deploy/tidb-4000"
    log_dir: "/tidb-deploy/tidb-4000/log"
  - host: 192.168.11.144
    port: 4001
    status_port: 10081
    deploy_dir: "/tidb-deploy/tidb-4001"
    log_dir: "/tidb-deploy/tidb-4001/log"
  - host: 192.168.11.145
    port: 4000
    status_port: 10080
    deploy_dir: "/tidb-deploy/tidb-4000"
    log_dir: "/tidb-deploy/tidb-4000/log"
  - host: 192.168.11.146
    port: 4001
    status_port: 10081
    deploy_dir: "/tidb-deploy/tidb-4001"
    log_dir: "/tidb-deploy/tidb-4001/log"

tikv_servers:
  - host: 192.168.11.141
    port: 20160
    status_port: 20180
    deploy_dir: "/data1/tidb-deploy/tikv-20160"
    data_dir: "/data1/tidb-data/tikv-20160"
    log_dir: "/data1/tidb-deploy/tikv-20160/log"
  - host: 192.168.11.142
    port: 20161
    status_port: 20181
    deploy_dir: "/data2/tidb-deploy/tikv-20161"
    data_dir: "/data2/tidb-data/tikv-20161"
    log_dir: "/data2/tidb-deploy/tikv-20161/log"
  - host: 192.168.11.143
    port: 20160
    status_port: 20180
    deploy_dir: "/data1/tidb-deploy/tikv-20160"
    data_dir: "/data1/tidb-data/tikv-20160"
    log_dir: "/data1/tidb-deploy/tikv-20160/log"
  - host: 192.168.11.144
    port: 20161
    status_port: 20181
    deploy_dir: "/data2/tidb-deploy/tikv-20161"
    data_dir: "/data2/tidb-data/tikv-20161"
    log_dir: "/data2/tidb-deploy/tikv-20161/log"
  - host: 192.168.11.145
    port: 20160
    status_port: 20180
    deploy_dir: "/data1/tidb-deploy/tikv-20160"
    data_dir: "/data1/tidb-data/tikv-20160"
    log_dir: "/data1/tidb-deploy/tikv-20160/log"
  - host: 192.168.11.146
    port: 20161
    status_port: 20181
    deploy_dir: "/data2/tidb-deploy/tikv-20161"
    data_dir: "/data2/tidb-data/tikv-20161"
    log_dir: "/data2/tidb-deploy/tikv-20161/log"

tiflash_servers:
  - host: 192.168.11.143
    tcp_port: 9000
    http_port: 8123
    flash_service_port: 3930
    flash_proxy_port: 20170
    flash_proxy_status_port: 20292
    metrics_port: 8234
    deploy_dir: /data1/tidb-deploy/tiflash-9000
    data_dir: /data1/tidb-data/tiflash-9000
    log_dir: /data1/tidb-deploy/tiflash-9000/log
  - host: 192.168.11.144
    tcp_port: 9001
    http_port: 8124
    flash_service_port: 3931
    flash_proxy_port: 20171
    flash_proxy_status_port: 20293
    metrics_port: 8235
    deploy_dir: /data2/tidb-deploy/tiflash-9001
    data_dir: /data2/tidb-data/tiflash-9001
    log_dir: /data2/tidb-deploy/tiflash-9001/log
  - host: 192.168.11.145
    tcp_port: 9000
    http_port: 8123
    flash_service_port: 3930
    flash_proxy_port: 20170
    flash_proxy_status_port: 20292
    metrics_port: 8234
    deploy_dir: /data1/tidb-deploy/tiflash-9000
    data_dir: /data1/tidb-data/tiflash-9000
    log_dir: /data1/tidb-deploy/tiflash-9000/log
  - host: 192.168.11.146
    tcp_port: 9001
    http_port: 8124
    flash_service_port: 3931
    flash_proxy_port: 20171
    flash_proxy_status_port: 20293
    metrics_port: 8235
    deploy_dir: /data2/tidb-deploy/tiflash-9001
    data_dir: /data2/tidb-data/tiflash-9001
    log_dir: /data2/tidb-deploy/tiflash-9001/log

monitoring_servers:
  - host: 192.168.11.141 

grafana_servers:
  - host: 192.168.11.142

alertmanager_servers:
  - host: 192.168.11.143

  
  
Check the cluster for potential risks:
tiup cluster check ./topology.yaml --user root -p


Automatically repair the detected risks:
tiup cluster check ./topology.yaml --apply --user root -p


Deploy the TiDB cluster:
tiup cluster deploy tidb-test v6.1.2 ./topology.yaml --user root -p


Start the cluster (--init generates a random password for the root SQL user)
tiup cluster start tidb-test --init

The new password is: '$92&S8_3U4-sAxwVj7'.
 
Verify the cluster's running status
tiup cluster display tidb-test

[root@testnode1 ~]# tiup cluster display tidb-test
tiup is checking updates for component cluster ...
Starting component `cluster`: /root/.tiup/components/cluster/v1.11.0/tiup-cluster display tidb-test
Cluster type:       tidb
Cluster name:       tidb-test
Cluster version:    v6.1.2
Deploy user:        tidb
SSH type:           builtin
Dashboard URL:      http://192.168.11.143:2379/dashboard
Grafana URL:        http://192.168.11.142:3000
ID                    Role          Host            Ports                            OS/Arch       Status  Data Dir                       Deploy Dir
--                    ----          ----            -----                            -------       ------  --------                       ----------
192.168.11.143:9093   alertmanager  192.168.11.143  9093/9094                        linux/x86_64  Up      /tidb-data/alertmanager-9093   /tidb-deploy/alertmanager-9093
192.168.11.142:3000   grafana       192.168.11.142  3000                             linux/x86_64  Up      -                              /tidb-deploy/grafana-3000
192.168.11.141:2379   pd            192.168.11.141  2379/2380                        linux/x86_64  Up      /tidb-data/pd-2379             /tidb-deploy/pd-2379
192.168.11.142:2379   pd            192.168.11.142  2379/2380                        linux/x86_64  Up|L    /tidb-data/pd-2379             /tidb-deploy/pd-2379
192.168.11.143:2379   pd            192.168.11.143  2379/2380                        linux/x86_64  Up|UI   /tidb-data/pd-2379             /tidb-deploy/pd-2379
192.168.11.141:9090   prometheus    192.168.11.141  9090/12020                       linux/x86_64  Up      /tidb-data/prometheus-9090     /tidb-deploy/prometheus-9090
192.168.11.143:4000   tidb          192.168.11.143  4000/10080                       linux/x86_64  Up      -                              /tidb-deploy/tidb-4000
192.168.11.144:4001   tidb          192.168.11.144  4001/10081                       linux/x86_64  Up      -                              /tidb-deploy/tidb-4001
192.168.11.145:4000   tidb          192.168.11.145  4000/10080                       linux/x86_64  Up      -                              /tidb-deploy/tidb-4000
192.168.11.146:4001   tidb          192.168.11.146  4001/10081                       linux/x86_64  Up      -                              /tidb-deploy/tidb-4001
192.168.11.143:9000   tiflash       192.168.11.143  9000/8123/3930/20170/20292/8234  linux/x86_64  Up      /data1/tidb-data/tiflash-9000  /data1/tidb-deploy/tiflash-9000
192.168.11.144:9001   tiflash       192.168.11.144  9001/8124/3931/20171/20293/8235  linux/x86_64  Up      /data2/tidb-data/tiflash-9001  /data2/tidb-deploy/tiflash-9001
192.168.11.145:9000   tiflash       192.168.11.145  9000/8123/3930/20170/20292/8234  linux/x86_64  Up      /data1/tidb-data/tiflash-9000  /data1/tidb-deploy/tiflash-9000
192.168.11.146:9001   tiflash       192.168.11.146  9001/8124/3931/20171/20293/8235  linux/x86_64  Up      /data2/tidb-data/tiflash-9001  /data2/tidb-deploy/tiflash-9001
192.168.11.141:20160  tikv          192.168.11.141  20160/20180                      linux/x86_64  Up      /data1/tidb-data/tikv-20160    /data1/tidb-deploy/tikv-20160
192.168.11.142:20161  tikv          192.168.11.142  20161/20181                      linux/x86_64  Up      /data2/tidb-data/tikv-20161    /data2/tidb-deploy/tikv-20161
192.168.11.143:20160  tikv          192.168.11.143  20160/20180                      linux/x86_64  Up      /data1/tidb-data/tikv-20160    /data1/tidb-deploy/tikv-20160
192.168.11.144:20161  tikv          192.168.11.144  20161/20181                      linux/x86_64  Up      /data2/tidb-data/tikv-20161    /data2/tidb-deploy/tikv-20161
192.168.11.145:20160  tikv          192.168.11.145  20160/20180                      linux/x86_64  Up      /data1/tidb-data/tikv-20160    /data1/tidb-deploy/tikv-20160
192.168.11.146:20161  tikv          192.168.11.146  20161/20181                      linux/x86_64  Up      /data2/tidb-data/tikv-20161    /data2/tidb-deploy/tikv-20161
Total nodes: 20
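
As a final smoke test (not in the original notes), connect with any MySQL client to one of the tidb servers, using the root password printed by start --init; host and port below follow the topology above:

mysql -u root -h 192.168.11.143 -P 4000 -p
mysql> select tidb_version();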

 
