MySQL数据库服务器安装标准

MySQL数据库服务器安装标准

(1).BIOS优化,阵列配置

   1.1:关闭CPU节能,因为服务器品牌众多,BIOS设置不相同,主要是关闭CPU节能,如C1,DELLR730,已经智能设置,直接有个performance选项,帮你关闭了CPU节能,numa特性

   1.2:如果服务器是8块硬盘,建议两块做RAID1装系统,剩余6块做RAID10做数据分区,RAID1阵列缓存设置成 WriteThrough ,RAID10设置成writeback,将有限的阵列卡缓存给RAID10阵列用,如果是8块以上的盘,组建两组RAID10,两组缓存策略都是writeback,一组装系统以及存放顺序IO类型的数据,比如redolog,归档日志,mysql的binlog,一组做数据分区

   1.3:使用XFS文件系统,数据分区用XFS文件系统,挂载参数用defaults,noatime,nodiratime,nobarrier,记住根分区是不能用这个挂载参数,不然你根分区下的目录文件都没有访问时间,修改时间,只能用于数据库文件分区

   1.4:修改IO调度策略以及关闭numa:vim /etc/grub.conf 在kernel那行最末尾加上elevator=deadline numa=off

kernel /vmlinuz-2.6.32-504.el6.x86_64 ro root=UUID=af13b3dc-c142-42b7-8ed6-cb7c60608af2 rd_NO_LUKS  KEYBOARDTYPE=pc KEYTABLE=us rd_NO_MD crashkernel=auto LANG=zh_CN.UTF-8 rd_NO_LVM rd_NO_DM rhgb quiet elevator=deadline numa=off

要当前生效可以这样设置

cat /sys/block/sda/queue/scheduler
echo deadline > /sys/block/sda/queue/scheduler

 

 

(2)操作系统基础优化

 2.1:关闭selinux,修改资源配置

vim /etc/security/limits.conf

添加以下这段
 *               soft nofile          65535
 *               soft nproc           65535
 *                hard nofile         65535
 *                hard nproc          65535

sed -ri 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

当前设置selinux 和资源限制, setenforce 0 然后再getenforce 查看是否已经禁用了selinux,设置资源限制,直接ulimit -n 65535,也可以下面这种方式,建议使用下面的方式。

就不用去编辑/etc/security/limits.conf这个文件了

 

[root@localhost ~]# cd /etc/security/limits.d/
[root@localhost limits.d]# pwd
/etc/security/limits.d
[root@CRM-mysql limits.d]# ll
-rw-r--r-- 1 root root 227 8月 24 11:39 90-nproc.conf
[root@localhost limits.d]# cp -rpf 90-nproc.conf 90-nofile.conf
这是已经编辑好的文件
[root@localhost limits.d]# cat 90-nofile.conf
# Default limit for number of user's processes to prevent
# accidental fork bombs.
# See rhbz #432903 for reasoning.

 

* soft nofile 65535
* hard nofile 65535
root soft nofile unlimited
[root@localhost limits.d]# cat 90-nproc.conf
# Default limit for number of user's processes to prevent
# accidental fork bombs.
# See rhbz #432903 for reasoning.

 

* soft nproc 65535
* hard nproc 65535
root soft nproc unlimited

#########su -   一下
[root@localhost limits.d]# su -

####
[root@localhost ~]# ulimit -a
core file size (blocks, -c) 0
data seg size (kbytes, -d) unlimited
scheduling priority (-e) 0
file size (blocks, -f) unlimited
pending signals (-i) 256591
max locked memory (kbytes, -l) 64
max memory size (kbytes, -m) unlimited
open files (-n) 65535
pipe size (512 bytes, -p) 8
POSIX message queues (bytes, -q) 819200
real-time priority (-r) 0
stack size (kbytes, -s) 10240
cpu time (seconds, -t) unlimited
max user processes (-u) 65535
virtual memory (kbytes, -v) unlimited
file locks (-x) unlimited

 

2.2:关闭不需要的服务,只留下crond,network,rsyslog,sshd ,sysstat,udev-post

2.3:内核参数调整

# Reduce swap usage. Setting this to 0 should be combined with
# vm.overcommit_memory=2 to avoid triggering the OOM killer; 1 is the
# usual choice for database servers.
vm.swappiness=1
# Default is 3797. Keep a reserve of free physical memory so sudden
# allocation bursts do not force emergency paging.
vm.min_free_kbytes=204800
# Default is 100. A higher value makes the kernel reclaim the dentry and
# inode caches more aggressively, keeping more memory available.
vm.vfs_cache_pressure=150
# Flush dirty pages to disk continuously instead of in large bursts, to
# avoid sudden I/O stalls -- same idea as setting a lower
# innodb_max_dirty_pages_pct inside MySQL.

# When dirty pages in the filesystem cache reach this percentage of system
# memory (default 10%), the background writeback threads
# (pdflush/flush/kdmflush) start flushing them to disk asynchronously.

vm.dirty_background_ratio=5

# When dirty pages reach this percentage of system memory (default 20%),
# the kernel blocks all application-level writes until the backlog has
# been flushed.

vm.dirty_ratio=10
# Recycle/reuse TIME_WAIT sockets to improve TCP efficiency.
# NOTE(review): tcp_tw_recycle is known to break clients behind NAT and
# was removed entirely in Linux 4.12 -- confirm the kernel version and
# network topology before enabling it.
net.ipv4.tcp_tw_recycle=1
net.ipv4.tcp_tw_reuse=1

(3)安装jemalloc内存管理器

jemalloc内存分配方式与系统默认安装的glibc的malloc内存分配方式相比,能提高MySQL的性能,降低系统CPU和内存资源的占用,关于这方面的压测数据,请参考:http://www.linuxeye.com/Linux/1914.html

http://www.canonware.com/download/jemalloc/jemalloc-3.6.0.tar.bz2

编译安装很简单,tar xvf jemalloc-3.6.0.tar.bz2  ./configure && make && make install

以方便安装好MySQL后使用,使用也很简单:在[mysqld_safe]段加上malloc-lib=/usr/local/lib/libjemalloc.so

 (4)安装异步IO支持

为了提高磁盘操作性能,当前的数据库系统都采用异步IO(Asynchronous IO,AIO)的方式来处理磁盘操作。InnoDB存储引擎亦是如此。
在InnoDB1.1.x之前,AIO的实现通过InnoDB存储引擎中的代码来模拟实现。而从InnoDB 1.1.x开始(InnoDB Plugin不支持),
提供了内核级别AIO的支持,称为Native AIO。因此在编译或者运行该版本MySQL时,需要libaio库的支持,centos最小化安装默认是没有安装的,安装也简单:

yum install libaio-devel

(5)网卡绑定软中断

网卡软中断不平衡,集中在一个CPU核心上(mpstat 查看%soft集中,通常是cpu0),将软中断绑定到多个核心上,可以用下面两个脚本来绑定,自己喜欢用哪个都行

vim set_irq_affinity.py。

 

#!/usr/bin/env python
"""Bind NIC IRQs to CPU cores (smp_affinity) and configure RPS/RFS.

Usage:
    set_irq_affinity.py -i   round-robin eth* IRQs over CPU cores
    set_irq_affinity.py -I   restore the irqbalance service
    set_irq_affinity.py -r   enable RPS/RFS on every detected interface
    set_irq_affinity.py -R   reset RPS/RFS to kernel defaults
"""
import re
from os import system, popen
from os import walk as walkdir
from optparse import OptionParser

RPS_CPUS_VALUE = 'ffffffff'    # CPU mask written to rps_cpus (all cores)
RPS_FLOW_VALUE = '4096'        # per-queue rps_flow_cnt value
RPS_RFS_DEFAULT = '0'          # kernel default (feature disabled)
interrupts_file = '/proc/interrupts'
rps_cpus_list = []
rps_flow_list = []


def get_device():
    """Return interface names (eth0, em1, ...) parsed from `ifconfig`."""
    # NOTE(review): the 'Link' pattern matches net-tools style ifconfig
    # output; it will not match newer iproute2-style output -- confirm
    # the target distribution.
    return re.findall(r'([a-z]+\d+)\s+Link.*', popen('ifconfig').read())


def get_rfs_rps_file(net_device):
    """Collect rps_cpus / rps_flow_cnt paths for each rx queue of net_device."""
    rps_path = '/sys/class/net/' + net_device + '/queues/'
    for entry in walkdir(rps_path):
        # rx-N directories hold exactly two files: rps_cpus and rps_flow_cnt.
        if len(entry[2]) == 2:
            rps_cpus_list.append('/'.join([entry[0], entry[2][0]]))
            rps_flow_list.append('/'.join([entry[0], entry[2][1]]))


def file_hander(TARGET, VALUE='0'):
    """Overwrite TARGET with VALUE; the file handle is always closed."""
    # The original try/finally raised NameError when open() itself failed,
    # because the handle name was never bound; `with` fixes that.
    with open(TARGET, 'w') as handle:
        handle.write(VALUE)


def set_rfs_rps(net_device):
    """Enable RPS/RFS on every rx queue of net_device."""
    get_rfs_rps_file(net_device)
    # Plain loops instead of map(): under Python 3, map() is lazy and the
    # original side-effect-only calls would silently never run.
    for path in rps_cpus_list:
        file_hander(path, RPS_CPUS_VALUE)
    for path in rps_flow_list:
        file_hander(path, RPS_FLOW_VALUE)


def unset_rfs_rps(net_device):
    """Reset RPS/RFS on every rx queue of net_device to kernel defaults."""
    get_rfs_rps_file(net_device)
    for path in rps_cpus_list:
        file_hander(path, RPS_RFS_DEFAULT)
    for path in rps_flow_list:
        file_hander(path, RPS_RFS_DEFAULT)


def set_irq_balance():
    """Round-robin eth* IRQs across CPU cores via /proc/irq/*/smp_affinity."""
    # irqbalance would undo the manual masks, so stop it first.
    system('service irqbalance stop')
    with open(interrupts_file) as interrupts_ct:
        # The header line has one column per CPU core.
        cores_nr = len(interrupts_ct.readline().split())
        irq_bit = 0
        for inter_line in interrupts_ct:
            js = inter_line.split()
            # Last column is the device name, e.g. "eth0-rx-1".
            if len(js[-1]) > 5 and re.match(r'eth.-', js[-1][:5]):
                irq_nr = js[0][:-1]  # strip trailing ':' from the IRQ number
                target = '/proc/irq/%s/smp_affinity' % irq_nr
                # 1 << irq_bit is the single-core mask; strip hex()'s '0x'.
                value = re.sub('0x', '', hex(1 << irq_bit))
                file_hander(target, value)
                irq_bit += 1
                if irq_bit == cores_nr:
                    irq_bit = 0  # wrap around and reuse core 0


def unset_irq_balance():
    """Hand IRQ placement back to the irqbalance service."""
    system('service irqbalance start')


def usage():
    """Print a short help text."""
    usage_text = '''=================================================
Description: irq_balance_set && rfs_rps_set tools
Usage:
<script> -i : set irq smp_affinity
-I : unset irq smp_affinity
-r : set rfs && rps
-R : unset rfs && rps
'''
    print(usage_text)


if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("-i", action="store_true",
                      dest="irq_true",
                      default=False)
    parser.add_option("-I", action="store_true",
                      dest="irq_false",
                      default=False)
    parser.add_option("-r", action="store_true",
                      dest="rps_true",
                      default=False)
    parser.add_option("-R", action="store_true",
                      dest="rps_false",
                      default=False)
    (options, args) = parser.parse_args()

    if options.irq_true:
        set_irq_balance()
        print("irq_balance_set successfully")
    elif options.irq_false:
        unset_irq_balance()
        print("unset irq balance successfully")
    elif options.rps_true:
        for device in get_device():
            set_rfs_rps(device)
        print("rfs&&rps configured successfully")
    elif options.rps_false:
        for device in get_device():
            unset_rfs_rps(device)
        print("unconfigured rfs&&rps successfully")
    else:
        usage()

 

 

 

加入到开机启动中

#!/bin/bash
# Spread NIC IRQs across CPU cores and enable RPS/RFS.
# Intended to be run at boot (e.g. from rc.local); requires root.

CORE_SUM="$(grep -c '^processor' /proc/cpuinfo)"
# Single-core hex affinity masks for cores 0..23; keep only as many
# entries as this machine has cores.
IRQ_SUM="$(echo "1 2 4 8 10 20 40 80 100 200 400 800 1000 2000 4000 8000 10000 20000 40000 80000 100000 200000 400000 800000" | cut -d " " -f -"${CORE_SUM}")"
IRQ_NUM="${IRQ_SUM}"

# Walk every eth*/em* IRQ listed in /proc/interrupts and hand each one the
# next mask in the rotation.
for i in $(grep -E '(eth[0-9]+|em[0-9]+)' /proc/interrupts | awk -F ":" '{print $1}' | sed 's/\ //g'); do
        echo -e "${i}\t:$(cat "/proc/irq/${i}/smp_affinity")"

        # First mask still in the rotation goes to this IRQ.
        y="$(echo "${IRQ_NUM}" | awk '{print $1}')"
        echo "${y}" > "/proc/irq/${i}/smp_affinity"
        if [ "${y}" == "$(echo "${IRQ_SUM}" | awk '{print $NF}')" ]; then
                # Last core reached: restart the rotation from core 0.
                IRQ_NUM="${IRQ_SUM}"
        else
                # Drop the mask we just consumed from the front of the list.
                IRQ_NUM="$(echo "${IRQ_NUM}" | sed 's/^\([0-9]\+\)\ \(.*\)/\2/g')"
        fi

        echo -e "----\t $(cat "/proc/irq/${i}/smp_affinity")"
done

###### Enable RPS (Receive Packet Steering) / RFS
rfc=4096
cc=$(grep -c processor /proc/cpuinfo)
# Global flow table size = cores * per-queue flow count (no need for bc).
rsfe=$((cc * rfc))
sysctl -w net.core.rps_sock_flow_entries="${rsfe}"
# Glob directly instead of parsing `ls` output.
for fileRps in /sys/class/net/eth*/queues/rx-*/rps_cpus; do
    # NOTE(review): 'fff' covers CPUs 0-11 only -- widen on larger boxes.
    echo fff > "${fileRps}"
done

for fileRfc in /sys/class/net/eth*/queues/rx-*/rps_flow_cnt; do
    echo "${rfc}" > "${fileRfc}"
done

tail /sys/class/net/eth*/queues/rx-*/{rps_cpus,rps_flow_cnt}

posted @ 2015-07-24 16:37  文采飞扬  阅读(896)  评论(0编辑  收藏  举报