代码改变世界

Vertica 7.1安装最佳实践(RHEL6.4)

2015-03-25 15:35  AlfredZhao  阅读(2694)  评论(0编辑  收藏  举报

一、前期准备工作

二、Vertica安装

三、集群性能评估

一、前期准备工作:

1.1各节点IP和主机名

192.168.1.137 DB01
192.168.1.138 DB02
192.168.1.139 DB03
192.168.1.140 DB04

在节点1配置/etc/hosts,添加上面信息。

1.2上传脚本并设定环境变量

在节点1上传两个安装脚本到/usr/local/bin

cluster_copy_all_nodes 

#!/bin/bash
# cluster_copy_all_nodes: push a file (or, with -r, a directory) from this
# node to every OTHER node listed in $NODE_LIST via scp.
#
# Usage:
#   cluster_copy_all_nodes SRC DEST_PATH          # single file
#   cluster_copy_all_nodes -r SRC_DIR DEST_PATH   # recursive
#
# Requires: NODE_LIST (space-separated hostnames) exported, e.g. in
# .bash_profile, and password-less ssh/scp trust to each node.

SELF=$(hostname)

if [ -z "${NODE_LIST:-}" ]; then
  echo
  echo "Error: NODE_LIST environment variable must be set in .bash_profile" >&2
  exit 1
fi

# NODE_LIST is intentionally unquoted: word-splitting yields one hostname
# per loop iteration.
for node in $NODE_LIST; do
  # Skip the local node; we only copy to remote peers.
  if [ "$node" != "$SELF" ]; then
    if [ "${1:-}" = "-r" ]; then
      # Recursive copy: $2 = source directory, $3 = destination path.
      scp -oStrictHostKeyChecking=no -r "$2" "$node:$3"
    else
      # Plain copy: $1 = source file, $2 = destination path.
      scp -oStrictHostKeyChecking=no "$1" "$node:$2"
    fi
  fi
done
wait

cluster_run_all_nodes  

#!/bin/bash
# cluster_run_all_nodes: run a command on every node in $NODE_LIST via ssh.
#
# Usage:
#   cluster_run_all_nodes "cmd ..."                 # sequential, one node at a time
#   cluster_run_all_nodes --background "cmd ..."    # concurrent; 'wait' at the
#                                                   # end blocks until all finish
#
# Requires: NODE_LIST (space-separated hostnames) exported, e.g. in
# .bash_profile, and password-less ssh trust to each node.

if [ -z "${NODE_LIST:-}" ]; then
  echo
  echo "Error: NODE_LIST environment variable must be set in .bash_profile" >&2
  exit 1
fi

if [[ "${1:-}" = '--background' ]]; then
  shift
  # NODE_LIST unquoted on purpose: split into one hostname per iteration.
  for node in $NODE_LIST; do
    # -n: detach stdin so the backgrounded ssh does not steal the terminal.
    ssh -oStrictHostKeyChecking=no -n "$node" "$@" &
  done
else
  for node in $NODE_LIST; do
    ssh -oStrictHostKeyChecking=no "$node" "$@"
  done
fi
# Reap the background ssh jobs (no-op in the sequential case).
wait

配置节点1的环境变量

vi /root/.bash_profile       

export NODE_LIST='DB01 DB02 DB03 DB04'

1.3添加信任

ssh-keygen -q -t rsa  -N "" -f  ~/.ssh/id_rsa

ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.1.137
ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.1.138
ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.1.139
ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.1.140

cluster_run_all_nodes "hostname ; date"

1.4前期准备检查并调整

1.4.1 同步检查系统版本,运行级别,挂载目录结构,网卡信息

cluster_run_all_nodes "hostname;cat /etc/redhat-release"
cluster_run_all_nodes "hostname; /sbin/runlevel "
cluster_run_all_nodes "hostname; df -h"
cluster_run_all_nodes "hostname; ethtool eth0 | grep Speed"

1.4.2 同步脚本,校对主机名,同步/etc/hosts

cluster_copy_all_nodes /root/.bash_profile /root/
cluster_copy_all_nodes /usr/local/bin/cluster_run_all_nodes  /usr/local/bin/
cluster_copy_all_nodes /usr/local/bin/cluster_copy_all_nodes /usr/local/bin/
cluster_run_all_nodes "hostname; /bin/hostname -f; grep HOSTNAME /etc/sysconfig/network"
cluster_copy_all_nodes /etc/hosts /etc/
cluster_run_all_nodes "hostname; cat /etc/hosts"

1.4.3 同步时间、时区、NTP服务状态

cluster_run_all_nodes 'hostname;date'
cluster_run_all_nodes "date 032411082015.00"
cluster_run_all_nodes "hwclock -r"
cluster_run_all_nodes "hwclock -w"
cluster_run_all_nodes "hostname; echo \${TZ}; echo \${LANG}"
cluster_run_all_nodes "hostname; cat /etc/sysconfig/clock"
cluster_run_all_nodes "hostname; /sbin/chkconfig --list ntpd"

1.4.4 同步Selinux配置,防火墙配置

cluster_run_all_nodes "hostname; grep 'SELINUX=' /etc/selinux/config"
cluster_run_all_nodes "hostname; setenforce 0"
vi /etc/selinux/config   # 将 SELINUX= 一行修改为 SELINUX=disabled
cluster_copy_all_nodes /etc/selinux/config /etc/selinux/
cluster_run_all_nodes "hostname; /sbin/chkconfig --list iptables"
cluster_run_all_nodes "hostname; /sbin/chkconfig --level 0123456 iptables off"
cluster_run_all_nodes "service iptables stop"

1.4.5 同步CPU、内存配置

cluster_run_all_nodes "hostname; grep processor /proc/cpuinfo | wc -l"
cluster_run_all_nodes "hostname; grep MHz /proc/cpuinfo | sort -u"
cluster_run_all_nodes "hostname; grep MemTotal /proc/meminfo"
cluster_run_all_nodes "hostname; /sbin/chkconfig --list cpuspeed"
cluster_run_all_nodes "/sbin/chkconfig --level 0123456 cpuspeed off;/sbin/service cpuspeed stop"

1.4.6 同步检查rsync、python版本

cluster_run_all_nodes "hostname; rsync --version | grep version"
cluster_run_all_nodes "hostname; /usr/bin/python -V"

1.4.7 同步IO配置

--选择<deadline>
cluster_run_all_nodes "hostname; /sbin/modinfo cciss | grep version"
cluster_run_all_nodes "hostname; cat /sys/block/sda/queue/scheduler"
--cluster_run_all_nodes "hostname; cat /sys/block/sdb/queue/scheduler"
cluster_run_all_nodes "hostname; echo deadline > /sys/block/sda/queue/scheduler"
--cluster_run_all_nodes "hostname; echo deadline > /sys/block/sdb/queue/scheduler"
cluster_run_all_nodes "echo 'echo deadline > /sys/block/sda/queue/scheduler' >> /etc/rc.d/rc.local"
--cluster_run_all_nodes "echo 'echo deadline > /sys/block/sdb/queue/scheduler' >> /etc/rc.d/rc.local"
cluster_run_all_nodes "grep scheduler /etc/rc.d/rc.local"
noop anticipatory [deadline] cfq 
--修改/sys/kernel/mm/redhat_transparent_hugepage/enabled
cluster_run_all_nodes "hostname; cat /sys/kernel/mm/redhat_transparent_hugepage/enabled"
cluster_run_all_nodes "hostname; echo never > /sys/kernel/mm/redhat_transparent_hugepage/enabled"
cluster_run_all_nodes "echo 'echo never > /sys/kernel/mm/redhat_transparent_hugepage/enabled' >> /etc/rc.d/rc.local"
cluster_run_all_nodes "grep hugepage /etc/rc.d/rc.local"
always [never]
--修改/sys/kernel/mm/redhat_transparent_hugepage/defrag
cluster_run_all_nodes "hostname; cat /sys/kernel/mm/redhat_transparent_hugepage/defrag"
cluster_run_all_nodes "hostname; echo never > /sys/kernel/mm/redhat_transparent_hugepage/defrag"
cluster_run_all_nodes "echo 'echo never > /sys/kernel/mm/redhat_transparent_hugepage/defrag' >> /etc/rc.d/rc.local"
cluster_run_all_nodes "grep hugepage /etc/rc.d/rc.local"
always [never]
--修改/sys/kernel/mm/redhat_transparent_hugepage/khugepaged/defrag
cluster_run_all_nodes "hostname; cat /sys/kernel/mm/redhat_transparent_hugepage/khugepaged/defrag"
cluster_run_all_nodes "hostname; echo no > /sys/kernel/mm/redhat_transparent_hugepage/khugepaged/defrag"
cluster_run_all_nodes "echo 'echo no > /sys/kernel/mm/redhat_transparent_hugepage/khugepaged/defrag' >> /etc/rc.d/rc.local"
cluster_run_all_nodes "grep hugepage /etc/rc.d/rc.local"
yes [no]
--修改/proc/sys/vm/swappiness
cluster_run_all_nodes "hostname; cat /proc/sys/vm/swappiness"
cluster_run_all_nodes "hostname; echo 0 > /proc/sys/vm/swappiness"
cluster_run_all_nodes "echo 'echo 0 > /proc/sys/vm/swappiness' >> /etc/rc.d/rc.local"
cluster_run_all_nodes "grep swappiness /etc/rc.d/rc.local"
--修改readahead
cluster_run_all_nodes "hostname; /sbin/blockdev --getra /dev/sda"
cluster_run_all_nodes "hostname; /sbin/blockdev --setra 8192 /dev/sda"
cluster_run_all_nodes "echo '/sbin/blockdev --setra 8192 /dev/sda' >> /etc/rc.d/rc.local"
cluster_run_all_nodes "grep blockdev /etc/rc.d/rc.local"

1.4.8 同步系统配置

--同步/etc/security/limits.conf
vi /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
dbadmin - nice 0 
dbadmin soft nproc 145209
dbadmin hard nproc 145209
cluster_run_all_nodes "hostname; ulimit -n 655360"
cluster_run_all_nodes "hostname; ulimit -n"
cluster_copy_all_nodes /etc/security/limits.conf /etc/security/
--同步/etc/sysctl.conf
vm.max_map_count=9293346
vm.min_free_kbytes=65535
fs.file-max=13226642
net.core.rmem_default=262144
net.core.rmem_max=262144
net.core.wmem_default=262144
net.core.wmem_max=262144
cluster_copy_all_nodes /etc/sysctl.conf /etc/
cluster_run_all_nodes "hostname;sysctl -p /etc/sysctl.conf"
--同步/etc/pam.d/su
session required pam_limits.so
cluster_copy_all_nodes /etc/pam.d/su /etc/pam.d/
cluster_run_all_nodes "hostname; grep session /etc/pam.d/su"

二、Vertica安装

rpm -ivh vertica-7.1.0-3.x86_64.RHEL5.rpm

/opt/vertica/sbin/install_vertica -s DB01,DB02,DB03,DB04 -r /usr2/vertica-7.1.0-3.x86_64.RHEL5.rpm --failure-threshold=HALT -u dbadmin -p vertica
cluster_run_all_nodes "hostname;mkdir -p /data/verticadb"
cluster_run_all_nodes "hostname;chown -R dbadmin:verticadba /data/verticadb" 

三、集群性能评估

cluster_run_all_nodes "hostname; /opt/vertica/bin/vcpuperf" > /tmp/vcpuperf.log

cluster_run_all_nodes "hostname; /opt/vertica/bin/vioperf /data" > /tmp/vioperf_data.log

su - dbadmin
/opt/vertica/bin/vnetperf > /tmp/vnetperf.log