MongoDB 12-Node High-Availability Sharded Cluster Installation and Setup (Version 3.2)
| Host | MongoDB01 | MongoDB02 | MongoDB03 | MongoDB04 | MongoDB05 | MongoDB06 |
| --- | --- | --- | --- | --- | --- | --- |
| IP | 10.10.1.5 | 10.10.1.6 | 10.10.1.7 | 10.10.1.8 | 10.10.1.9 | 10.10.1.10 |
| Replica set 28001 (data) | SH1RS | SH1RS | SH1RS | SH2RS | SH2RS | SH2RS |

| Host | MongoDB07 | MongoDB08 | MongoDB09 | MongoDB10 | MongoDB11 | MongoDB12 |
| --- | --- | --- | --- | --- | --- | --- |
| IP | 10.10.1.11 | 10.10.1.12 | 10.10.1.13 | 10.10.1.14 | 10.10.1.15 | 10.10.1.16 |
| Replica set 28001 (data) | SH3RS | SH3RS | SH3RS | SH4RS | SH4RS | SH4RS |
| Replica set 28002 (config) | CFGRS | CFGRS | CFGRS | | | |
| Router 28003 (mongos) | | | | Y | Y | Y |
Prerequisite: the operating system is already installed (this walkthrough is based on RHEL 6.10); xfs can be used for the database storage.
1. Basic steps required on every node of the cluster: create the user and install the software.
--Create the user and directories
root/root123
groupadd -g 1000 mongodb
useradd -u 1000 -g mongodb mongodb
passwd mongodb
Password: mongodb
mkdir /mongodb
chown mongodb:mongodb /mongodb
--Remount the original data filesystem at /mongodb (see the sample fstab entry after this block)
vi /etc/fstab
umount /data
mount -a
chown mongodb:mongodb /mongodb
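For reference, the fstab entry for this step might look like the line below; the device name /dev/sdb1 is an assumption, so adjust it to the actual data volume (formerly mounted at /data):
/dev/sdb1    /mongodb    xfs    defaults,noatime    0 0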
--Unpack the software:
su - mongodb
cd /mongodb
mkdir data conf soft log
cd soft
mkdir 3.2.22
cd 3.2.22
--Upload the installation package to the /mongodb directory first
tar -xzvf /mongodb/mongodb-linux-x86_64-rhel62-3.2.22.tgz
cd mongodb-linux-x86_64-rhel62-3.2.22
mv * ../
cd ../
rm -r mongodb-linux-x86_64-rhel62-3.2.22
--Add environment variables
cd
vi .bash_profile
Add:
export PATH=$PATH:/mongodb/soft/3.2.22/bin
source .bash_profile
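A quick sanity check that the new PATH entry works (the expected output is inferred from the package version, not captured from the original environment):
which mongod        --should resolve to /mongodb/soft/3.2.22/bin/mongod
mongod --version    --should report db version v3.2.22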
--Operating system parameter configuration--
vi /etc/hosts
10.10.1.5 MongoDB01
10.10.1.6 MongoDB02
10.10.1.7 MongoDB03
10.10.1.8 MongoDB04
10.10.1.9 MongoDB05
10.10.1.10 MongoDB06
10.10.1.11 MongoDB07
10.10.1.12 MongoDB08
10.10.1.13 MongoDB09
10.10.1.14 MongoDB10
10.10.1.15 MongoDB11
10.10.1.16 MongoDB12
--Disable SELinux
setenforce 0
getenforce
vi /etc/selinux/config
SELINUX=disabled
--Disable the firewall:
chkconfig iptables off
chkconfig ip6tables off
service iptables stop
service ip6tables stop
--Adjust kernel parameters:
vi /etc/sysctl.conf
fs.file-max = 6815744
kernel.sem = 10000 1280000 512 1024
kernel.shmmni = 4096
kernel.shmall = 1073741824
kernel.shmmax = 4398046511104
net.core.rmem_default = 262144
net.core.wmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_max = 1048576
fs.aio-max-nr = 4194304
vm.dirty_ratio=20
vm.dirty_background_ratio=3
vm.dirty_writeback_centisecs=100
vm.dirty_expire_centisecs=500
vm.swappiness=10
vm.min_free_kbytes=524288
sysctl -p
--Disable transparent huge pages:
vi /etc/rc.local
if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
echo never > /sys/kernel/mm/transparent_hugepage/enabled
fi
if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
echo never > /sys/kernel/mm/transparent_hugepage/defrag
fi
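After a reboot (or after running the two echo commands once by hand), the setting can be verified; [never] should be the bracketed, active value:
cat /sys/kernel/mm/transparent_hugepage/enabled
cat /sys/kernel/mm/transparent_hugepage/defrag
--both should print: always madvise [never]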
--Set resource limits for the mongodb user
vi /etc/security/limits.conf
mongodb soft nproc 4095
mongodb hard nproc 16384
mongodb soft nofile 8192
mongodb hard nofile 65536
mongodb soft stack 10240
mongodb hard stack 32768
vi /etc/security/limits.d/90-nproc.conf
Add:
mongodb soft nproc unlimited
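To confirm the limits are picked up, log in again as the mongodb user and check the soft limits (a quick check; expected values follow the limits.conf entries above):
su - mongodb
ulimit -u    --max user processes, expected 4095
ulimit -n    --open files, expected 8192
ulimit -s    --stack size in KB, expected 10240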
2. MongoDB replica set configuration (data nodes and config nodes):
su - mongodb
mkdir /mongodb/data/28001
mkdir /mongodb/conf/28001
mkdir /mongodb/log/28001
cd /mongodb/conf/28001
openssl rand -base64 102 > .keyFile
chmod 400 .keyFile
Copy the key file to the same directory on every other node.
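One way to push the key file out (a sketch; it assumes /mongodb/conf/28001 already exists on every host and that SSH as the mongodb user is available):
for h in MongoDB02 MongoDB03 MongoDB04 MongoDB05 MongoDB06 MongoDB07 MongoDB08 MongoDB09 MongoDB10 MongoDB11 MongoDB12
do
    scp -p /mongodb/conf/28001/.keyFile ${h}:/mongodb/conf/28001/
done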
vi mongo_sh1rs_28001.conf    --the configuration file name differs for each shard
port = 28001
dbpath = /mongodb/data/28001
logpath = /mongodb/log/28001/28001.log
pidfilepath = /mongodb/data/28001/28001.pid
logappend = true
fork = true
oplogSize = 204800
replSet = SH1RS    --use SH2RS/SH3RS/SH4RS on the corresponding shards
#keyFile = /mongodb/conf/28001/.keyFile
--Start mongod
numactl --interleave=all mongod -f /mongodb/conf/28001/mongo_sh1rs_28001.conf
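The replica set is initialized from the mongo shell; one way to open a shell against the first member before pasting the repConfig document below (assuming it is run from one of the shard's hosts):
mongo MongoDB01:28001    --connect to the mongod just started, then run rs.initiate(repConfig)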
--Configure the replica set
repConfig = {
_id:'SH1RS',
members:[
{_id:0,host:'MongoDB01:28001'},
{_id:1,host:'MongoDB02:28001'},
{_id:2,host:'MongoDB03:28001'}]
}
rs.initiate(repConfig)
rs.status()
SH1RS:SECONDARY> rs.status()
{
    "set" : "SH1RS",
    "date" : ISODate("2019-03-14T14:12:17.210Z"),
    "myState" : 2,
    "term" : NumberLong(1),
    "syncingTo" : "MongoDB01:28001",
    "heartbeatIntervalMillis" : NumberLong(2000),
    "members" : [
        {
            "_id" : 0,
            "name" : "MongoDB01:28001",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 655,
            "optime" : {
                "ts" : Timestamp(1552572310, 2),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2019-03-14T14:05:10Z"),
            "lastHeartbeat" : ISODate("2019-03-14T14:12:15.436Z"),
            "lastHeartbeatRecv" : ISODate("2019-03-14T14:12:16.769Z"),
            "pingMs" : NumberLong(0),
            "electionTime" : Timestamp(1552572066, 1),
            "electionDate" : ISODate("2019-03-14T14:01:06Z"),
            "configVersion" : 1
        },
        {
            "_id" : 1,
            "name" : "MongoDB02:28001",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 655,
            "optime" : {
                "ts" : Timestamp(1552572310, 2),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2019-03-14T14:05:10Z"),
            "lastHeartbeat" : ISODate("2019-03-14T14:12:15.436Z"),
            "lastHeartbeatRecv" : ISODate("2019-03-14T14:12:15.436Z"),
            "pingMs" : NumberLong(0),
            "syncingTo" : "MongoDB01:28001",
            "configVersion" : 1
        },
        {
            "_id" : 2,
            "name" : "MongoDB03:28001",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 1002,
            "optime" : {
                "ts" : Timestamp(1552572310, 2),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2019-03-14T14:05:10Z"),
            "syncingTo" : "MongoDB01:28001",
            "configVersion" : 1,
            "self" : true
        }
    ],
    "ok" : 1
}
--Test replication:
use test
db.testc.insert({id:1,test:"from Mongodb01,the primary"})
db.testc.find()
--Connect to a secondary node:
use test
rs.slaveOk()
db.testc.find()
SH1RS:SECONDARY> db.testc.find()
{ "_id" : ObjectId("5c8a5f960304e7b4a4ac66a4"), "id" : 1, "test" : "from Mongodb01,the primary" }
----------------------------------------------------------------------------------------------------
--Deploy the config servers as a three-member replica set
mkdir /mongodb/data/28002
mkdir /mongodb/conf/28002
mkdir /mongodb/log/28002
cd /mongodb/conf/28002
Copy the keyFile over from MongoDB01; it is kept in the 28001 directory on every node.
vi mongo_cfgrs_28002.conf
port = 28002
dbpath = /mongodb/data/28002
logpath = /mongodb/log/28002/28002.log
pidfilepath = /mongodb/data/28002/28002.pid
logappend = true
fork = true
oplogSize = 204800
replSet = CFGRS
#keyFile = /mongodb/conf/28001/.keyFile
--Start mongod
numactl --interleave=all mongod --configsvr -f /mongodb/conf/28002/mongo_cfgrs_28002.conf    --Note: the config server must be started with the --configsvr option
--Configure the replica set
repConfig = {
_id:'CFGRS',
configsvr: true,
members:[
{_id:0,host:'MongoDB07:28002'},
{_id:1,host:'MongoDB08:28002'},
{_id:2,host:'MongoDB09:28002'}]
}
rs.initiate(repConfig)
rs.status()
--Config and Start mongos instance
mkdir /mongodb/conf/28003
mkdir /mongodb/log/28003
cd /mongodb/conf/28003
Copy the keyFile over from another node.
vi mongos_28003.conf
port = 28003
logpath = /mongodb/log/28003/28003.log
pidfilepath = /mongodb/log/28003/28003.pid
logappend = true
fork = true
configdb = CFGRS/MongoDB07:28002,MongoDB08:28002,MongoDB09:28002
#keyFile = /mongodb/conf/28001/.keyFile
--Start mongos
numactl --interleave=all mongos -f /mongodb/conf/28003/mongos_28003.conf
--Add Replica Sets as Shards
Connect to a mongos:
mongo MongoDB10:28003/admin
sh.addShard( "SH1RS/MongoDB01:28001,MongoDB02:28001,MongoDB03:28001" );
sh.addShard( "SH2RS/MongoDB04:28001,MongoDB05:28001,MongoDB06:28001" );
sh.addShard( "SH3RS/MongoDB07:28001,MongoDB08:28001,MongoDB09:28001" );
sh.addShard( "SH4RS/MongoDB10:28001,MongoDB11:28001,MongoDB12:28001" );
sh.status()
--Enable Keyfile Access Control
Add the following line to the configuration file of every role (shard mongod, config server and mongos), then restart each process so that it takes effect (see the restart sketch below):
keyFile = /mongodb/conf/28001/.keyFile
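A possible rolling-restart sequence per node is sketched below; it is an operational assumption, not part of the original notes (mongos has no --shutdown option, so it is stopped by terminating the process):
--on each shard data node (config servers are analogous, started with --configsvr)
mongod --shutdown -f /mongodb/conf/28001/mongo_sh1rs_28001.conf
numactl --interleave=all mongod -f /mongodb/conf/28001/mongo_sh1rs_28001.conf
--on each mongos host
pkill -f mongos_28003.conf
numactl --interleave=all mongos -f /mongodb/conf/28003/mongos_28003.conf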
Create the shard-local user administrator (optional)
Run this on the primary of each of the four shards:
admin = db.getSiblingDB("admin")
admin.createUser(
{
user: "mongoadmin",
pwd: "Super**********",
roles: [ { role: "root", db: "admin" } ]
}
)
-------------------------
SH1RS:PRIMARY> admin = db.getSiblingDB("admin")
admin
SH1RS:PRIMARY> admin.createUser(
...   {
...     user: "mongoadmin",
...     pwd: "Super**********",
...     roles: [ { role: "root", db: "admin" } ]
...   }
... )
Successfully added user: {
    "user" : "mongoadmin",
    "roles" : [
        {
            "role" : "root",
            "db" : "admin"
        }
    ]
}

mongo -u "mongoadmin" -p "Super**********" --authenticationDatabase "admin" --port 28001
-----------------------------------------
Create the user administrator
admin = db.getSiblingDB("admin")
admin.createUser(
{
user: "shadmin",
pwd: "shSuper**********",
roles: [ { role: "root", db: "admin" } ]
}
)
-----------------------
mongos> admin = db.getSiblingDB("admin")
admin
mongos> admin.createUser(
...   {
...     user: "shadmin",
...     pwd: "shSuper**********",
...     roles: [ { role: "root", db: "admin" } ]
...   }
... )
Successfully added user: {
    "user" : "shadmin",
    "roles" : [
        {
            "role" : "root",
            "db" : "admin"
        }
    ]
}

Note: once these users exist (and keyFile/auth is enabled), the passwordless local login no longer works; from this point on you must log in with a username and password.
-------------------------------
Check the sharding status:

mongodb@MongoDB10 28003$ mongo -u "shadmin" -p "shSuper**********" --authenticationDatabase "admin" --port 28003
MongoDB shell version: 3.2.22
connecting to: 127.0.0.1:28003/test
mongos>
mongos> sh.status()
--- Sharding Status ---
  sharding version: {
    "_id" : 1,
    "minCompatibleVersion" : 5,
    "currentVersion" : 6,
    "clusterId" : ObjectId("5c939dcc871df831ca6178dc")
}
  shards:
    {  "_id" : "SH1RS",  "host" : "SH1RS/MongoDB01:28001,MongoDB02:28001,MongoDB03:28001" }
    {  "_id" : "SH2RS",  "host" : "SH2RS/MongoDB04:28001,MongoDB05:28001,MongoDB06:28001" }
    {  "_id" : "SH3RS",  "host" : "SH3RS/MongoDB07:28001,MongoDB08:28001,MongoDB09:28001" }
    {  "_id" : "SH4RS",  "host" : "SH4RS/MongoDB10:28001,MongoDB11:28001,MongoDB12:28001" }
  active mongoses:
    "3.2.22" : 1
  balancer:
    Currently enabled:  yes
    Currently running:  no
    Failed balancer rounds in last 5 attempts:  5
    Last reported error:  could not find host matching read preference { mode: "primary" } for set SH1RS
    Time of Reported error:  Sat Mar 23 2019 14:48:58 GMT+0800 (CST)
    Migration Results for the last 24 hours:
        No recent migrations
  databases:
    {  "_id" : "test",  "primary" : "SH1RS",  "partitioned" : false }
--------Sharding function test:
Insert data while the collection is not yet sharded:

use test
var bulk = db.test_collection.initializeUnorderedBulkOp();
people = ["Marc", "Bill", "George", "Eliot", "Matt", "Trey", "Tracy", "Greg", "Steve", "Kristina", "Katie", "Jeff"];
for (var i = 0; i < 1000000; i++) {
    user_id = i;
    name = people[Math.floor(Math.random() * people.length)];
    number = Math.floor(Math.random() * 10001);
    bulk.insert({ "user_id": user_id, "name": name, "number": number });
}
bulk.execute();
All of the data sits on a single shard:

mongos> db.stats()
{
    "raw" : {
        "SH1RS/MongoDB01:28001,MongoDB02:28001,MongoDB03:28001" : {
            "db" : "test",
            "collections" : 2,
            "objects" : 1520654,
            "avgObjSize" : 70.83500454409747,
            "dataSize" : 107715533,
            "storageSize" : 39489536,
            "numExtents" : 0,
            "indexes" : 3,
            "indexSize" : 30613504,
            "ok" : 1,
            "$gleStats" : {
                "lastOpTime" : Timestamp(0, 0),
                "electionId" : ObjectId("7fffffff0000000000000005")
            }
        },
        "SH2RS/MongoDB04:28001,MongoDB05:28001,MongoDB06:28001" : {
            "db" : "test",
            "collections" : 0,
            "objects" : 0,
            "avgObjSize" : 0,
            "dataSize" : 0,
            "storageSize" : 0,
            "numExtents" : 0,
            "indexes" : 0,
            "indexSize" : 0,
            "fileSize" : 0,
            "ok" : 1,
            "$gleStats" : {
                "lastOpTime" : Timestamp(0, 0),
                "electionId" : ObjectId("7fffffff0000000000000004")
            }
        },
        "SH3RS/MongoDB07:28001,MongoDB08:28001,MongoDB09:28001" : {
            "db" : "test",
            "collections" : 0,
            "objects" : 0,
            "avgObjSize" : 0,
            "dataSize" : 0,
            "storageSize" : 0,
            "numExtents" : 0,
            "indexes" : 0,
            "indexSize" : 0,
            "fileSize" : 0,
            "ok" : 1,
            "$gleStats" : {
                "lastOpTime" : Timestamp(0, 0),
                "electionId" : ObjectId("7fffffff0000000000000004")
            }
        },
        "SH4RS/MongoDB10:28001,MongoDB11:28001,MongoDB12:28001" : {
            "db" : "test",
            "collections" : 0,
            "objects" : 0,
            "avgObjSize" : 0,
            "dataSize" : 0,
            "storageSize" : 0,
            "numExtents" : 0,
            "indexes" : 0,
            "indexSize" : 0,
            "fileSize" : 0,
            "ok" : 1,
            "$gleStats" : {
                "lastOpTime" : Timestamp(0, 0),
                "electionId" : ObjectId("7fffffff0000000000000003")
            }
        }
    },
    "objects" : 1520654,
    "avgObjSize" : 70,
    "dataSize" : 107715533,
    "storageSize" : 39489536,
    "numExtents" : 0,
    "indexes" : 3,
    "indexSize" : 30613504,
    "fileSize" : 0,
    "extentFreeList" : {
        "num" : 0,
        "totalSize" : 0
    },
    "ok" : 1
}
Enable sharding:

mongos> sh.enableSharding( "test" )
{ "ok" : 1 }
mongos> db.test_collection.createIndex( { number : 1 } )
{
    "raw" : {
        "SH1RS/MongoDB01:28001,MongoDB02:28001,MongoDB03:28001" : {
            "createdCollectionAutomatically" : false,
            "numIndexesBefore" : 2,
            "numIndexesAfter" : 2,
            "note" : "all indexes already exist",
            "ok" : 1,
            "$gleStats" : {
                "lastOpTime" : Timestamp(1553326440, 4000),
                "electionId" : ObjectId("7fffffff0000000000000005")
            }
        }
    },
    "ok" : 1
}
mongos> sh.shardCollection( "test.test_collection", { "number" : 1 } )
{ "collectionsharded" : "test.test_collection", "ok" : 1 }
mongos>
Watch the data being migrated automatically:

mongos> sh.status()
--- Sharding Status ---
  sharding version: {
    "_id" : 1,
    "minCompatibleVersion" : 5,
    "currentVersion" : 6,
    "clusterId" : ObjectId("5c939dcc871df831ca6178dc")
}
  shards:
    {  "_id" : "SH1RS",  "host" : "SH1RS/MongoDB01:28001,MongoDB02:28001,MongoDB03:28001" }
    {  "_id" : "SH2RS",  "host" : "SH2RS/MongoDB04:28001,MongoDB05:28001,MongoDB06:28001" }
    {  "_id" : "SH3RS",  "host" : "SH3RS/MongoDB07:28001,MongoDB08:28001,MongoDB09:28001" }
    {  "_id" : "SH4RS",  "host" : "SH4RS/MongoDB10:28001,MongoDB11:28001,MongoDB12:28001" }
  active mongoses:
    "3.2.22" : 3
  balancer:
    Currently enabled:  yes
    Currently running:  yes
    Collections with active migrations:
        test.test_collection started at Sat Mar 23 2019 15:39:27 GMT+0800 (CST)
    Failed balancer rounds in last 5 attempts:  4
    Last reported error:  could not find host matching read preference { mode: "primary" } for set SH1RS
    Time of Reported error:  Sat Mar 23 2019 14:49:13 GMT+0800 (CST)
    Migration Results for the last 24 hours:
        1 : Failed with error 'chunk too big to move', from SH1RS to SH2RS
  databases:
    {  "_id" : "test",  "primary" : "SH1RS",  "partitioned" : true }
        test.test_collection
            shard key: { "number" : 1 }
            unique: false
            balancing: true
            chunks:
                SH1RS    7
            { "number" : { "$minKey" : 1 } } -->> { "number" : 1198 } on : SH1RS Timestamp(1, 4)
            { "number" : 1198 } -->> { "number" : 2396 } on : SH1RS Timestamp(1, 5)
            { "number" : 2396 } -->> { "number" : 3591 } on : SH1RS Timestamp(1, 6)
            { "number" : 3591 } -->> { "number" : 4789 } on : SH1RS Timestamp(1, 7)
            { "number" : 4789 } -->> { "number" : 7188 } on : SH1RS Timestamp(1, 1)
            { "number" : 7188 } -->> { "number" : 9585 } on : SH1RS Timestamp(1, 2)
            { "number" : 9585 } -->> { "number" : { "$maxKey" : 1 } } on : SH1RS Timestamp(1, 3)

mongos> db.stats()
{
    "raw" : {
        "SH1RS/MongoDB01:28001,MongoDB02:28001,MongoDB03:28001" : {
            "db" : "test",
            "collections" : 2,
            "objects" : 1520654,
            "avgObjSize" : 70.83500454409747,
            "dataSize" : 107715533,
            "storageSize" : 39489536,
            "numExtents" : 0,
            "indexes" : 3,
            "indexSize" : 30613504,
            "ok" : 1,
            "$gleStats" : {
                "lastOpTime" : Timestamp(0, 0),
                "electionId" : ObjectId("7fffffff0000000000000005")
            }
        },
        "SH2RS/MongoDB04:28001,MongoDB05:28001,MongoDB06:28001" : {
            "db" : "test",
            "collections" : 1,
            "objects" : 54412,
            "avgObjSize" : 70.83121370285966,
            "dataSize" : 3854068,
            "storageSize" : 1032192,
            "numExtents" : 0,
            "indexes" : 2,
            "indexSize" : 815104,
            "ok" : 1,
            "$gleStats" : {
                "lastOpTime" : Timestamp(0, 0),
                "electionId" : ObjectId("7fffffff0000000000000004")
            }
        },
        "SH3RS/MongoDB07:28001,MongoDB08:28001,MongoDB09:28001" : {
            "db" : "test",
            "collections" : 0,
            "objects" : 0,
            "avgObjSize" : 0,
            "dataSize" : 0,
            "storageSize" : 0,
            "numExtents" : 0,
            "indexes" : 0,
            "indexSize" : 0,
            "fileSize" : 0,
            "ok" : 1,
            "$gleStats" : {
                "lastOpTime" : Timestamp(0, 0),
                "electionId" : ObjectId("7fffffff0000000000000004")
            }
        },
        "SH4RS/MongoDB10:28001,MongoDB11:28001,MongoDB12:28001" : {
            "db" : "test",
            "collections" : 0,
            "objects" : 0,
            "avgObjSize" : 0,
            "dataSize" : 0,
            "storageSize" : 0,
            "numExtents" : 0,
            "indexes" : 0,
            "indexSize" : 0,
            "fileSize" : 0,
            "ok" : 1,
            "$gleStats" : {
                "lastOpTime" : Timestamp(0, 0),
                "electionId" : ObjectId("7fffffff0000000000000003")
            }
        }
    },
    "objects" : 1575066,
    "avgObjSize" : 70,
    "dataSize" : 111569601,
    "storageSize" : 40521728,
    "numExtents" : 0,
    "indexes" : 5,
    "indexSize" : 31428608,
    "fileSize" : 0,
    "extentFreeList" : {
        "num" : 0,
        "totalSize" : 0
    },
    "ok" : 1
}
The data is gradually being migrated.....
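To follow the migration progress, the shell helper getShardDistribution() can be run against the collection through any mongos (example call; the numbers it prints will differ from run to run):
mongos> db.test_collection.getShardDistribution()
--prints data size, document count and chunk count per shard for test.test_collection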
The three mongos routers listen on port 28003 on 10.10.1.14, .15 and .16; test connecting to each of them:
mongodb@MongoDB03 ~$ mongo 10.10.1.15:28003 -u "shadmin" -p "shSuper**********" --authenticationDatabase "admin"
MongoDB shell version: 3.2.22
connecting to: 10.10.1.15:28003/test
mongos> exit
bye
mongodb@MongoDB03 ~$ mongo 10.10.1.16:28003 -u "shadmin" -p "shSuper**********" --authenticationDatabase "admin"
MongoDB shell version: 3.2.22
connecting to: 10.10.1.16:28003/test

mongodb@MongoDB03 ~$ mongo 10.10.1.14:28003 -u "shadmin" -p "shSuper**********" --authenticationDatabase "admin"
MongoDB shell version: 3.2.22
connecting to: 10.10.1.14:28003/test
mongos>
mongos>
-------------------NIC bonding:

cd /etc/sysconfig/network-scripts/
vi ifcfg-bond0
DEVICE=bond0
BOOTPROTO=static
IPADDR=10.10.1.6
NETMASK=255.255.0.0
ONBOOT=yes
USECTL=no
TYPE=Ethernet

cp ifcfg-eth1 ifcfg-eth1.bak
vi ifcfg-eth1
Comment out IPADDR, NETMASK and GATEWAY, then add:
MASTER=bond0
SLAVE=yes

Or, done entirely with echo (example for the host whose bonded IP is 10.10.1.7):
cd /etc/sysconfig/network-scripts/
echo "DEVICE=bond0">>ifcfg-bond0
echo "BOOTPROTO=static">>ifcfg-bond0
echo "IPADDR=10.10.1.7">>ifcfg-bond0
echo "NETMASK=255.255.0.0">>ifcfg-bond0
echo "ONBOOT=yes">>ifcfg-bond0
echo "USECTL=no">>ifcfg-bond0
echo "TYPE=Ethernet">>ifcfg-bond0
cp ifcfg-eth1 ifcfg-eth1.bak
cp ifcfg-eth2 ifcfg-eth2.bak
echo "MASTER=bond0">>ifcfg-eth1
echo "SLAVE=yes">>ifcfg-eth1
echo "MASTER=bond0">>ifcfg-eth2
echo "SLAVE=yes">>ifcfg-eth2
echo "alias bond0 bonding">>/etc/modprobe.d/dist.conf
echo "options bond0 miimon=100 mode=1">>/etc/modprobe.d/dist.conf
Edit ifcfg-eth1 and ifcfg-eth2 and comment out IPADDR, NETMASK and GATEWAY.
service network restart
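A quick way to confirm the bond came up (it assumes the bonding module is loaded and eth1/eth2 are the slave interfaces):
cat /proc/net/bonding/bond0    --shows the bonding mode (active-backup), MII status and the slave interfaces
ip addr show bond0             --the bonded IP address should now live on bond0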
Information handed over to the maintenance staff:

Shard-local administrator (this account is only for special local maintenance; it cannot log in from the outside through mongos):
SH1RS:PRIMARY> admin = db.getSiblingDB("admin")
admin
SH1RS:PRIMARY> admin.createUser(
...   {
...     user: "mongoadmin",
...     pwd: "Super**********",
...     roles: [ { role: "root", db: "admin" } ]
...   }
... )
Successfully added user: {
    "user" : "mongoadmin",
    "roles" : [
        {
            "role" : "root",
            "db" : "admin"
        }
    ]
}

Cluster-level administrator (the day-to-day super user; used to create other users and to manage the cluster and its databases; for administrators only, not for application connections):
mongos> admin = db.getSiblingDB("admin")
admin
mongos> admin.createUser(
...   {
...     user: "shadmin",
...     pwd: "shSuper**********",
...     roles: [ { role: "root", db: "admin" } ]
...   }
... )
Successfully added user: {
    "user" : "shadmin",
    "roles" : [
        {
            "role" : "root",
            "db" : "admin"
        }
    ]
}

mongodb@MongoDB03 ~$ mongo 10.10.1.14:28003 -u "shadmin" -p "shSuper**********" --authenticationDatabase "admin"
MongoDB shell version: 3.2.22
connecting to: 10.10.1.14:28003/test
mongos>

Cluster administrator account: shadmin    Password: shSuper**********
mongos: three instances are configured, on 10.10.1.14/15/16, port 28003; the cluster can be reached through any one of them.
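Since shadmin is meant for administrators only, an application would normally get its own least-privilege account. A minimal sketch through mongos (the database name appdb, the user appuser and its password are illustrative assumptions, not part of the hand-over):
mongos> use appdb
mongos> db.createUser(
...   {
...     user: "appuser",
...     pwd: "App**********",
...     roles: [ { role: "readWrite", db: "appdb" } ]
...   }
... )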
