K8S-后端ceph部署准备
集群命令执行

[node66][DEBUG ] status for monitor: mon.node66
[node66][DEBUG ] {
[node66][DEBUG ] "election_epoch": 3,
[node66][DEBUG ] "extra_probe_peers": [],
[node66][DEBUG ] "monmap": {
[node66][DEBUG ] "created": "2019-04-24 17:23:05.931421",
[node66][DEBUG ] "epoch": 1,
[node66][DEBUG ] "fsid": "3339ccc5-3353-4327-b212-767ff79eadb8",
[node66][DEBUG ] "modified": "2019-04-24 17:23:05.931421",
[node66][DEBUG ] "mons": [
[node66][DEBUG ] {
[node66][DEBUG ] "addr": "192.168.1.66:6789/0",
[node66][DEBUG ] "name": "node66",
[node66][DEBUG ] "rank": 0
[node66][DEBUG ] }
[node66][DEBUG ] ]
[node66][DEBUG ] },
[node66][DEBUG ] "name": "node66",
[node66][DEBUG ] "outside_quorum": [],
[node66][DEBUG ] "quorum": [
[node66][DEBUG ] 0
[node66][DEBUG ] ],
[node66][DEBUG ] "rank": 0,
[node66][DEBUG ] "state": "leader",
[node66][DEBUG ] "sync_provider": []
[node66][DEBUG ] }
===================================================
mysql -uroot -p123456rd -P 30009 -h 192.168.1.62
ERROR 2059 (HY000): Authentication plugin 'caching_sha2_password' cannot be loaded # 认证插件类型的问题,建议改用 5.7 镜像
kubectl exec -it <pod名称> -- /bin/bash
docker exec -it 5b09aede8738 /bin/bash
mysql -uroot -p123456rd
vi /etc/hosts
192.168.1.66 node66
192.168.1.67 node67
192.168.1.68 node68
[root@xuegod66 ~]# ssh-keygen #所有的输入选项都直接回车生成。
[root@xuegod66 ~]# ssh-copy-id node66
[root@xuegod66 ~]# ssh-copy-id node67
[root@xuegod66 ~]# ssh-copy-id node68
[root@xuegod66 ~]# mkdir /etc/yum.repos.d/yum/
[root@xuegod66 ~]# mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/yum/
[root@xuegod66 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
[root@xuegod66 ~]# wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
[root@xuegod66 ~]# vi /etc/yum.repos.d/ceph.repo #添加如下内容:
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/x86_64/
gpgcheck=0
priority=1
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/noarch/
gpgcheck=0
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/SRPMS
gpgcheck=0
priority=1
rm -rf /etc/yum.repos.d/*
scp -r /etc/yum.repos.d/* xuegod64:/etc/yum.repos.d/
scp -r /etc/yum.repos.d/* node67:/etc/yum.repos.d/
scp -r /etc/yum.repos.d/* node68:/etc/yum.repos.d/
scp /etc/hosts node67:/etc/hosts
scp /etc/hosts node68:/etc/hosts
ssh node67
ssh node68
yum clean all ; yum makecache
rm /etc/localtime #删除link
ln -vs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime #建立软链接
yum install ntpdate -y
ntpdate time.nist.gov
systemctl stop firewalld ;systemctl disable firewalld ; iptables -F ;setenforce 0
[root@xuegod63 ~]# mkfs.xfs /dev/sdb
[root@xuegod63 ~]# mkdir /var/local/osd{0,1,2}
[root@xuegod63 ~]# mount /dev/sdb /var/local/osd0/
[root@xuegod62 ~]# mkfs.xfs /dev/sdb
[root@xuegod62 ~]# mkdir /var/local/osd{0,1,2}
[root@xuegod62 ~]# mount /dev/sdb /var/local/osd1/
[root@xuegod64 ~]# mkfs.xfs /dev/sdb
[root@xuegod64 ~]# mkdir /var/local/osd{0,1,2}
[root@xuegod64 ~]# mount /dev/sdb /var/local/osd2/
安装ceph-deploy 管理工具
[root@xuegod66 ~]# yum -y install ceph-deploy
创建monitor服务
[root@xuegod66 ~]# mkdir /etc/ceph && cd /etc/ceph
[root@xuegod66 ceph]# ceph-deploy new node66 #mon安装在控制节点
vi ceph.conf
osd_pool_default_size = 2
ceph-deploy install node66 node67 node68
8.2.3 安装ceph monitor
cd /etc/ceph
[root@xuegod63 ceph]# ceph-deploy mon create node66
收集节点的keyring文件
[root@xuegod63 ceph]# ceph-deploy gatherkeys node66
激活osd
[root@xuegod63 ceph]# ceph-deploy osd prepare node66:/var/local/osd0 node67:/var/local/osd1 node68:/var/local/osd2 #创建
ceph-deploy osd activate node66:/var/local/osd0 node67:/var/local/osd1 node68:/var/local/osd2#激活
各个文件给权限
chmod 777 -R /var/local/osd0/
chmod 777 -R /var/local/osd1/
chmod 777 -R /var/local/osd2/
复制管理秘钥到各节点
ceph-deploy admin node66 node67 node68
ceph-deploy osd list node66 node67 node68
各个节点操作
chmod +r /etc/ceph/ceph.client.admin.keyring
部署mds服务
[root@xuegod63 ceph]# ceph-deploy mds create node67 node68 #我们MDS安装2台
[root@xuegod63 ceph]# ceph mds stat #查看状态
ceph -s
rados df#查看使用率
vi /etc/ceph/ceph.conf#为K8S准备
rbd_default_features = 1
---------------------
1、创建池(主要使用存储类来进行持久卷的挂载,其他的挂载方式不好使也太麻烦):
ceph osd pool create k8s 64
[root@node66 ~]# grep key /etc/ceph/ceph.client.admin.keyring |awk '{printf "%s", $NF}'|base64
QVFEN0tzQmNUeW5jS1JBQW9kRFlpTnVCMEZGL3dsQ1BrNFUrZnc9PQ==
[root@node66 ~]# ceph auth get-key client.admin |base64
QVFEN0tzQmNUeW5jS1JBQW9kRFlpTnVCMEZGL3dsQ1BrNFUrZnc9PQ==
vi ceph-secret.yaml
**********************
apiVersion: v1
kind: Secret
metadata:
name: ceph-secret
type: "kubernetes.io/rbd"
data:
key: QVFEN0tzQmNUeW5jS1JBQW9kRFlpTnVCMEZGL3dsQ1BrNFUrZnc9PQ==
kubectl get secret
/dev/sdb /var/local/osd0
scp -r /etc/ceph/* 192.168.1.62:/etc/ceph/
docker restart $(docker ps -a | awk '{ print $1}' | tail -n +2)
参考
https://www.cnblogs.com/ltxdzh/p/9173570.html
测试ceph集群
1.挂载准备
rados df
ceph osd pool create k8s 64
ceph osd pool create k8s2 64
ceph fs new cephfs k8s k8s2 #用法: ceph fs new <fs名> <元数据池> <数据池>
ceph fs ls
2.挂载测试
记得配置源
66 vi /etc/yum.repos.d/ceph.repo
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/x86_64/
gpgcheck=0
priority=1
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/noarch/
gpgcheck=0
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/SRPMS
gpgcheck=0
priority=1
67 yum clean all
68 yum makecache
yum install -y ceph-fuse
scp /etc/ceph/ceph.client.admin.keyring 192.168.1.63:/etc/ceph/ceph.client.admin.keyring
mkdir /opt2
[root@xuegod65 ceph]# ceph-fuse -m 192.168.1.66:6789 /opt2
取消挂载
[root@xuegod65 ceph]# umount /opt2 #卸载挂载
开机挂载写法
scp -r /etc/ceph/* 192.168.1.63:/etc/ceph/ #拷贝配置文件到客户端
vi /etc/fstab
id=admin /opt2 fuse.ceph defaults 0 0
mount -a #挂载测试
[root@node66 ~]# ceph auth get-key client.admin |base64
QVFEN0tzQmNUeW5jS1JBQW9kRFlpTnVCMEZGL3dsQ1BrNFUrZnc9PQ==
apiVersion: v1
kind: Secret
metadata:
name: ceph-secret
type: "kubernetes.io/rbd"
data:
key: QVFEN0tzQmNUeW5jS1JBQW9kRFlpTnVCMEZGL3dsQ1BrNFUrZnc9PQ==

浙公网安备 33010602011771号