Ceph 对象存储网关-RadosGW【十三】

部署RadosGW服务

cephadmin@ceph-deploy:~$ ceph -s
  cluster:
    id:     0d8fb726-ee6d-4aaf-aeca-54c68e2584af
    health: HEALTH_WARN
            1 pool(s) do not have an application enabled

  services:
    mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 58m)
    mgr: ceph-mgr2(active, since 58m), standbys: ceph-mgr1
    mds: 2/2 daemons up, 2 standby
    osd: 9 osds: 9 up (since 58m), 9 in (since 86m)

  data:
    volumes: 1/1 healthy
    pools:   6 pools, 225 pgs
    objects: 132 objects, 168 MiB
    usage:   3.1 GiB used, 267 GiB / 270 GiB avail
    pgs:     225 active+clean
##将ceph-mgr1、ceph-mgr2服务器部署高可用的radosGW 服务
root@ceph-mgr1:~# apt install -y radosgw
root@ceph-mgr2:~# apt install -y radosgw

#在ceph-deploy 服务器将 ceph-mgr1 初始化为radosGW服务
cephadmin@ceph-deploy:~$ ceph-deploy rgw create ceph-mgr1
cephadmin@ceph-deploy:~$ ceph-deploy rgw create ceph-mgr2

#    rgw: 2 daemons active (2 hosts, 1 zones)
cephadmin@ceph-deploy:~$ ceph -s
  cluster:
    id:     0d8fb726-ee6d-4aaf-aeca-54c68e2584af
    health: HEALTH_WARN
            1 pool(s) do not have an application enabled

  services:
    mon: 3 daemons, quorum ceph-mon1,ceph-mon2,ceph-mon3 (age 61m)
    mgr: ceph-mgr2(active, since 61m), standbys: ceph-mgr1
    mds: 2/2 daemons up, 2 standby
    osd: 9 osds: 9 up (since 61m), 9 in (since 89m)
    rgw: 2 daemons active (2 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   10 pools, 353 pgs
    objects: 321 objects, 168 MiB
    usage:   3.1 GiB used, 267 GiB / 270 GiB avail
    pgs:     353 active+clean

  io:
    client:   81 KiB/s rd, 596 B/s wr, 95 op/s rd, 55 op/s wr
#验证radosgw 服务进程
root@ceph-mgr1:~# ps -ef | grep rados
ceph       7120      1  0 09:56 ?        00:00:00 /usr/bin/radosgw -f --cluster ceph --name client.rgw.ceph-mgr1 --setuser ceph --setgroup ceph
root       7779   1867  0 09:57 pts/0    00:00:00 grep --color=auto rados
#radosgw的存储池类型
cephadmin@ceph-deploy:~$ ceph osd pool ls
device_health_metrics
mypool
cephfs-metadata
cephfs-data
rbd-ibm
cephfs-metadatat
.rgw.root
default.rgw.log      #存储日志信息,用于记录各种log信息
default.rgw.control #系统控制池,在有数据更新时,通知其它 RGW 更新缓存
default.rgw.meta    #元数据存储池,通过不同的名称空间分别存储不同的rados 对象,这些名称空间包括用戶UID 及其 bucket 映射信息的名称空间 users.uid
#用户的密钥名称空间 users.keys、用户的 email 名称空间 users.email、用户的 subuser 的名称空间 users.swift 以及 bucket 的名称空间 root 等。
#default.rgw.data.root:存放 bucket 的元数据,结构体对应 RGWBucketInfo,比如存放桶名、桶ID、data pool等 

#查看默认radosgw的存储池
cephadmin@ceph-deploy:~$ radosgw-admin zone get --rgw-zone=default --rgw-zonegroup=default
{
    "id": "4c2d1a02-86a8-49de-936c-332dbbebdc10",
    "name": "default",
    "domain_root": "default.rgw.meta:root",
    "control_pool": "default.rgw.control",
    "gc_pool": "default.rgw.log:gc",
    "lc_pool": "default.rgw.log:lc",
    "log_pool": "default.rgw.log",
    "intent_log_pool": "default.rgw.log:intent",
    "usage_log_pool": "default.rgw.log:usage",
    "roles_pool": "default.rgw.meta:roles",
    "reshard_pool": "default.rgw.log:reshard",
    "user_keys_pool": "default.rgw.meta:users.keys",
    "user_email_pool": "default.rgw.meta:users.email",
    "user_swift_pool": "default.rgw.meta:users.swift",
    "user_uid_pool": "default.rgw.meta:users.uid", #存放用户信息的存储池
    "otp_pool": "default.rgw.otp",
    "system_key": {
        "access_key": "",
        "secret_key": ""
    },
    "placement_pools": [
        {
            "key": "default-placement",
            "val": {
                "index_pool": "default.rgw.buckets.index", #存放 bucket 到 object 的索引信息。
                "storage_classes": {
                    "STANDARD": {
                        "data_pool": "default.rgw.buckets.data" #存放对象的数据
                    }
                },
                "data_extra_pool": "default.rgw.buckets.non-ec", #数据的额外信息存储池
                "index_type": 0
            }
        }
    ],
    "realm_id": "",
    "notif_pool": "default.rgw.log:notif"
}


cephadmin@ceph-deploy:~$ ceph osd pool ls
device_health_metrics
mypool
cephfs-metadata
cephfs-data
rbd-ibm
cephfs-metadatat
.rgw.root
default.rgw.log
default.rgw.control
default.rgw.meta
cephadmin@ceph-deploy:~$ ceph osd pool get default.rgw.meta crush_rule #查看默认副本池
crush_rule: replicated_rule
cephadmin@ceph-deploy:~$ ceph osd pool get default.rgw.meta size #默认副本数
size: 3
cephadmin@ceph-deploy:~$ ceph osd pool get default.rgw.meta pgp_num #默认pg数量
pgp_num: 32
cephadmin@ceph-deploy:~$ ceph osd pool get default.rgw.meta pg_num
pg_num: 32

#访问radosgw
root@ceph-mgr1:~# netstat -nltp | grep 7480
tcp        0      0 0.0.0.0:7480            0.0.0.0:*               LISTEN      7120/radosgw
tcp6       0      0 :::7480                 :::*                    LISTEN      7120/radosgw
#http://192.168.40.154:7480/

RadosGW服务高可用配置

radosgw http高可用

自定义http端口

#自定义http端口
#配置文件可以在 ceph deploy 服务器修改然后统一推送,或者单独修改每个 radosgw 服务器的配置为统一配置,然后重启 RGW 服务。
#ceph-mgr1/2 都进行配置改为9900 端口
#ceph-mgr1
root@ceph-mgr1:~# vim /etc/ceph/ceph.conf
#末尾添加
[client.rgw.ceph-mgr1]
rgw_host=ceph-mgr1
rgw_frontends=civetweb port=9900

root@ceph-mgr1:~# systemctl restart ceph-radosgw@rgw.ceph-mgr1.service
root@ceph-mgr1:~# netstat -ntlp | grep 9900
tcp        0      0 0.0.0.0:9900            0.0.0.0:*               LISTEN      3971/radosgw
#http://192.168.40.154:9900/

#ceph-mgr2
root@ceph-mgr2:~# vim /etc/ceph/ceph.conf
#末尾添加
[client.rgw.ceph-mgr2]
rgw_host=ceph-mgr2
rgw_frontends=civetweb port=9900

root@ceph-mgr2:~# systemctl restart ceph-radosgw@rgw.ceph-mgr2.service
root@ceph-mgr2:~# netstat -ntlp | grep 9900
tcp        0      0 0.0.0.0:9900            0.0.0.0:*               LISTEN      3971/radosgw
#http://192.168.40.155:9900/

实现高可用

#有个VIP 192.168.40.188 
[root@k8s-haproxy02 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:71:49:08 brd ff:ff:ff:ff:ff:ff
    inet 192.168.40.110/24 brd 192.168.40.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.40.188/24 scope global secondary ens33:0
       valid_lft forever preferred_lft forever
    inet6 fe80::69bc:84e1:b573:669e/64 scope link noprefixroute
       valid_lft forever preferred_lft forever

#安装打开haproxy
[root@k8s-haproxy02 ~]# ps -ef | grep haproxy
root       1668      1  0 10:52 ?        00:00:00 /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid
haproxy    1669   1668  0 10:52 ?        00:00:00 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
haproxy    1670   1669  0 10:52 ?        00:00:00 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
root       1688   1371  0 10:55 pts/0    00:00:00 grep --color=auto haproxy

#添加配置 转发到实际后面2台服务
[root@k8s-haproxy02 ~]# cat /etc/haproxy/haproxy.cfg
listen ceph-rgw
    mode tcp
    bind 192.168.40.188:80
    server 192.168.40.154 192.168.40.154:9900 check inter 3s rise 5 fall 3
    server 192.168.40.155 192.168.40.155:9900 check inter 3s rise 5 fall 3

#重启访问
systemctl restart haproxy
#浏览器访问:192.168.40.188

radosgw https高可用

##ceph-mgr2 配置
root@ceph-mgr2:~# mkdir -p /etc/ceph/certs
root@ceph-mgr2:~# cd /etc/ceph/certs/
root@ceph-mgr2:/etc/ceph/certs# openssl genrsa -out civetweb.key 2048
root@ceph-mgr2:/etc/ceph/certs# openssl req -new -x509 -key civetweb.key -out civetweb.crt
Country Name (2 letter code) [AU]:CH
State or Province Name (full name) [Some-State]:CH
Locality Name (eg, city) []:CH
Organization Name (eg, company) [Internet Widgits Pty Ltd]:rgw.sheca.com
Organizational Unit Name (eg, section) []:rgw.sheca.com
Common Name (e.g. server FQDN or YOUR name) []:rgw.sheca.com
Email Address []:

#生成civetweb.pem文件
root@ceph-mgr2:/etc/ceph/certs# cat civetweb.key civetweb.crt > civetweb.pem
#配置ceph.conf文件 支持9900 9443端口
root@ceph-mgr2:/etc/ceph/certs# vim /etc/ceph/ceph.conf
[client.rgw.ceph-mgr2]
rgw_host=ceph-mgr2
rgw_frontends = "civetweb port=9900+9443s ssl_certificate=/etc/ceph/certs/civetweb.pem"
#重启
root@ceph-mgr2:/etc/ceph/certs# systemctl restart ceph-radosgw@rgw.ceph-mgr2.service
root@ceph-mgr2:/etc/ceph/certs# ss -nltp | grep 9900
LISTEN   0         128                  0.0.0.0:9900             0.0.0.0:*       users:(("radosgw",pid=6204,fd=67))
root@ceph-mgr2:/etc/ceph/certs# ss -nltp | grep 9443
LISTEN   0         128                  0.0.0.0:9443             0.0.0.0:*       users:(("radosgw",pid=6204,fd=68))
#验证访问
root@ceph-mgr2:/etc/ceph/certs# curl -k https://192.168.40.155:9443
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>

##ceph-mgr1 配置
#同样 ceph-mgr1也配置一下
root@ceph-mgr1:~# mkdir -p /etc/ceph/certs
root@ceph-mgr2:/etc/ceph/certs# scp civetweb.pem 192.168.40.154:/etc/ceph/certs
root@ceph-mgr1:~# cat /etc/ceph/ceph.conf #添加下面配置
[client.rgw.ceph-mgr1]
rgw_host=ceph-mgr1
rgw_frontends = "civetweb port=9900+9443s ssl_certificate=/etc/ceph/certs/civetweb.pem"
#重启
root@ceph-mgr1:/etc/ceph/certs# systemctl restart ceph-radosgw@rgw.ceph-mgr1.service
root@ceph-mgr1:/etc/ceph/certs# ss -nltp | grep 9900
LISTEN   0         128                  0.0.0.0:9900             0.0.0.0:*       users:(("radosgw",pid=6204,fd=67))
root@ceph-mgr1:/etc/ceph/certs# ss -nltp | grep 9443
LISTEN   0         128                  0.0.0.0:9443             0.0.0.0:*       users:(("radosgw",pid=6204,fd=68))
#验证访问
root@ceph-mgr1:/etc/ceph/certs# curl -k https://192.168.40.154:9443
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>

配置haproxy

#
[root@k8s-haproxy02 ~]# cat /etc/haproxy/haproxy.cfg
listen ceph-rgw-https
    mode tcp
    bind 192.168.40.188:443
    server 192.168.40.154 192.168.40.154:9443 check inter 3s rise 5 fall 3
    server 192.168.40.155 192.168.40.155:9443 check inter 3s rise 5 fall 3

[root@k8s-haproxy02 ~]# systemctl restart haproxy

#配置windows hosts解析 
C:\Windows\System32\drivers\etc\hosts
192.168.40.188 rgw.sheca.com
#访问
rgw.sheca.com

日志及其它优化配置

#创建日志目录 ceph-mgr2
root@ceph-mgr2:/etc/ceph/certs# mkdir /var/log/radosgw
root@ceph-mgr2:/etc/ceph/certs# chown ceph.ceph /var/log/radosgw
root@ceph-mgr2:/etc/ceph/certs# vim /etc/ceph/ceph.conf
[client.rgw.ceph-mgr2]
rgw_host = ceph-mgr2
rgw_frontends = "civetweb port=9900+9443s ssl_certificate=/etc/ceph/certs/civetweb.pem error_log_file=/var/log/radosgw/civetweb.error.log access_log_file=/var/log/radosgw/civetweb.access.log request_timeout_ms=30000 num_threads=200"

root@ceph-mgr2:/etc/ceph/certs# systemctl restart ceph-radosgw@rgw.ceph-mgr2.service
root@ceph-mgr2:/etc/ceph/certs# systemctl status ceph-radosgw@rgw.ceph-mgr2.service

#访问查看日志
root@ceph-mgr2:/var/log# curl -k https://192.168.40.155:9443
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>
root@ceph-mgr2:/var/log# curl -k https://192.168.40.155:9443
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>
root@ceph-mgr2:/var/log# curl -k https://192.168.40.155:9443
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>
root@ceph-mgr2:/var/log# curl -k https://192.168.40.155:9443
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>
root@ceph-mgr2:/var/log# tail -f /var/log/radosgw/civetweb.access.log
192.168.40.155 - - [15/Mar/2024:11:39:32 +0800] "GET / HTTP/1.1" 200 413 - curl/7.68.0
192.168.40.155 - - [15/Mar/2024:11:39:32 +0800] "GET / HTTP/1.1" 200 413 - curl/7.68.0
192.168.40.155 - - [15/Mar/2024:11:39:33 +0800] "GET / HTTP/1.1" 200 413 - curl/7.68.0
192.168.40.155 - - [15/Mar/2024:11:39:34 +0800] "GET / HTTP/1.1" 200 413 - curl/7.68.0

 

posted @ 2024-03-15 11:28  しみずよしだ  阅读(71)  评论(0)    收藏  举报