Ceph 客户端使用-s3cmd测试数据读写对象存储【十四】
RGW Server配置
#在实际的生产环境,RGW1和 RGW2 的配置参数是完全一样的
root@ceph-mgr1:~# vim /etc/ceph/ceph.conf
root@ceph-mgr2:~# vim /etc/ceph/ceph.conf
[global]
fsid = 0d8fb726-ee6d-4aaf-aeca-54c68e2584af
public_network = 192.168.40.0/24
cluster_network = 172.31.40.0/24
mon_initial_members = ceph-mon1
mon_host = 192.168.40.151
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
[mds.ceph-mgr2]
#mds_standby_for_fscid=mycephfs
mds_standby_for_name=ceph-mgr1
mds_standby_replay=true
[mds.ceph-mgr1]
#mds_standby_for_fscid=mycephfs
mds_standby_for_name=ceph-mgr2
mds_standby_replay=true
[mds.ceph-mon2]
#mds_standby_for_fscid=mycephfs
mds_standby_for_name=ceph-mon1
mds_standby_replay=true
[mds.ceph-mon1]
#mds_standby_for_fscid=mycephfs
mds_standby_for_name=ceph-mon2
mds_standby_replay=true
#主要加这里
[client.rgw.ceph-mgr1]
rgw_host = ceph-mgr1
rgw_frontends = "civetweb port=9900"
rgw_dns_name = rgw.sheca.com
[client.rgw.ceph-mgr2]
rgw_host = ceph-mgr2
rgw_frontends = "civetweb port=9900"
rgw_dns_name = rgw.sheca.com
##创建RGW账户
cephadmin@ceph-deploy:~$ radosgw-admin user create --uid="xks" --display-name="xks"
{
"user_id": "xks",
"display_name": "xks",
"email": "",
"suspended": 0,
"max_buckets": 1000,
"subusers": [],
"keys": [
{
"user": "xks",
"access_key": "IAM69EXJDG2995NLCKU6",
"secret_key": "7pwGKMSHoHXaaycfdCHWvtZ6yxPOsJB0hYzPXJQP"
}
],
"swift_keys": [],
"caps": [],
"op_mask": "read, write, delete",
"default_placement": "",
"default_storage_class": "",
"placement_tags": [],
"bucket_quota": {
"enabled": false,
"check_on_raw": false,
"max_size": -1,
"max_size_kb": 0,
"max_objects": -1
},
"user_quota": {
"enabled": false,
"check_on_raw": false,
"max_size": -1,
"max_size_kb": 0,
"max_objects": -1
},
"temp_url_keys": [],
"type": "rgw",
"mfa_ids": []
}
##安装s3cmd客户端
#对象存储的命令行客户端工具s3cmd 是一个通过命令行访问 ceph RGW 实现创建存储桶、上传、下载以及管理数据等操作的工具
root@ceph-deploy:~# apt-cache madison s3cmd
s3cmd | 2.0.2-1ubuntu1 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu focal-updates/universe amd64 Packages
s3cmd | 2.0.2-1ubuntu1 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu focal-updates/universe i386 Packages
s3cmd | 2.0.2-1 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu focal/universe amd64 Packages
s3cmd | 2.0.2-1 | https://mirrors.tuna.tsinghua.edu.cn/ubuntu focal/universe i386 Packages
root@ceph-deploy:~# apt install -y s3cmd
##配置s3cmd客户端执行环境
#s3cmd客户端添加域名解析 - 可以解析到单机也可以解析到负载均衡
#这里之前配置了haproxy 所以这里写负载均衡地址
listen ceph-rgw
mode tcp
bind 192.168.40.188:80
server 192.168.40.154 192.168.40.154:9900 check inter 3s rise 5 fall 3
server 192.168.40.155 192.168.40.155:9900 check inter 3s rise 5 fall 3
root@ceph-deploy:~# cat /etc/hosts
127.0.0.1 localhost
127.0.1.1 ceph-deploy.sheca.com ceph-deploy
# The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
192.168.40.151 ceph-mon1.sheca.com ceph-mon1
192.168.40.152 ceph-mon2.sheca.com ceph-mon2
192.168.40.153 ceph-mon3.sheca.com ceph-mon3
192.168.40.154 ceph-mgr1.sheca.com ceph-mgr1
192.168.40.155 ceph-mgr2.sheca.com ceph-mgr2
192.168.40.156 ceph-node1.sheca.com ceph-node1
192.168.40.157 ceph-node2.sheca.com ceph-node2
192.168.40.158 ceph-node3.sheca.com ceph-node3
192.168.40.159 ceph-deploy.sheca.com ceph-deploy
192.168.40.188 rgw.sheca.com
#配置命令执行环境
root@ceph-deploy:~# s3cmd --configure
Enter new values or accept defaults in brackets with Enter.
Refer to user manual for detailed description of all options.
Access key and Secret key are your identifiers for Amazon S3. Leave them empty for using the env variables.
Access Key: IAM69EXJDG2995NLCKU6 #输入用户access key
Secret Key: 7pwGKMSHoHXaaycfdCHWvtZ6yxPOsJB0hYzPXJQP #输入用户secret key
Default Region [US]:
Use "s3.amazonaws.com" for S3 Endpoint and not modify it to the target Amazon S3.
S3 Endpoint [s3.amazonaws.com]: rgw.sheca.com #RGW域名
Use "%(bucket)s.s3.amazonaws.com" to the target Amazon S3. "%(bucket)s" and "%(location)s" vars can be used
if the target S3 system supports dns based buckets.
DNS-style bucket+hostname:port template for accessing a bucket [%(bucket)s.s3.amazonaws.com]: rgw.sheca.com/%(bucket) #bucket域名格式
Encryption password is used to protect your files from reading
by unauthorized persons while in transfer to S3
Encryption password: #为空 回车
Path to GPG program [/usr/bin/gpg]: #回车
When using secure HTTPS protocol all communication with Amazon S3
servers is protected from 3rd party eavesdropping. This method is
slower than plain HTTP, and can only be proxied with Python 2.7 or newer
Use HTTPS protocol [Yes]: No #是否使用https
On some networks all internet access must go through a HTTP proxy.
Try setting it here if you cant connect to S3 directly
HTTP Proxy server name: #代理 回车无
New settings:
Access Key: IAM69EXJDG2995NLCKU6
Secret Key: 7pwGKMSHoHXaaycfdCHWvtZ6yxPOsJB0hYzPXJQP
Default Region: US
S3 Endpoint: rgw.sheca.com
DNS-style bucket+hostname:port template for accessing a bucket: rgw.sheca.com/%(bucket)
Encryption password:
Path to GPG program: /usr/bin/gpg
Use HTTPS protocol: False
HTTP Proxy server name:
HTTP Proxy server port: 0
Test access with supplied credentials? [Y/n] y #确认配置
Please wait, attempting to list all buckets...
Success. Your access key and secret key worked fine :-)
Now verifying that encryption works...
Not configured. Never mind.
Save settings? [y/N] y #确认配置
Configuration saved to '/root/.s3cfg' #保存位置
##验证认证文件
root@ceph-deploy:~# cat /root/.s3cfg
[default]
access_key = IAM69EXJDG2995NLCKU6
access_token =
add_encoding_exts =
add_headers =
bucket_location = US
ca_certs_file =
cache_file =
check_ssl_certificate = True
check_ssl_hostname = True
cloudfront_host = cloudfront.amazonaws.com
content_disposition =
content_type =
default_mime_type = binary/octet-stream
delay_updates = False
delete_after = False
delete_after_fetch = False
delete_removed = False
dry_run = False
enable_multipart = True
encoding = UTF-8
encrypt = False
expiry_date =
expiry_days =
expiry_prefix =
follow_symlinks = False
force = False
get_continue = False
gpg_command = /usr/bin/gpg
gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s
gpg_passphrase =
guess_mime_type = True
host_base = rgw.sheca.com
host_bucket = rgw.sheca.com/%(bucket)
human_readable_sizes = False
invalidate_default_index_on_cf = False
invalidate_default_index_root_on_cf = True
invalidate_on_cf = False
kms_key =
limit = -1
limitrate = 0
list_md5 = False
log_target_prefix =
long_listing = False
max_delete = -1
mime_type =
multipart_chunk_size_mb = 15
multipart_max_chunks = 10000
preserve_attrs = True
progress_meter = True
proxy_host =
proxy_port = 0
put_continue = False
recursive = False
recv_chunk = 65536
reduced_redundancy = False
requester_pays = False
restore_days = 1
restore_priority = Standard
secret_key = 7pwGKMSHoHXaaycfdCHWvtZ6yxPOsJB0hYzPXJQP
send_chunk = 65536
server_side_encryption = False
signature_v2 = False
signurl_use_https = False
simpledb_host = sdb.amazonaws.com
skip_existing = False
socket_timeout = 300
stats = False
stop_on_error = False
storage_class =
throttle_max = 100
upload_id =
urlencoding_mode = normal
use_http_expect = False
use_https = False
use_mime_magic = True
verbosity = WARNING
website_endpoint = http://%(bucket)s.s3-website-%(location)s.amazonaws.com/
website_error =
website_index = index.html
##命令行客户端s3cmd验证数据上传
#查看帮助信息
root@ceph-deploy:~# s3cmd --help
##创建bucket以验证权限
#存储空间(Bucket)是用于存储对象(Object)的容器,在上传任意类型的 Object 前,您需要先创建 Bucket
root@ceph-deploy:~# s3cmd mb s3://mybucket
Bucket 's3://mybucket/' created
root@ceph-deploy:~# s3cmd mb s3://css
Bucket 's3://css/' created
root@ceph-deploy:~# s3cmd mb s3://images
Bucket 's3://images/' created
#验证上传数据
root@ceph-deploy:~# wget https://img1.jcloudcs.com/portal/brand/2021/fl1-2.jpg #先下载一个图片
root@ceph-deploy:~# s3cmd put fl1-2.jpg s3://images/jpg/ #上传数据
upload: 'fl1-2.jpg' -> 's3://images/jpg/fl1-2.jpg' [1 of 1]
1294719 of 1294719 100% in 0s 27.12 MB/s done
root@ceph-deploy:~# s3cmd ls s3://images/jpg/ #验证数据
2024-03-15 08:40 1294719 s3://images/jpg/fl1-2.jpg
#验证下载文件
root@ceph-deploy:~# s3cmd get s3://images/jpg/fl1-2.jpg /opt
download: 's3://images/jpg/fl1-2.jpg' -> '/opt/fl1-2.jpg' [1 of 1]
1294719 of 1294719 100% in 0s 124.76 MB/s done
root@ceph-deploy:~# ll /opt/
total 1652
drwxr-xr-x 2 root root 4096 Mar 15 16:43 ./
drwxr-xr-x 23 root root 4096 Mar 8 10:24 ../
-rw-r--r-- 1 root root 191353 Mar 11 10:23 2.txt
-rw-r--r-- 1 root root 1294719 Mar 15 08:40 fl1-2.jpg
#删除文件
root@ceph-deploy:~# s3cmd ls s3://images/jpg/
2024-03-15 08:40 1294719 s3://images/jpg/fl1-2.jpg
root@ceph-deploy:~# s3cmd rm s3://images/jpg/fl1-2.jpg
delete: 's3://images/jpg/fl1-2.jpg'
root@ceph-deploy:~# s3cmd ls s3://images/jpg/
Python脚本操作S3-实验未成功
# -*- coding: utf-8 -*-
# Author: zhang shilie
# python 3.8
from boto3.session import Session
#新版本boto3
import os
class objectclient():
    """Thin boto3 wrapper for talking to a Ceph RGW S3 endpoint
    (here fronted by the haproxy VIP 192.168.40.188:80)."""

    def __init__(self):
        # NOTE(review): hardcoded credentials are fine for a lab, but should
        # come from env vars or a config file in production.
        access_key = 'IAM69EXJDG2995NLCKU6'
        secret_key = '7pwGKMSHoHXaaycfdCHWvtZ6yxPOsJB0hYzPXJQP'
        # BUG FIX: the original passed the endpoint inline and then did
        # print(endpoint_url) on an undefined name, raising NameError.
        # Define it as a local first so both the client and the debug
        # print use the same value.
        endpoint_url = 'http://192.168.40.188:80'
        self.session = Session(aws_access_key_id=access_key,
                               aws_secret_access_key=secret_key)
        self.s3_client = self.session.client('s3', endpoint_url=endpoint_url)
        print(access_key)
        print(secret_key)
        print(endpoint_url)

    def get_bucket(self):
        """List all bucket names visible to this user and return them."""
        buckets = [bucket['Name'] for bucket in self.s3_client.list_buckets()['Buckets']]
        print(buckets)
        return buckets

    def create_bucket(self):
        """Create a bucket with a fixed name and public-read ACL.

        Valid ACLs: "private", "public-read", "public-read-write",
        "authenticated-read".
        """
        self.s3_client.create_bucket(Bucket='20220403', ACL='public-read')

    def upload(self):
        """Upload every file under ./videos/ into the 'video' bucket as mp4."""
        file_list = os.listdir("./videos/")
        for name in file_list:
            print(name)
            # BUG FIX: original used open(...).read() without closing the
            # file handle; use a context manager so it is always released.
            with open("./videos/%s" % name, 'rb') as fh:
                body = fh.read()
            resp = self.s3_client.put_object(
                ContentType='video/mp4',
                Bucket="video",          # destination bucket
                Key="%s" % name,         # object key after upload
                Body=body,
            )
            print(resp)

    def download(self):
        """Fetch one object from the 'test-s3cmd' bucket and save it locally."""
        resp = self.s3_client.get_object(
            Bucket='test-s3cmd',
            Key='node_exporter-1.3.1.linux-amd64.tar.gz'
        )
        with open('./test.tar.gz', 'wb') as f:  # local destination file
            f.write(resp['Body'].read())
# BUG FIX: the original guard compared against "main" instead of "__main__",
# so this block never executed when the script was run directly.
if __name__ == "__main__":
    print("XKS")
    s3_boto3 = objectclient()
    # s3_boto3.create_bucket()  # create a bucket (original comment had typo 'crete_bucket')
    s3_boto3.get_bucket()       # list buckets
    # s3_boto3.upload()         # upload files
    # s3_boto3.download()       # download a file
S3 客户端-实验成功
#查看服务器上有多少buckets
root@ceph-deploy:~# s3cmd ls
2024-03-19 03:30 s3://abc
2024-03-15 08:36 s3://css
2024-03-15 08:36 s3://images
2024-03-15 08:35 s3://mybucket
root@ceph-deploy:~# s3cmd ls s3://images/jpg/
2024-03-19 05:44 1294719 s3://images/jpg/fl1-2.jpg




浙公网安备 33010602011771号