[root@master ~]# radosgw-admin zonegroup list
{
    "default_info": "",
    "zonegroups": [
        "default"
    ]
}
[root@master ~]# radosgw-admin zonegroup get
failed to init zonegroup: (2) No such file or directory
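The get fails because "default_info" is still empty, so radosgw-admin has no default zonegroup to resolve. A minimal workaround (assuming the zonegroup really is named "default", as the list output suggests) is to name it explicitly:

[root@master ~]# radosgw-admin zonegroup get --rgw-zonegroup=default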
[root@master ~]# radosgw-admin -h | grep -i zone | grep -i rm
  zonegroup rm                       remove a zone group info
  zonegroup rm                       remove a zone from a zonegroup
  zonegroup placement rm             remove a placement target from a zonegroup
  zone rm                            remove a zone
  zone placement rm                  remove a zone placement target
   --tags-rm=<list>                  list of tags to remove for zonegroup placement modify command
   --sync-from-rm=[zone-name][,...]
[root@master ~]# radosgw-admin -h | grep -i delete
  replica mdlog get/delete
  replica datalog get/delete
                                     (NOTE: required to delete a non-empty bucket)
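Going by the first matched line above ("zonegroup rm — remove a zone group info"), a sketch of removing the zonegroup by name would be the following; note this is inferred from the help text, and some releases expose the same operation as 'zonegroup delete', so double-check against your version:

[root@master ~]# radosgw-admin zonegroup rm --rgw-zonegroup=default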
# Inventory host group variables
mon_group_name: mons
osd_group_name: osds
#rgw_group_name: rgws
#mds_group_name: mdss
#nfs_group_name: nfss
#restapi_group_name: restapis
#rbdmirror_group_name: rbdmirrors
#client_group_name: clients
#iscsi_gw_group_name: iscsigws
#mgr_group_name: mgrs
# The defaults of these inventory host variables already match the group names
# used in the inventory file, so it makes no difference whether you uncomment them or not.
...
# If configure_firewall is true, then ansible will try to configure the
# appropriate firewalling rules so that Ceph daemons can communicate
# with each other.
#configure_firewall: True
configure_firewall: false
# If you are unsure how to configure the firewall, disable it here and save yourself the trouble.
...
# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd
# Note that this selection is currently ignored on containerized deployments
#ntp_daemon_type: timesyncd
ntp_daemon_type: chronyd
# Ceph needs time synchronization; pick whichever daemon suits your environment.
# Three are currently supported: chronyd, ntpd and timesyncd. (A quick sync check
# follows after the next block.)
...
# ORIGIN SOURCE
#
# Choose between:
# - 'repository' means that you will get ceph installed through a new repository. Later below choose between 'community', 'rhcs', 'dev' or 'obs'
# - 'distro' means that no separate repo file will be added
#   you will get whatever version of Ceph is included in your Linux distro.
# - 'local' means that the ceph binaries will be copied over from the local machine
#ceph_origin: dummy
ceph_origin: repository
#valid_ceph_origins:
#  - repository
#  - distro
#  - local
ceph_repository: community
#valid_ceph_repository:
#  - community
#  - rhcs
#  - dev
#  - uca
#  - custom
#  - obs
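Since chronyd was chosen as the NTP daemon above, it is worth verifying that time has actually converged on every node before running the playbook. A quick ad-hoc check (a sketch, assuming your inventory file is named "hosts"):

$ ansible all -i hosts -m shell -a 'chronyc tracking'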
# REPOSITORY: COMMUNITY VERSION
#
# Enabled when ceph_repository == 'community'
#
#ceph_mirror: http://download.ceph.com
#ceph_stable_key: https://download.ceph.com/keys/release.asc
#ceph_stable_release: dummy
#ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"
ceph_mirror: http://mirrors.163.com/ceph
ceph_stable_key: https://mirrors.163.com/ceph/keys/release.asc
ceph_stable_release: mimic
ceph_stable_repo: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}"
# There are three ways to install the Ceph packages: 'repository' installs from a newly
# added repo; 'distro' installs whatever version your Linux distribution ships; 'local'
# copies the ceph binaries over from the local machine. Pick whichever fits your needs.
...
## Monitor options
#
# You must define either monitor_interface, monitor_address or monitor_address_block.
# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
# Eg. If you want to specify for each monitor which address the monitor will bind to you can set it in your **inventory host file** by using 'monitor_address' variable.
# Preference will go to monitor_address if both monitor_address and monitor_interface are defined.
#monitor_interface: interface
monitor_interface: ens33
#monitor_address: 0.0.0.0
#monitor_address_block: subnet
# set to either ipv4 or ipv6, whichever your network is using
#ip_version: ipv4
#mon_use_fqdn: false # if set to true, the MON name used will be the fqdn in the ceph.conf
# For the monitors you must define exactly one of interface, address or address_block.
# (For the full semantics, read the English comments above carefully.)
...
## OSD options
#
#is_hci: false
#hci_safety_factor: 0.2
#non_hci_safety_factor: 0.7
#osd_memory_target: 4294967296
#journal_size: 5120 # OSD journal size in MB
journal_size: 1024 # OSD journal size in MB
#block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.
#public_network: 0.0.0.0/0
public_network: 172.30.12.0/24
cluster_network: 172.30.12.0/24
#cluster_network: "{{ public_network | regex_replace(' ', '') }}"
#osd_mkfs_type: xfs
#osd_mkfs_options_xfs: -f -i size=2048
#osd_mount_options_xfs: noatime,largeio,inode64,swalloc
#osd_objectstore: bluestore
osd_objectstore: filestore
# Size the journal according to the media and speed of the journal device; set the
# public and cluster networks; choose filestore or bluestore as the objectstore.
...
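For orientation, the network and journal variables above are what ceph-ansible templates into the ceph.conf on each node; roughly, the relevant fragment of the rendered file would look like this (an illustrative sketch, not the full generated config):

[global]
public network = 172.30.12.0/24
cluster network = 172.30.12.0/24

[osd]
osd journal size = 1024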
...
# Even though OSD nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
# will copy the admin key to the /etc/ceph/ directory
#copy_admin_key: false
copy_admin_key: true
# Personal preference; I like every OSD node to have the admin key on hand.
...
# Declare devices to be used as OSDs
# All scenarios (except the 3rd) inherit from the following device declaration
# Note: This scenario uses the ceph-disk tool to provision OSDs
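The device declaration that the comment refers to is not shown above; a minimal sketch of what it looks like in osds.yml (the device paths here are hypothetical, substitute your own disks):

devices:
  - /dev/sdb
  - /dev/sdc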
$ docker logs -f osd
2019-09-23 10:59:14  /opt/ceph-container/bin/entrypoint.sh: static: does not generate config
2019-09-23 10:59:14  /opt/ceph-container/bin/entrypoint.sh: Creating osd
2019-09-23 10:59:14  /opt/ceph-container/bin/entrypoint.sh: OSD created with ID: 0
2019-09-23 10:59:14  /opt/ceph-container/bin/entrypoint.sh: created folder /var/lib/ceph/osd/ceph-0/
creating /var/lib/ceph/osd/ceph-0//keyring
added entity osd.0 auth auth(auid = 18446744073709551615 key=AQCCpYhdbE1EGxAAxX2Av3Gzez/5j8sijQg1jQ== with 0 caps)
2019-09-23 10:59:15.014 7f255136fd80 -1 filestore(/var/lib/ceph/osd/ceph-0) WARNING: max attr value size (1024) is smaller than osd_max_object_name_len (2048). Your backend filesystem appears to not support attrs large enough to handle the configured max rados name size. You may get unexpected ENAMETOOLONG errors on rados operations or buggy behavior
2019-09-23 10:59:15.084 7f255136fd80 -1 filestore(/var/lib/ceph/osd/ceph-0) mkjournal(1101): error creating journal on /var/lib/ceph/osd/ceph-0//journal: (22) Invalid argument
2019-09-23 10:59:15.084 7f255136fd80 -1 OSD::mkfs: ObjectStore::mkfs failed with error (22) Invalid argument
2019-09-23 10:59:15.084 7f255136fd80 -1 ** ERROR: error creating empty object store in /var/lib/ceph/osd/ceph-0: (22) Invalid argument
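mkjournal failing with (22) Invalid argument on filestore is typically the backing filesystem rejecting O_DIRECT, which is common when /var/lib/ceph sits on the container's overlay filesystem instead of a bind-mounted volume; the max-attr warning above points at the same mismatch. If that diagnosis applies, a hedged workaround is to relax the relevant filestore options in ceph.conf (real options, but disabling dio/aio costs journal performance and is only a stopgap; the cleaner fix is to put /var/lib/ceph on a proper filesystem):

[osd]
journal dio = false
journal aio = false
# for filesystems with small xattr limits (e.g. ext4):
osd max object name len = 256
osd max object namespace len = 64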
[config]
# Name of the Ceph storage cluster. A suitable Ceph configuration file allowing
# access to the Ceph storage cluster from the gateway node is required, if not
# colocated on an OSD node.
cluster_name = ceph
# Place a copy of the ceph cluster's admin keyring in the gateway's /etc/ceph
# directory and reference the filename here
gateway_keyring = ceph.client.admin.keyring
# API settings.
# The API supports a number of options that allow you to tailor it to your
# local environment. If you want to run the API under https, you will need to
# create cert/key files that are compatible for each iSCSI gateway node, that is
# not locked to a specific node. SSL cert and key files *must* be called
# 'iscsi-gateway.crt' and 'iscsi-gateway.key' and placed in the '/etc/ceph/' directory
# on *each* gateway node. With the SSL files in place, you can use 'api_secure = true'
# to switch to https mode.
# To support the API, the bare minimum settings are:
api_secure = false
# Additional API configuration options are as follows, defaults shown.
# api_user = admin
# api_password = admin
# api_port = 5001
# iscsi-gw requires at least two gateway services to be deployed
trusted_ip_list = 10.53.2.13,10.53.2.16,10.53.2.19
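With the configuration in place, the gateway services need to be enabled so the REST API comes up; and if you later switch api_secure to true, each gateway needs the certificate pair named above. A sketch (self-signed cert with a hypothetical CN; service names as shipped by the ceph-iscsi packages):

# only needed if api_secure = true
openssl req -newkey rsa:2048 -nodes -x509 -days 365 \
    -keyout /etc/ceph/iscsi-gateway.key \
    -out /etc/ceph/iscsi-gateway.crt \
    -subj '/CN=iscsi-gw'
# start the gateway and its REST API
systemctl daemon-reload
systemctl enable --now rbd-target-gw rbd-target-api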
/> cd iscsi-targets/iqn.2019-08.com.hna.iscsi-gw:iscsi-igw/host-groups/xen/
/iscsi-target...st-groups/xen> disk add sata/vol_sata_23t
/iscsi-target...st-groups/xen> disk add ssd/vol_ssd_1800g
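To confirm both disks actually landed in the host group, gwcli's ls prints the tree from the current node:

/iscsi-target...st-groups/xen> ls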