Task Objectives#
- Complete the deployment of the Ceph cluster using cephadm
Task Platform#
- Physical devices
- Operating System: openEuler 22.03 LTS SP2
Deployment Guide#
Task One: Configuration Preparation#
- Rename hostnames
# Change the hostname of 10.10.3.117 to future-ceph-node0
hostnamectl set-hostname future-ceph-node0 && bash
# Change the hostname of 10.10.3.118 to future-ceph-node1
hostnamectl set-hostname future-ceph-node1 && bash
# Change the hostname of 10.10.3.119 to future-ceph-node2
hostnamectl set-hostname future-ceph-node2 && bash
# Change the hostname of 10.10.3.120 to future-ceph-node3
hostnamectl set-hostname future-ceph-node3 && bash
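To confirm the change took effect, the hostname can be checked on each node, for example:
# Verify the new hostname
hostnamectl status
hostname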
- Pre-installation configuration changes
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
firewall-cmd --state
# Permanently set SELinux to permissive (setenforce applies immediately; the config change persists across reboots)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
cat /etc/selinux/config
# Add hosts
cat >> /etc/hosts << EOF
10.10.3.117 future-ceph-node0
10.10.3.118 future-ceph-node1
10.10.3.119 future-ceph-node2
10.10.3.120 future-ceph-node3
EOF
# View
cat /etc/hosts
# Synchronize time using chrony (ntpd is not installed here, since ntpd and chronyd conflict)
yum install chrony -y
systemctl start chronyd
systemctl enable chronyd
# Append the upstream NTP server and the client subnet allowed to sync from this node
echo "
server 10.10.3.70 iburst
allow 10.10.3.0/24
" >> /etc/chrony.conf
timedatectl set-ntp true
systemctl restart chronyd
timedatectl status
date
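After chronyd restarts, the time sources and synchronization status can be verified, for example:
# List configured time sources and show sync status
chronyc sources -v
chronyc tracking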
- Set up passwordless SSH from future-ceph-node0 to the other nodes
ssh-keygen -t rsa
ssh-copy-id 10.10.3.118
ssh-copy-id 10.10.3.119
ssh-copy-id 10.10.3.120
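A quick way to confirm passwordless login works from future-ceph-node0:
# Each command should print the remote hostname without asking for a password
for h in 10.10.3.118 10.10.3.119 10.10.3.120; do ssh $h hostname; done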
Task Two: Install Docker#
- Configure the yum repository for Docker CE by writing the following content to /etc/yum.repos.d/docker-ce.repo:
cat > /etc/yum.repos.d/docker-ce.repo << 'EOF'
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://download.docker.com/linux/centos/7/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://download.docker.com/linux/centos/gpg
EOF
- Install Docker CE. Run the following command to install Docker CE and the Compose plugin (used by the docker compose command below):
yum -y install docker-ce docker-ce-cli containerd.io docker-compose-plugin
# Start Docker and set it to start on boot
systemctl start docker
systemctl enable docker
# Check version
docker -v
docker compose version
- Change Docker's data storage path to /data/docker by updating the daemon configuration as follows.
# Write the configuration to the daemon.json file
echo '{
  "data-root": "/data/docker"
}' > /etc/docker/daemon.json
# View
cat /etc/docker/daemon.json
systemctl daemon-reload
systemctl restart docker
docker info
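To confirm the new storage path is in use:
# Should report "Docker Root Dir: /data/docker"
docker info | grep "Docker Root Dir"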
Task Three: Build Cluster Using cephadm#
Install Ceph Software#
Install Ceph software on all hosts:
yum install -y cephadm ceph-common
# Check version
ceph -v
Bootstrap the Admin Node#
Bootstrap the cluster on 10.10.3.117:
cephadm bootstrap --mon-ip 10.10.3.117
The bootstrap output prints the dashboard URL and an initial password; log in to the dashboard and change the default password:
Fetching dashboard port number...
Ceph Dashboard is now available at:
URL: https://future-ceph-node0:8443/
User: admin
Password: p4csdavtmr
Enabling client.admin keyring and conf on hosts with "admin" label
You can access the Ceph CLI with:
sudo /usr/sbin/cephadm shell --fsid 5f344e64-85e4-11ee-9181-0050569a1378 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
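After bootstrap, the state of the single-node cluster can be checked from future-ceph-node0, for example:
# The cluster should report one mon and one mgr at this point
ceph -s
# List the daemons cephadm has deployed so far
ceph orch ps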
Add Nodes#
# Open a cephadm shell to run ceph commands
cephadm shell
# Distribute the cluster's public SSH key so cephadm can reach the other nodes without a password
ceph cephadm get-pub-key > ~/ceph.pub
ssh-copy-id -f -i ~/ceph.pub root@future-ceph-node1
ssh-copy-id -f -i ~/ceph.pub root@future-ceph-node2
ssh-copy-id -f -i ~/ceph.pub root@future-ceph-node3
# Add ceph nodes
ceph orch host add future-ceph-node1 10.10.3.118
ceph orch host add future-ceph-node2 10.10.3.119
ceph orch host add future-ceph-node3 10.10.3.120
# View nodes
ceph orch host ls
# Add the _admin label so these nodes can also run ceph commands
ceph orch host label add future-ceph-node1 _admin
ceph orch host label add future-ceph-node2 _admin
ceph orch host label add future-ceph-node3 _admin
# Copy configuration files to other nodes
scp /etc/ceph/ceph.conf future-ceph-node1:/etc/ceph/
scp /etc/ceph/ceph.conf future-ceph-node2:/etc/ceph/
scp /etc/ceph/ceph.conf future-ceph-node3:/etc/ceph/
scp /etc/ceph/ceph.client.admin.keyring future-ceph-node1:/etc/ceph/
scp /etc/ceph/ceph.client.admin.keyring future-ceph-node2:/etc/ceph/
scp /etc/ceph/ceph.client.admin.keyring future-ceph-node3:/etc/ceph/
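With the configuration and keyring distributed, the other nodes should also be able to query the cluster, for example:
# Run from future-ceph-node0; should return the cluster status
ssh future-ceph-node1 ceph -s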
Set Up Mon Nodes (Monitors)#
ceph orch apply mon --placement="4 future-ceph-node0 future-ceph-node1 future-ceph-node2 future-ceph-node3"
# View detailed information about mon
ceph mon dump
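All four monitors should then report as part of the quorum, for example:
# Show monitor quorum membership
ceph mon stat
ceph quorum_status --format json-pretty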
Set Up Mgr Nodes (Managers)#
ceph orch apply mgr --placement="2 future-ceph-node0 future-ceph-node1"
# View details
ceph orch ls
# Add labels
ceph orch host label add future-ceph-node0 master
ceph orch host label add future-ceph-node1 master
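The manager daemons can be checked as well; one should be active and the other on standby, for example:
# Show mgr daemons and which one is active
ceph orch ps --daemon-type mgr
ceph mgr stat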
Add OSDs (Object Storage Daemons)#
# List block devices to identify the disks to use for OSDs
lsblk
# Add an OSD on /dev/sdb of each node
ceph orch daemon add osd future-ceph-node0:/dev/sdb
ceph orch daemon add osd future-ceph-node1:/dev/sdb
ceph orch daemon add osd future-ceph-node2:/dev/sdb
ceph orch daemon add osd future-ceph-node3:/dev/sdb
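Once the OSDs are created, their status and capacity can be reviewed, for example:
# All four OSDs should be "up" and "in"
ceph osd tree
ceph osd df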
Create MDS (Metadata Server)#
# Create cephfs file system
ceph osd pool create cephfs_data
ceph osd pool create cephfs_metadata
ceph fs new cephfs cephfs_metadata cephfs_data
# Set up mds components
ceph orch apply mds cephfs --placement="4 future-ceph-node0 future-ceph-node1 future-ceph-node2 future-ceph-node3"
# View mds status
ceph orch ps --daemon-type mds
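With the file system active, it can be mounted on a client via the kernel CephFS driver; a minimal sketch, using the admin key for simplicity and /mnt/cephfs as a hypothetical mount point (a dedicated client key is preferable in practice):
# Confirm the file system and MDS ranks are active
ceph fs status cephfs
# Mount CephFS from one of the monitor nodes
mkdir -p /mnt/cephfs
mount -t ceph future-ceph-node0:6789:/ /mnt/cephfs -o name=admin,secret=$(ceph auth get-key client.admin)
df -h /mnt/cephfs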
Configure RGW (RADOS Gateway)#
# Create a realm named myorg
radosgw-admin realm create --rgw-realm=myorg
# Create a zonegroup named default (set as the master zonegroup)
radosgw-admin zonegroup create --rgw-zonegroup=default --master
# Create a zone named cn-east-1 (set as the master zone)
radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=cn-east-1 --master
# Commit the period so the realm/zonegroup/zone configuration takes effect
radosgw-admin period update --rgw-realm=myorg --commit
# Configure radosgw daemons for the realm and zone
ceph orch apply rgw myorg cn-east-1 --placement="4 future-ceph-node0 future-ceph-node1 future-ceph-node2 future-ceph-node3"
# Verify
ceph orch ps --daemon-type rgw
ceph -s
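To verify object storage access, an S3 user can be created and its keys used with any S3-compatible client; a minimal sketch (the uid and display name are arbitrary examples):
# Create an S3 user; the output includes access_key and secret_key
radosgw-admin user create --uid=testuser --display-name="Test User"
# cephadm deploys RGW on port 80 by default; an anonymous request should return an XML response
curl http://future-ceph-node0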
Access Testing#
# ceph-Dashboard
https://10.10.3.117:8443
# Prometheus
http://10.10.3.117:9095/
# Grafana
https://10.10.3.117:3000/
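If a page does not load in the browser, reachability can first be checked from the command line, for example:
# -k skips verification of the self-signed certificates
curl -k -I https://10.10.3.117:8443
curl -I http://10.10.3.117:9095/
curl -k -I https://10.10.3.117:3000/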