
Ceph Cluster Deployment Guide

Task Objectives#

  1. Complete the deployment of the Ceph cluster

Task Platform#

  1. Physical devices
  2. Operating System: openEuler 22.03 LTS SP2

Deployment Guide#

Task 1: Configuration Preparation#

  1. Rename the hostnames
# Change the hostname of 10.10.3.117 to future-ceph-node0
hostnamectl set-hostname future-ceph-node0 && bash
# Change the hostname of 10.10.3.118 to future-ceph-node1
hostnamectl set-hostname future-ceph-node1 && bash
# Change the hostname of 10.10.3.119 to future-ceph-node2
hostnamectl set-hostname future-ceph-node2 && bash
# Change the hostname of 10.10.3.120 to future-ceph-node3
hostnamectl set-hostname future-ceph-node3 && bash
  1. Pre-installation configuration modifications
# Stop the firewall
systemctl stop firewalld
systemctl disable firewalld
firewall-cmd --state
 
# Set SELinux to permissive (takes effect immediately and persists via the config file)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
cat /etc/selinux/config

# Add hosts
cat >> /etc/hosts << EOF
10.10.3.117 future-ceph-node0
10.10.3.118 future-ceph-node1
10.10.3.119 future-ceph-node2
10.10.3.120 future-ceph-node3
EOF
# View
cat /etc/hosts

# Synchronize time with chrony (the configuration below is chrony-specific)
yum install chrony -y
systemctl start chronyd
systemctl enable chronyd
# Append the time source and the allowed client subnet to the chrony configuration
echo "
server 10.10.3.70 iburst
allow 10.10.3.0/24
" >> /etc/chrony.conf
timedatectl set-ntp true
systemctl restart chronyd
timedatectl status
date
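# Optional check: confirm chronyd is syncing against 10.10.3.70 (standard chrony commands; output varies by environment)
chronyc sources -v
chronyc tracking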

# Set up passwordless SSH from this node to the other nodes
ssh-keygen -t rsa
ssh-copy-id 10.10.3.118
ssh-copy-id 10.10.3.119
ssh-copy-id 10.10.3.120
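# Optional check: confirm passwordless SSH and name resolution to every node (assumes the /etc/hosts entries above)
for h in future-ceph-node1 future-ceph-node2 future-ceph-node3; do ssh "$h" hostname; done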

Task 2: Deploy Ceph as Distributed Storage#

Configure download source

echo "[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-mimic/el7/x86_64/
gpgcheck=0
priority=1

[ceph-noarch]
name=cephnoarch
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/noarch/
gpgcheck=0
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-mimic/el7/SRPMS/
enabled=0
gpgcheck=1
type=rpm-md
gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
" >/etc/yum.repos.d/ceph.repo

Download Python 2

  1. Install the zlib libraries; without them pip installation fails later and Python has to be recompiled
yum -y install zlib*
  1. Install GCC and the development libraries required for the build; if they are missing, install them with the following command
yum -y install gcc openssl-devel bzip2-devel
  1. Download Python-2.7.18
cd /usr/src
yum -y install wget
wget https://www.python.org/ftp/python/2.7.18/Python-2.7.18.tgz
tar xzf Python-2.7.18.tgz
  1. Before compiling, uncomment the following zlib line in Modules/Setup.dist inside the extracted source tree
#zlib zlibmodule.c -I$(prefix)/include -L$(exec_prefix)/lib -lz
  1. Compile Python-2.7.18 (make altinstall is used to prevent replacing the default python binary file /usr/bin/python)
cd /usr/src/Python-2.7.18
./configure --enable-optimizations
yum install -y make
make altinstall

Do not overwrite or link the original Python binary file, as this may damage the system.

  1. Set environment variables
# Append the following two lines to /etc/profile
vi /etc/profile
export PYTHON_HOME=/usr/local/
export PATH=$PATH:$PYTHON_HOME/bin
# Reload the profile
source /etc/profile
  1. Download the pip installation script
curl "https://bootstrap.pypa.io/pip/2.7/get-pip.py" -o "get-pip.py"
  1. Run the installation script
python2.7 get-pip.py 
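# Optional check: confirm the interpreter and pip are usable (version output will vary)
python2.7 --version
pip2 --version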

Download Ceph

# On future-ceph-node0
pip2 install ceph-deploy
yum install -y ceph ceph-radosgw
# On the other nodes
yum install -y ceph ceph-radosgw
# Check if the installation package is complete
rpm -qa |egrep -i "ceph|rados|rbd"
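# Optional check: confirm the installed release matches the repo configured above (a 13.2.x "mimic" build is expected)
ceph --version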

Deploy Ceph Cluster#

Management Node#
Deploy Monitor
  1. Create configuration file directory and create configuration file
mkdir /etc/ceph/
touch /etc/ceph/ceph.conf
  1. Generate an FSID for the cluster (example output shown below; use the value generated in your own environment consistently in the following steps):
uuidgen
38ac0084-40b7-4c93-b561-a16d6a6478c5
  1. Create a keyring for the cluster and generate a key for the Monitor service:
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
  1. Create an admin keyring, generate a client.admin user, and add this user to the keyring:
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
  1. Create a bootstrap-osd keyring and add the client.bootstrap-osd user to this keyring:
ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd'
  1. Import the generated keys into ceph.mon.keyring.
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring

ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
  1. Generate a monitor map using the hostname and IP address along with the FSID:
monmaptool --create --add future-ceph-node0 10.10.3.117 --fsid 79fd2206-39ca-4ec4-9cd2-96e065c6361e /tmp/monmap
  1. Create the directory for the monitor, using the format cluster-name-hostname:
mkdir /var/lib/ceph/mon/ceph-future-ceph-node0
  1. Fill in the information for the first monitor daemon:
ceph-mon --mkfs -i future-ceph-node0 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
  1. Configure the /etc/ceph/ceph.conf file:
cat /etc/ceph/ceph.conf
################################################
[global]
fsid = 79fd2206-39ca-4ec4-9cd2-96e065c6361e     # Generated FSID
mon initial members =future-ceph-node0
mon host = 10.10.3.117
public network = 10.10.3.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1

################################################
  1. Because the files above were created as root, change their ownership to the ceph user (alternatively, modify the systemd unit so the daemon runs as root), then start the Monitor
chown -R ceph:ceph /var/lib/ceph
systemctl start ceph-mon@future-ceph-node0
systemctl enable ceph-mon@future-ceph-node0
  1. Confirm that the service has started normally:
ceph -s
yum install -y net-tools
netstat -lntp|grep ceph-mon
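The ownership step above mentions editing the systemd unit as an alternative to chown. A minimal sketch of that approach, assuming the stock ceph-mon@.service passes --setuser ceph --setgroup ceph on its ExecStart line (verify with the grep first; if your unit differs, adjust accordingly):

grep setuser /usr/lib/systemd/system/ceph-mon@.service
sed -i 's/--setuser ceph --setgroup ceph/--setuser root --setgroup root/' /usr/lib/systemd/system/ceph-mon@.service
systemctl daemon-reload
systemctl restart ceph-mon@future-ceph-node0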
Deploy Manager

Once we have configured the ceph-mon service, we need to configure the ceph-mgr service.

  1. Generate an authentication key (ceph-mgr is a custom name):
# 10.10.3.117
ceph auth get-or-create mgr.ceph-mgr mon 'allow profile mgr' osd 'allow *' mds 'allow *'
[mgr.ceph-mgr]
        key = AQBMNTZl5adxEhAAk6Jk/CKNWUyNb2DoKXUPvQ==
        
# 10.10.3.118
ceph auth get-or-create mgr.ceph-mgr1 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
[mgr.ceph-mgr1]
        key = AQDbRTZlgjXWBBAAGew4Xta+t9vgIWPCWC8EVg==
  1. Create a directory to store this key file
# 10.10.3.117
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-ceph-mgr
# Store the generated key file in this directory and name it keyring
vi /var/lib/ceph/mgr/ceph-ceph-mgr/keyring 
[mgr.ceph-mgr]
        key = AQBMNTZl5adxEhAAk6Jk/CKNWUyNb2DoKXUPvQ==
        
# 10.10.3.118
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-ceph-mgr1
# Store the generated key file in this directory and name it keyring
vi /var/lib/ceph/mgr/ceph-ceph-mgr1/keyring 
[mgr.ceph-mgr1]
        key = AQDbRTZlgjXWBBAAGew4Xta+t9vgIWPCWC8EVg==
  1. Start the ceph-mgr service
# 10.10.3.117
ceph-mgr -i ceph-mgr
systemctl enable ceph-mgr@ceph-mgr
# 10.10.3.118
ceph-mgr -i ceph-mgr1
systemctl enable ceph-mgr@ceph-mgr1
# Check if the service has started, view ceph status
ceph -s
# View available modules in the current mgr
ceph mgr module ls
Create OSD
ceph-volume lvm create --data /dev/sdb
# View the current lvm logical volume
ceph-volume lvm list
# View ceph status
ceph -s
Install and Configure Ceph Dashboard
  1. Enable the dashboard feature
ceph mgr module enable dashboard
  1. Create a certificate
ceph dashboard create-self-signed-cert
  1. Configure the username and password for web login
# Create /etc/ceph/dashboard.key and write the password into it
echo "password" >/etc/ceph/dashboard.key
ceph dashboard ac-user-create k8s administrator -i /etc/ceph/dashboard.key
  1. Modify the default port of the dashboard (optional)

The default port is 8443; change it to 18443 and restart mgr for the change to take effect.

ceph config set mgr mgr/dashboard/server_port 18443
systemctl restart ceph-mgr.target
  1. View the published service address and log in
ceph mgr services

{
    "dashboard": "https://future-ceph-node0:8443/"
}

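If the dashboard does not load, a quick reachability check can be run from any node (the certificate is self-signed, hence -k; use port 18443 if it was changed above):

curl -k -I https://future-ceph-node0:8443/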

Other Nodes#
Expand Monitor
  1. Modify the configuration on the master node
vi /etc/ceph/ceph.conf
[global]
fsid = 79fd2206-39ca-4ec4-9cd2-96e065c6361e     # Generated FSID
mon initial members =future-ceph-node0,future-ceph-node1,future-ceph-node2,future-ceph-node3            # Hostnames
mon host = 10.10.3.117,10.10.3.118,10.10.3.119,10.10.3.120                       # Corresponding IPs
public network = 10.10.3.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
[mon]
mon allow pool delete = true

[mds.future-ceph-node0]
host = future-ceph-node0
  1. Distribute the configuration and key files from the master node to the other nodes
# Generate a key pair and copy the public key to the other nodes (skip if already done in Task 1)
ssh-keygen -t rsa
ssh-copy-id 10.10.3.118
ssh-copy-id 10.10.3.119
ssh-copy-id 10.10.3.120
# Copy authentication keys
scp /etc/ceph/*  10.10.3.118:/etc/ceph/
scp /etc/ceph/*  10.10.3.119:/etc/ceph/
scp /etc/ceph/*  10.10.3.120:/etc/ceph/
  1. On the other nodes, create the Ceph directories and set their ownership:
mkdir -p  /var/lib/ceph/{bootstrap-mds,bootstrap-mgr,bootstrap-osd,bootstrap-rbd,bootstrap-rgw,mds,mgr,mon,osd}
chown  -R ceph:ceph /var/lib/ceph

sudo -u ceph mkdir /var/lib/ceph/mon/ceph-future-ceph-node1
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-future-ceph-node2
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-future-ceph-node3
  1. Modify the configuration file on each node, taking node1 as an example (the other nodes are similar)
[global]
fsid = 79fd2206-39ca-4ec4-9cd2-96e065c6361e     # Generated FSID
mon initial members =future-ceph-node0,future-ceph-node1,future-ceph-node2,future-ceph-node3            # Hostnames
mon host = 10.10.3.117,10.10.3.118,10.10.3.119,10.10.3.120                       # Corresponding IPs
public network = 10.10.3.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
[mon]
mon allow pool delete = true

[mon.future-ceph-node1]
mon_addr = 10.10.3.118:6789
host = future-ceph-node1
  1. Obtain the keys and map from the cluster, taking node1 as an example (other nodes are similar)
ceph auth get mon. -o /tmp/monkeyring
ceph mon getmap -o /tmp/monmap
  1. Use the existing keys and map to add a new Monitor, specifying the hostname, taking node1 as an example (other nodes are similar)
sudo -u ceph ceph-mon --mkfs -i future-ceph-node1 --monmap /tmp/monmap --keyring /tmp/monkeyring
  1. Start the service, taking node1 as an example (other nodes are similar)
systemctl start ceph-mon@future-ceph-node1
systemctl enable ceph-mon@future-ceph-node1
# View mon status
ceph -s
ceph mon stat
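# Optional check: confirm all four monitors have joined the quorum
ceph quorum_status --format json-pretty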
Add OSD

Copy the bootstrap-osd key from the master node to the other nodes

scp -p  /var/lib/ceph/bootstrap-osd/ceph.keyring  10.10.3.118:/var/lib/ceph/bootstrap-osd/
scp -p  /var/lib/ceph/bootstrap-osd/ceph.keyring  10.10.3.119:/var/lib/ceph/bootstrap-osd/
scp -p  /var/lib/ceph/bootstrap-osd/ceph.keyring  10.10.3.120:/var/lib/ceph/bootstrap-osd/

Add OSDs on the other nodes

ceph-volume lvm create --data /dev/sdb

# The ceph-osd unit instance is the numeric OSD id assigned by ceph-volume (check it with ceph-volume lvm list), e.g.:
systemctl restart ceph-osd@1
systemctl enable ceph-osd@1
# View status
ceph -s
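# Optional check: confirm every OSD is up and in, and review per-OSD capacity
ceph osd tree
ceph osd df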

Add MDS#

# Create directory
sudo -u ceph mkdir -p /var/lib/ceph/mds/ceph-future-ceph-node0
# Create key
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-future-ceph-node0/keyring --gen-key -n mds.future-ceph-node0
# Import key and set caps
ceph auth add mds.future-ceph-node0 osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-future-ceph-node0/keyring
# Manually start service
ceph-mds --cluster ceph -i future-ceph-node0 -m future-ceph-node0:6789
chown -R ceph:ceph /var/lib/ceph/mds/
systemctl start ceph-mds@future-ceph-node0
systemctl enable ceph-mds@future-ceph-node0
# Check if the service has started
ps -ef|grep ceph-mds
# Check ceph cluster status
ceph -s

Create CephFS#

Create pools

# Store data
ceph osd pool create cephfs_data 64
# Store metadata
ceph osd pool create cephfs_metadata 64
# Create the cephfs file system
ceph fs new cephfs cephfs_metadata cephfs_data
# View file system status
ceph fs ls
ceph mds stat
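The mount examples below authenticate as client.admin. As a less privileged alternative, a client key scoped to this filesystem can be created; a sketch, assuming a hypothetical client name cephfs-user (ceph fs authorize is available from Luminous onward):

ceph fs authorize cephfs client.cephfs-user / rw > /etc/ceph/ceph.client.cephfs-user.keyring
ceph auth get client.cephfs-user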
  1. Mount cephfs

Kernel driver mount

# Install dependencies
yum install -y ceph-common
# Create mount point
mkdir /data
# Get storage key (management node)
cat /etc/ceph/ceph.client.admin.keyring
# On the client host, mount CephFS from the monitor at 10.10.3.117 onto /data
mount -t ceph 10.10.3.117:6789:/ /data -o name=admin,secret=AQBLfS9l612IGhAAOF1iJqT6+rHJPCxqQegqCA==
 
# Unmount
umount /data  
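To make the kernel mount persist across reboots, an /etc/fstab entry can be used; a sketch assuming the admin key shown above is stored on the client in /etc/ceph/admin.secret (a file containing only the key):

echo "AQBLfS9l612IGhAAOF1iJqT6+rHJPCxqQegqCA==" > /etc/ceph/admin.secret
echo "10.10.3.117:6789:/  /data  ceph  name=admin,secretfile=/etc/ceph/admin.secret,_netdev,noatime  0 0" >> /etc/fstab
mount -a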

User-space (ceph-fuse) mount

yum install ceph-fuse -y
# Mount CephFS on the client host
ceph-fuse -m 10.10.3.117:6789 /data
# Verify the mount
df -h | grep ceph

# Unmount
fusermount -u /data

Create RBD Pool#

# Create rbd pool
ceph osd pool create rbd-k8s 64 64
# Enable the rbd application on the pool
ceph osd pool application enable rbd-k8s rbd
# Initialize
rbd pool init rbd-k8s
# View
ceph osd lspools
# View images
rbd ls rbd-k8s
# View image information
rbd info --pool rbd-k8s --image mars-c09bb992-963e-43dd-8e5c-e3a1d723b1c8 
# Map image
rbd map  --pool rbd-k8s --image mars-c09bb992-963e-43dd-8e5c-e3a1d723b1c8
# Format
mkfs.ext4 /dev/rbd0
# Mount to local
mount /dev/rbd0 /data/code
# View
lsblk
# Unmount
umount /dev/rbd0
# Unmap
rbd unmap /dev/rbd0
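The info and map commands above assume an image already exists in the pool. A minimal sketch of creating and listing one, using a hypothetical image name and a 10 GiB size:

rbd create --pool rbd-k8s --size 10G test-image
rbd ls rbd-k8s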