High Availability k8s Cluster Deployment Guide

Task Objectives#

  1. Complete the installation and deployment of a high availability k8s cluster.

Task Platform#

  1. Physical devices
  2. Operating System: openEuler 22.03 LTS SP2

Deployment Guide#

Cluster Topology Diagram (image not included here)

Task One: Configuration Preparation#

  1. Rename hostname
# Change the hostname of 10.10.3.121 to future-k8s-node0
hostnamectl set-hostname future-k8s-node0 && bash
# Change the hostname of 10.10.3.122 to future-k8s-node1
hostnamectl set-hostname future-k8s-node1 && bash
# Change the hostname of 10.10.3.123 to future-k8s-node2
hostnamectl set-hostname future-k8s-node2 && bash
# Change the hostname of 10.10.3.124 to future-k8s-node3
hostnamectl set-hostname future-k8s-node3 && bash
  2. Pre-installation configuration modifications
# Stop the firewall
systemctl stop firewalld
systemctl disable firewalld
firewall-cmd --state
 
# Disable SELinux (set to permissive now and permanently)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
cat /etc/selinux/config

# Permanently disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
cat /etc/fstab

# Add hosts
cat >> /etc/hosts << EOF
10.10.3.121 future-k8s-node0
10.10.3.122 future-k8s-node1
10.10.3.123 future-k8s-node2
10.10.3.124 future-k8s-node3
10.10.3.125 future-k8s-vip
EOF
# View
cat /etc/hosts

# Add bridge filtering and kernel forwarding configuration file
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# View
cat /etc/sysctl.d/k8s.conf
# Load br_netfilter module
modprobe br_netfilter
# Check if loaded
lsmod | grep br_netfilter
# Load bridge filtering and kernel forwarding configuration file
sysctl -p /etc/sysctl.d/k8s.conf

# Synchronize time (using chrony)
yum install chrony -y
systemctl start chronyd
systemctl enable chronyd
# Modify the configuration, adding the time server and allowed subnet
echo "
server 10.10.3.70 iburst
allow 10.10.3.0/24
" >> /etc/chrony.conf
timedatectl set-ntp true
systemctl restart chronyd
timedatectl status
date
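To confirm that chrony is actually syncing against 10.10.3.70, you can check the source list (a quick verification sketch using the standard chronyc tool):

# Verify that the configured NTP source is reachable and selected
chronyc sources -v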
  3. Install ipset and ipvsadm
# Install ipset and ipvsadm
yum -y install ipset ipvsadm

# Configure module loading for ipvs: add the modules to load
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# View
cat /etc/sysconfig/modules/ipvs.modules
# Make the script executable, run it, and check that the modules are loaded
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack

# Restart
reboot

After the configuration preparation is complete, all nodes need to restart.

Task Two: Install Docker#

  1. Configure the yum repository for Docker CE by writing the following content to /etc/yum.repos.d/docker-ce.repo:
echo '
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://download.docker.com/linux/centos/7/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://download.docker.com/linux/centos/gpg
' > /etc/yum.repos.d/docker-ce.repo

Verify the file contents with cat /etc/yum.repos.d/docker-ce.repo.

  2. Install Docker CE. Run the following command (docker-compose-plugin is included so the docker compose version check below works):
yum -y install docker-ce docker-ce-cli containerd.io docker-compose-plugin
# Start docker and set it to start on boot
systemctl start docker  
systemctl enable docker
# Check version
docker -v
docker compose version
  3. Modify the Docker configuration to set the cgroup driver to systemd, as follows.
# Write the configuration to the daemon.json file
echo '{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "data-root": "/data/docker"
}' > /etc/docker/daemon.json
# View
cat /etc/docker/daemon.json
systemctl daemon-reload
systemctl restart docker
docker info
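After the restart you can confirm that the cgroup driver change took effect and that the new data-root is in use by filtering the docker info output (a quick verification sketch):

# Should report "Cgroup Driver: systemd" and "Docker Root Dir: /data/docker"
docker info | grep -E -i 'cgroup driver|docker root dir'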
  4. Create the required directories (under /data, the working directory used throughout this guide)
mkdir -p /data && cd /data
mkdir cri-dockerd calico dashboard docker metrics-server script ingress-nginx

Task Three: Install cri-dockerd (for k8s version 1.24 and above)#

cd /data/cri-dockerd
# Download cri-dockerd installation package
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd-0.3.4-3.el8.x86_64.rpm
# Install cri-dockerd
rpm -ivh cri-dockerd-0.3.4-3.el8.x86_64.rpm
docker pull registry.aliyuncs.com/google_containers/pause:3.9
# Point the pod-infra (pause) image at the Aliyun mirror; otherwise kubelet cannot pull the image and fails to start
sed -i.bak 's|ExecStart=.*$|ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9|g' /usr/lib/systemd/system/cri-docker.service
cat /usr/lib/systemd/system/cri-docker.service
# Start cri-dockerd
systemctl daemon-reload 
systemctl start cri-docker.service
systemctl enable cri-docker.service
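To confirm that the runtime endpoint kubeadm will use later is actually available, check the service state and its socket (a quick verification sketch):

# cri-docker should be active and its socket present
systemctl is-active cri-docker.service
ls -l /var/run/cri-dockerd.sock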

Task Four: Install High Availability Components#

Deploying a high availability cluster requires keepalived and haproxy to provide master-node high availability. Perform the following operations on each master node.

  1. Install keepalived and haproxy
yum install keepalived haproxy -y
  2. Back up the keepalived and haproxy configuration files
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
  3. Modify the /etc/keepalived/keepalived.conf file on each master node
    1. future-k8s-node0

echo '
global_defs {
   router_id k8s
}

vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state MASTER  # Master node is MASTER, others are BACKUP
    interface ens192  # Network card name
    virtual_router_id 51
    priority 250   # Priority
    nopreempt   # Set to non-preemptive mode
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        10.10.3.125/24   # Virtual IP
    }
    track_script {
        check_haproxy
    }
}
' > /etc/keepalived/keepalived.conf

    2. future-k8s-node1

echo '
global_defs {
   router_id k8s
}

vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP  # Master node is MASTER, others are BACKUP
    interface ens192  # Network card name
    virtual_router_id 51
    priority 200   # Priority
    nopreempt   # Set to non-preemptive mode
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        10.10.3.125/24   # Virtual IP
    }
    track_script {
        check_haproxy
    }
}
' > /etc/keepalived/keepalived.conf

    3. future-k8s-node2

echo '
global_defs {
   router_id k8s
}

vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP  # Master node is MASTER, others are BACKUP
    interface ens192  # Network card name
    virtual_router_id 51
    priority 150   # Priority
    nopreempt   # Set to non-preemptive mode
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        10.10.3.125/24   # Virtual IP
    }
    track_script {
        check_haproxy
    }
}
' > /etc/keepalived/keepalived.conf

  4. Modify the /etc/haproxy/haproxy.cfg file on each master node (the configuration files for the three master nodes are identical)
echo "
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # To have these messages end up in /var/log/haproxy.log you will
    # need to:
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # Turn on stats unix socket
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# Common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
#---------------------------------------------------------------------
# Kubernetes apiserver frontend which proxys to the backends
#---------------------------------------------------------------------
frontend kubernetes-apiserver
    mode                 tcp
    bind                 *:16443 # High availability monitoring port, used when initializing the k8s cluster
    option               tcplog
    default_backend      kubernetes-apiserver
#---------------------------------------------------------------------
# Round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode        tcp
    balance     roundrobin
    server      future-k8s-node0   10.10.3.121:6443 check
    server      future-k8s-node1   10.10.3.122:6443 check
    server      future-k8s-node2   10.10.3.123:6443 check

#---------------------------------------------------------------------
# Collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind                 *:1080
    stats auth           admin:awesomePassword
    stats refresh        5s
    stats realm          HAProxy\ Statistics
    stats uri            /admin?stats

" > /etc/haproxy/haproxy.cfg
  5. Start the services (start them on each master node, in order)
# Start keepalived  
systemctl enable keepalived  && systemctl start keepalived  
# Start haproxy 
systemctl enable haproxy && systemctl start haproxy
systemctl status keepalived
systemctl status haproxy
  6. Check the bound VIP address on future-k8s-node0

ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:9a:eb:48 brd ff:ff:ff:ff:ff:ff
    inet 10.10.3.121/24 brd 10.10.3.255 scope global noprefixroute ens192
       valid_lft forever preferred_lft forever
    inet 10.10.3.125/24 scope global ens192
       valid_lft forever preferred_lft forever
    inet6 fe80::250:56ff:fe9a/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
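To verify that the VIP actually fails over, you can stop haproxy on the node currently holding it and watch the address move to another master (a hedged test sketch, using the service and interface names configured above):

# On future-k8s-node0 (current VIP holder): stop haproxy so the check_haproxy script fails
systemctl stop haproxy
# On future-k8s-node1 or node2: the VIP 10.10.3.125 should appear within a few seconds
ip addr show ens192 | grep 10.10.3.125
# Restore haproxy on future-k8s-node0 afterwards
systemctl start haproxy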

Task Five: Deploy k8s Cluster#

  1. Add yum software source#

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
  2. Install kubeadm, kubelet, and kubectl#

# Install kubelet, kubeadm, kubectl
yum install -y kubelet-1.27.0 kubeadm-1.27.0 kubectl-1.27.0 --disableexcludes=kubernetes

# Change cgroup to systemd
echo 'KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"' > /etc/sysconfig/kubelet
# View
cat /etc/sysconfig/kubelet
# Start kubelet and set it to start on boot (it will restart in a loop until the cluster is initialized)
systemctl start kubelet.service
systemctl enable kubelet.service
systemctl status kubelet.service

# Check versions
kubeadm version
kubelet --version
kubectl version
  3. Initialize the k8s cluster (on future-k8s-node0)#

    Method One: Initialize using configuration file#
    1. Export the default configuration file (optional)

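    If you want to start from kubeadm's built-in defaults and edit them, the defaults can be exported like this (a minimal sketch; the output path matches the config file used below):

# Print kubeadm's default InitConfiguration/ClusterConfiguration as a starting point
kubeadm config print init-defaults > /data/script/kubeadm-config.yaml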

    2. Configuration file
echo '
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.10.3.125  # Virtual IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
---
apiServer:
  certSANs:    # Master nodes and the corresponding hostnames
    - future-k8s-node0
    - future-k8s-node1
    - future-k8s-node2
    - future-k8s-vip
    - 10.10.3.121
    - 10.10.3.122
    - 10.10.3.123
    - 10.10.3.125
    - 127.0.0.1
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "future-k8s-vip:16443" # Virtual IP and high availability port
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.27.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
' > /data/script/kubeadm-config.yaml
    
    3. Cluster initialization
kubeadm init --config /data/script/kubeadm-config.yaml --upload-certs

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    Method Two: Initialize using command#
    1. Deploy the first master node: execute the following on 10.10.3.121 to initialize it
# --control-plane-endpoint points at the virtual IP and the haproxy port
kubeadm init \
  --apiserver-advertise-address=10.10.3.121 \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.27.0 \
  --control-plane-endpoint=future-k8s-vip:16443 \
  --service-cidr=10.96.0.0/12 \
  --pod-network-cidr=10.244.0.0/16 \
  --cri-socket=unix:///var/run/cri-dockerd.sock \
  --ignore-preflight-errors=all

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    2. Configure SSH passwordless access
    # Generate public key on 10.10.3.121, copy to other master nodes
    ssh-keygen -t rsa
    ssh-copy-id 10.10.3.122
    ssh-copy-id 10.10.3.123
    
    3. Copy the certificates from 10.10.3.121 to the other master nodes (a consolidated loop sketch follows the per-file commands below)
    # Create the certificate storage directories on the other master nodes
    cd /root && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/
    
    # Copy certificates from future-k8s-node0 to future-k8s-node1
    scp /etc/kubernetes/pki/ca.crt 10.10.3.122:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/ca.key 10.10.3.122:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/sa.key 10.10.3.122:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.pub 10.10.3.122:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/front-proxy-ca.crt 10.10.3.122:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/front-proxy-ca.key 10.10.3.122:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/etcd/ca.crt 10.10.3.122:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/pki/etcd/ca.key 10.10.3.122:/etc/kubernetes/pki/etcd/
    
    # Copy certificates from future-k8s-node0 to future-k8s-node2
    scp /etc/kubernetes/pki/ca.crt 10.10.3.123:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/ca.key 10.10.3.123:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/sa.key 10.10.3.123:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.pub 10.10.3.123:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/front-proxy-ca.crt 10.10.3.123:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/front-proxy-ca.key 10.10.3.123:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/etcd/ca.crt 10.10.3.123:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/pki/etcd/ca.key 10.10.3.123:/etc/kubernetes/pki/etcd/
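    The same copy can be expressed as a small loop, which avoids repeating the scp lines for each node (a sketch using the same file list and node IPs as above):

    # Copy the shared CA/SA certificates to both additional master nodes in one pass
    for node in 10.10.3.122 10.10.3.123; do
      for f in ca.crt ca.key sa.key sa.pub front-proxy-ca.crt front-proxy-ca.key; do
        scp /etc/kubernetes/pki/$f $node:/etc/kubernetes/pki/
      done
      scp /etc/kubernetes/pki/etcd/ca.crt /etc/kubernetes/pki/etcd/ca.key $node:/etc/kubernetes/pki/etcd/
    done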
    
  4. Initialize the other master nodes#

kubeadm join future-k8s-vip:16443 --token ysl0xr.knx79yu06cldwiy1 \
    --discovery-token-ca-cert-hash sha256:5dd8de94e08a560c7c2424dde0719a9f4e6ac4e5e5fe538ebbab0cbc5866b000 \
    --control-plane --cri-socket=unix:///var/run/cri-dockerd.sock
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
  5. Initialize the worker nodes#

kubeadm join 10.10.3.121:6443 --token pzyo37.oaaqt1nrw3u7ijuj \
    --discovery-token-ca-cert-hash sha256:b8067f74af04b63399af1de28644223178e5d63e8258c25d465e78aca515e887 \
    --cri-socket=unix:///var/run/cri-dockerd.sock
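If the bootstrap token above has expired (tokens are valid for 24 hours by default), a fresh worker join command can be generated on a master node; append --cri-socket=unix:///var/run/cri-dockerd.sock to the printed command before running it:

# Print a new join command with a fresh token and CA hash
kubeadm token create --print-join-command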
  6. Set master nodes to allow scheduling of Pods (optional)#

By default, Kubernetes will not schedule Pods to the Master node. If you want to use k8s-master as a Node as well, you need to remove the taint and enable scheduling.

# View the default configured taint
kubectl describe node future-k8s-node2 |grep Taints

Taints: node-role.kubernetes.io/control-plane

# Remove the taint
kubectl taint nodes future-k8s-node2 node-role.kubernetes.io/control-plane-

Add worker label

# Add worker label
kubectl label nodes future-k8s-node2 node-role.kubernetes.io/worker=
# Remove worker label
kubectl label nodes future-k8s-node2 node-role.kubernetes.io/worker-

Task Six: Install Network Plugin (master)#

Install calico

mkdir /data/calico
cd  /data/calico
wget https://docs.tigera.io/archive/v3.25/manifests/calico.yaml
# Edit calico.yaml: find CALICO_IPV4POOL_CIDR, uncomment it, and set it to the pod subnet
vi calico.yaml
############## Modify content ###################
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"
############## Modify content ###################
# Install calico on the master node
kubectl apply -f calico.yaml
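Calico pods can take a few minutes to pull images and become Ready; they can be watched with standard kubectl commands, for example:

# Wait until calico-node (one per node) and calico-kube-controllers are Running
kubectl get pods -n kube-system -o wide | grep calico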

Check node status

# View all nodes
kubectl get nodes
kubectl get nodes -o wide
# Check cluster health
 kubectl get cs

Task Seven: Install nginx for testing#

# Create Nginx deployment
kubectl create deployment nginx --image=nginx
# Expose port 80
kubectl expose deployment nginx --port=80 --type=NodePort
# Check pod status
kubectl get pod
# Check service status
kubectl get service
##########################################################################
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        5d1h
nginx        NodePort    10.98.221.224   <none>        80:32743/TCP   23s
##########################################################################
# Access web page for testing (port number based on service status)
http://10.10.3.121:32743/
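The same test can be run from the command line on any node, using the NodePort shown in the example output above (substitute your own port):

# Expect HTTP/1.1 200 OK from the nginx welcome page
curl -I http://10.10.3.121:32743/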

Task Eight: Install Dashboard Interface#

  1. Download yaml file
# Create the storage directory (under /data) and enter it
mkdir -p /data/dashboard
cd /data/dashboard/
# Dashboard v2.7.0
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
  2. Modify the yaml file
vi recommended.yaml
# Expose the kubernetes-dashboard Service as a NodePort (and optionally raise replicas to 2 for redundancy)
################# Modify content #######################
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 32009   # Add this line, pay attention to indentation
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort          # Add this line, pay attention to indentation
  ################# Modify content #######################
  3. Apply the manifest, then check the pod and svc
# Install
kubectl apply -f recommended.yaml
# Check pod and svc
kubectl get pod,svc -o wide -n kubernetes-dashboard
#########################################################
NAME                                             READY   STATUS              RESTARTS   AGE   IP       NODE    NOMINATED NODE   READINESS GATES
pod/dashboard-metrics-scraper-5cb4f4bb9c-mg569   0/1     ContainerCreating   0          9s    <none>   node1   <none>           <none>
pod/kubernetes-dashboard-6967859bff-2968p        0/1     ContainerCreating   0          9s    <none>   node1   <none>           <none>

NAME                                TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE   SELECTOR
service/dashboard-metrics-scraper   ClusterIP   10.100.129.191   <none>        8000/TCP        9s    k8s-app=dashboard-metrics-scraper
service/kubernetes-dashboard        NodePort    10.106.130.53    <none>        443:31283/TCP   9s    k8s-app=kubernetes-dashboard
########################################################

Use the NodePort shown in the svc output to access the Dashboard over HTTPS, for example https://<node-IP>:<NodePort>/.

  4. Create a dashboard service account
# Create a service account named admin-user and bind it to the cluster
vi dashboard-adminuser.yaml
################## Content ####################
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
  
---
# Create secret to obtain long-term holder token for the service account
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
  ################## Content ####################
 
  # Execute to take effect
  kubectl apply -f dashboard-adminuser.yaml
  5. Login methods

Option One: Obtain long-term available token

# Save the token in the /data/dashboard/admin-user.token file
cd /data/dashboard/
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d > admin-user.token 

Script to obtain a long-term usable token:

#!/bin/bash
# Author: Yun
############# Description #############
:<<!
Obtain long-term available token script
Store the token in the admin-user.token file
!
############# Description #############
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d > admin-user.token

echo -e "\033[1;32mToken created successfully, please check in the admin-user.token file\033[m"

Option Two: Use Kubeconfig file to log in

# Define the token variable
DASH_TOKEN=$(kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d)
# Set the kubeconfig cluster entry
kubectl config set-cluster kubernetes --server=https://10.10.3.121:6443 --kubeconfig=/root/dashboard-admin.conf
# Set the kubeconfig user entry
kubectl config set-credentials admin-user --token=$DASH_TOKEN --kubeconfig=/root/dashboard-admin.conf
# Set the kubeconfig context entry
kubectl config set-context admin-user@kubernetes --cluster=kubernetes --user=admin-user --kubeconfig=/root/dashboard-admin.conf
# Set the kubeconfig current context
kubectl config use-context admin-user@kubernetes --kubeconfig=/root/dashboard-admin.conf

Place the generated dashboard-admin.conf file on the local host; when logging in, select the Kubeconfig option and choose this file.

Task Nine: Install metrics-server#

Download deployment file

wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml -O metrics-server-components.yaml

Modify the Deployment section of the yaml file as follows (add the --kubelet-insecure-tls argument and switch the image to a domestic mirror)

---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls  # Add
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.6.4 # Modify
        imagePullPolicy: IfNotPresent

# Install
kubectl apply -f metrics-server-components.yaml

Check the status of the metrics-server pod

kubectl get pods --all-namespaces | grep metrics

Wait for a while and check if various monitoring images are displayed successfully.
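Once the metrics-server pod is Running, resource metrics should also be available through the standard kubectl top commands (a quick verification sketch):

# Node and pod resource usage served by metrics-server
kubectl top nodes
kubectl top pods -A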


Task Ten: kubectl command auto-completion#

yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
echo 'source <(kubectl completion bash)' >>  ~/.bashrc
bash

Task Eleven: Install ingress-nginx controller#

# Download yaml file
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.0.0/deploy/static/provider/baremetal/deploy.yaml
# Modify the image pull address in the yaml file
##################### Modify content ######################
willdockerhub/ingress-nginx-controller:v1.0.0
hzde0128/kube-webhook-certgen:v1.0
##################### Modify content ######################
# Change Deployment to DaemonSet
# Change network mode to host network
##################### Modify content ######################
template:
  spec:
    hostNetwork: true
    dnsPolicy: ClusterFirstWithHostNet
    tolerations:  # Tolerate the control-plane taint so the DaemonSet can run on every node
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
    nodeSelector:
      kubernetes.io/os: linux
      custem/ingress-controller-ready: 'true'
    containers:
      - name: controller
##################### Modify content ######################
# Label the nodes that should run the ingress controller (required by the nodeSelector above)
kubectl label nodes future-k8s-node0 custem/ingress-controller-ready=true
kubectl label nodes future-k8s-node1 custem/ingress-controller-ready=true
kubectl label nodes future-k8s-node2 custem/ingress-controller-ready=true
kubectl label nodes future-k8s-node3 custem/ingress-controller-ready=true

# Install
kubectl apply -f deploy.yaml

# Check status
kubectl get pods -n ingress-nginx
################ Status ##################
NAME                                       READY   STATUS      RESTARTS   AGE
ingress-nginx-admission-create-2lz4v       0/1     Completed   0          5m46s
ingress-nginx-admission-patch-c6896        0/1     Completed   0          5m46s
ingress-nginx-controller-7575fb546-q29qn   1/1     Running     0          5m46s
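Because the controller pods use hostNetwork, they listen on ports 80 and 443 of each labeled node; a quick check on one of those nodes (standard ss usage) looks like this:

# The ingress-nginx controller should be listening directly on the node
ss -lntp | grep nginx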

Task Twelve: Configure Dashboard Proxy#

echo '
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: k8s-dashboard
  namespace: kubernetes-dashboard
  labels:
    ingress: k8s-dashboard
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /  # Rewrite path
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"  # Automatically redirect http to https
    nginx.ingress.kubernetes.io/use-regex: "true"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"

spec:
  ingressClassName: nginx 
  rules:
    - host: k8s.yjs.51xueweb.cn
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: kubernetes-dashboard
                port:
                  number: 443
' > /data/dashboard/dashboard-ingress.yaml
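Apply the Ingress and make sure the hostname resolves to a node running the ingress controller; after that the Dashboard is reachable at https://k8s.yjs.51xueweb.cn. The hosts entry below is an example for a client machine, using node0's address (adjust to any node running the controller):

# Create the Ingress and confirm it was admitted
kubectl apply -f /data/dashboard/dashboard-ingress.yaml
kubectl get ingress -n kubernetes-dashboard
# Example client-side hosts entry
echo "10.10.3.121 k8s.yjs.51xueweb.cn" >> /etc/hosts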