
Highly Available k8s Cluster Deployment Guide

Task Objectives#

  1. Complete the installation and deployment of a highly available k8s cluster

Task Platform#

  1. Physical hardware --
  2. Operating system: openEuler 22.03 LTS SP2

Deployment Guide#

Cluster topology

(Topology diagram image omitted: three master nodes, 10.10.3.121-123, and one worker node, 10.10.3.124, sharing the keepalived VIP 10.10.3.125.)

Task 1: Configuration Preparation#

  1. Rename the hostnames
# Change the hostname of 10.10.3.121 to future-k8s-node0
hostnamectl set-hostname future-k8s-node0 && bash
# Change the hostname of 10.10.3.122 to future-k8s-node1
hostnamectl set-hostname future-k8s-node1 && bash
# Change the hostname of 10.10.3.123 to future-k8s-node2
hostnamectl set-hostname future-k8s-node2 && bash
# Change the hostname of 10.10.3.124 to future-k8s-node3
hostnamectl set-hostname future-k8s-node3 && bash
  2. Pre-installation configuration changes
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
firewall-cmd --state

# Permanently disable SELinux (setenforce for the running system, sed for reboots)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
cat /etc/selinux/config

# Permanently disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
cat /etc/fstab

# Add hosts entries
cat >> /etc/hosts << EOF
10.10.3.121 future-k8s-node0
10.10.3.122 future-k8s-node1
10.10.3.123 future-k8s-node2
10.10.3.124 future-k8s-node3
10.10.3.125 future-k8s-vip
EOF
# Check
cat /etc/hosts


# Add the bridge-filter and kernel-forwarding configuration file
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# Check
cat /etc/sysctl.d/k8s.conf
# Load the br_netfilter module
modprobe br_netfilter
# Check that it is loaded
lsmod | grep br_netfilter
# Apply the bridge-filter and kernel-forwarding settings
sysctl -p /etc/sysctl.d/k8s.conf

# Synchronize time (note: ntpd and chronyd conflict; the chronyd setup below is what is used)
yum install ntp -y
systemctl start ntpd
systemctl enable ntpd
yum install chrony -y
systemctl start chronyd
systemctl enable chronyd
# Modify the configuration: add the time server and the allowed subnet
echo "
server 10.10.3.70 iburst
allow 10.10.3.0/24
" >> /etc/chrony.conf
timedatectl set-ntp true
systemctl restart chronyd
timedatectl status
date
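To confirm that the node is actually syncing from the configured server, an optional quick check with chrony's own CLI:

# The 10.10.3.70 entry should gain a '*' once it is selected as the sync source
chronyc sources -v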
  3. Install ipset and ipvsadm
# Install ipset and ipvsadm
yum -y install ipset ipvsadm

Configure how the ipvs modules are loaded:

# Add the modules that need to be loaded
echo '#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
' > /etc/sysconfig/modules/ipvs.modules
# Check
cat /etc/sysconfig/modules/ipvs.modules
# Make it executable, run it, and verify the modules are loaded
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack

# Reboot
reboot

After the configuration preparation is complete, all nodes must be rebooted.
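After the reboot, it is worth confirming that the changes persisted. A quick check (modprobe does not survive a reboot by itself; if the lsmod output is empty, re-run the module steps or persist them via /etc/modules-load.d/):

free -h                      # the Swap line should show 0B
getenforce                   # should print Permissive
sysctl net.ipv4.ip_forward   # should print 1
lsmod | grep br_netfilter
lsmod | grep -e ip_vs -e nf_conntrack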

Task 2: Install Docker#

  1. Configure the Docker CE yum repository by writing the following content to /etc/yum.repos.d/docker-ce.repo:
echo '
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://download.docker.com/linux/centos/7/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://download.docker.com/linux/centos/gpg ' > /etc/yum.repos.d/docker-ce.repo

  2. Install Docker CE. Run the following commands:
yum -y install docker-ce docker-ce-cli containerd.io
# Start docker and enable it on boot
systemctl start docker
systemctl enable docker
# Check the versions
docker -v
docker compose version
  3. Modify the Docker configuration: set the cgroup driver to systemd, as follows.
# Write the configuration into daemon.json
echo '{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "data-root": "/data/docker"
} ' > /etc/docker/daemon.json
# Check
cat /etc/docker/daemon.json
systemctl daemon-reload
systemctl restart docker
docker info   # confirm it reports "Cgroup Driver: systemd"
  4. Create the required directories (later tasks expect them under /data)
mkdir -p /data && cd /data
mkdir cri-dockerd calico dashboard docker metrics-server script ingress-nginx

Task 3: Install cri-dockerd (k8s 1.24 and later)#

cd /data/cri-dockerd
# Download the cri-dockerd package
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd-0.3.4-3.el8.x86_64.rpm
# Install cri-dockerd
rpm -ivh cri-dockerd-0.3.4-3.el8.x86_64.rpm
docker pull registry.aliyuncs.com/google_containers/pause:3.9
# Point the pause image at a domestic mirror; otherwise kubelet cannot pull it and fails to start
sed -i.bak 's|ExecStart=.*$|ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9|g' /usr/lib/systemd/system/cri-docker.service
cat /usr/lib/systemd/system/cri-docker.service
# Start cri-dockerd
systemctl daemon-reload
systemctl start cri-docker.service
systemctl enable cri-docker.service
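A quick sanity check that the CRI endpoint is up (this is the socket kubeadm is pointed at later):

systemctl is-active cri-docker.service
ls -l /var/run/cri-dockerd.sock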

Task 4: Install High-Availability Components#

Deploying a highly available cluster requires installing **keepalived and haproxy** to make the master nodes highly available. Perform these steps on each master node.

  1. Install keepalived and haproxy
yum install keepalived haproxy -y
  2. Back up the keepalived and haproxy configuration files
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
  3. Modify /etc/keepalived/keepalived.conf on each master node
    1. future-k8s-node0
    echo '
    global_defs {
       router_id k8s
    }

    vrrp_script check_haproxy {
        script "killall -0 haproxy"
        interval 3
        weight -2
        fall 10
        rise 2
    }

    vrrp_instance VI_1 {
        state MASTER  # MASTER on the primary node, BACKUP on the others
        interface ens192  # network interface name
        virtual_router_id 51
        priority 250   # priority
        nopreempt   # non-preemptive mode (only honored when state is BACKUP)
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass ceb1b3ec013d66163d6ab
        }
        virtual_ipaddress {
            10.10.3.125/24   # virtual IP
        }
        track_script {
            check_haproxy
        }
    }
    ' > /etc/keepalived/keepalived.conf
      
    2. future-k8s-node1
    echo '
    global_defs {
       router_id k8s
    }

    vrrp_script check_haproxy {
        script "killall -0 haproxy"
        interval 3
        weight -2
        fall 10
        rise 2
    }

    vrrp_instance VI_1 {
        state BACKUP  # MASTER on the primary node, BACKUP on the others
        interface ens192  # network interface name
        virtual_router_id 51
        priority 200   # priority
        nopreempt   # non-preemptive mode
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass ceb1b3ec013d66163d6ab
        }
        virtual_ipaddress {
            10.10.3.125/24   # virtual IP
        }
        track_script {
            check_haproxy
        }
    }
    ' > /etc/keepalived/keepalived.conf
      
    3. future-k8s-node2
    echo '
    global_defs {
       router_id k8s
    }

    vrrp_script check_haproxy {
        script "killall -0 haproxy"
        interval 3
        weight -2
        fall 10
        rise 2
    }

    vrrp_instance VI_1 {
        state BACKUP  # MASTER on the primary node, BACKUP on the others
        interface ens192  # network interface name
        virtual_router_id 51
        priority 150   # priority
        nopreempt   # non-preemptive mode
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass ceb1b3ec013d66163d6ab
        }
        virtual_ipaddress {
            10.10.3.125/24   # virtual IP
        }
        track_script {
            check_haproxy
        }
    }
    ' > /etc/keepalived/keepalived.conf
      
  4. Modify /etc/haproxy/haproxy.cfg on each master node (the configuration file is identical on all three master nodes)
echo "
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxies to the backends
#---------------------------------------------------------------------
frontend kubernetes-apiserver
    mode                 tcp
    bind                 *:16443 # HA port; used when initializing the k8s cluster
    option               tcplog
    default_backend      kubernetes-apiserver
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode        tcp
    balance     roundrobin
    server      future-k8s-node0   10.10.3.121:6443 check
    server      future-k8s-node1   10.10.3.122:6443 check
    server      future-k8s-node2   10.10.3.123:6443 check

#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind                 *:1080
    stats auth           admin:awesomePassword
    stats refresh        5s
    stats realm          HAProxy\ Statistics
    stats uri            /admin?stats

" > /etc/haproxy/haproxy.cfg
  5. Start the services (start the master nodes in order)
# Start keepalived
systemctl enable keepalived && systemctl start keepalived
# Start haproxy
systemctl enable haproxy && systemctl start haproxy
systemctl status keepalived
systemctl status haproxy
  6. Check the bound VIP address on future-k8s-node0

ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:9a:eb:48 brd ff:ff:ff:ff:ff
    inet 10.10.3.121/24 brd 10.10.3.255 scope global noprefixroute ens192
       valid_lft forever preferred_lft forever
    inet 10.10.3.125/24 scope global ens192
       valid_lft forever preferred_lft forever
    inet6 fe80::250:56ff:fe9a/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
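To verify that failover actually works, a minimal sketch, assuming the VIP currently sits on future-k8s-node0:

# on future-k8s-node0: stop haproxy so the check_haproxy script starts failing
systemctl stop haproxy
# on future-k8s-node1: the VIP should appear here within a few advert intervals
ip addr show ens192 | grep 10.10.3.125
# on future-k8s-node0: restore haproxy afterwards
systemctl start haproxy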

Task 5: Deploy the k8s Cluster#

  1. Add the yum repository#

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
  2. Install kubeadm, kubelet, and kubectl#

# Install kubelet, kubeadm, and kubectl
yum install -y kubelet-1.27.0 kubeadm-1.27.0 kubectl-1.27.0 --disableexcludes=kubernetes

# Set the cgroup driver to systemd
echo 'KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"' > /etc/sysconfig/kubelet
# Check
cat /etc/sysconfig/kubelet
# Start and enable on boot (kubelet will restart in a loop until kubeadm init runs; that is expected)
systemctl start kubelet.service
systemctl enable kubelet.service
systemctl status kubelet.service

# Check the versions
kubeadm version
kubelet --version
kubectl version
  3. Initialize the k8s cluster (on the future-k8s-node0 node)#

    Method 1: Initialize with a configuration file#
    1. Export the default configuration file (optional)

    kubeadm config print init-defaults > kubeadm-config.yaml

    2. Configuration file
    echo '
    apiVersion: kubeadm.k8s.io/v1beta3
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 10.10.3.125  # virtual IP
      bindPort: 6443
    nodeRegistration:
      criSocket: unix:///var/run/cri-dockerd.sock
    ---
    apiServer:
      certSANs:    # master node hostnames and IPs
        - future-k8s-node0
        - future-k8s-node1
        - future-k8s-node2
        - future-k8s-vip
        - 10.10.3.121
        - 10.10.3.122
        - 10.10.3.123
        - 10.10.3.125
        - 127.0.0.1
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta3
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controlPlaneEndpoint: "future-k8s-vip:16443" # virtual IP and the HA port
    controllerManager: {}
    dns: {}
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: registry.aliyuncs.com/google_containers
    kind: ClusterConfiguration
    kubernetesVersion: 1.27.0  # match the installed kubeadm/kubelet version
    networking:
      dnsDomain: cluster.local
      podSubnet: 10.244.0.0/16
      serviceSubnet: 10.96.0.0/12
    scheduler: {}
    ' > /data/script/kubeadm-config.yaml
    
    3. Initialize the cluster
    kubeadm init --config /data/script/kubeadm-config.yaml --upload-certs
    
    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    Method 2: Initialize with a command#
    1. Deploy the master node: run this on 10.10.3.121 to initialize it
    # --control-plane-endpoint is the virtual IP plus the HA port
    kubeadm init \
      --apiserver-advertise-address=10.10.3.121 \
      --image-repository registry.aliyuncs.com/google_containers \
      --kubernetes-version v1.27.0 \
      --control-plane-endpoint=future-k8s-vip:16443 \
      --service-cidr=10.96.0.0/12 \
      --pod-network-cidr=10.244.0.0/16 \
      --cri-socket=unix:///var/run/cri-dockerd.sock \
      --ignore-preflight-errors=all
      
      
    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    2. Configure passwordless SSH
    # Generate a key pair on 10.10.3.121 and copy it to the other master nodes
    ssh-keygen -t rsa
    ssh-copy-id 10.10.3.122
    ssh-copy-id 10.10.3.123
    
    3. Copy the certificates on 10.10.3.121 to the other master nodes
    # Create the certificate directories on the other master nodes
    cd /root && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/
    
    # Copy the certificates from future-k8s-node0 to future-k8s-node1
    scp /etc/kubernetes/pki/ca.crt 10.10.3.122:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/ca.key 10.10.3.122:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/sa.key 10.10.3.122:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.pub 10.10.3.122:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/front-proxy-ca.crt 10.10.3.122:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/front-proxy-ca.key 10.10.3.122:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/etcd/ca.crt 10.10.3.122:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/pki/etcd/ca.key 10.10.3.122:/etc/kubernetes/pki/etcd/
    
    # Copy the certificates from future-k8s-node0 to future-k8s-node2
    scp /etc/kubernetes/pki/ca.crt 10.10.3.123:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/ca.key 10.10.3.123:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/sa.key 10.10.3.123:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.pub 10.10.3.123:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/front-proxy-ca.crt 10.10.3.123:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/front-proxy-ca.key 10.10.3.123:/etc/kubernetes/pki/ 
    scp /etc/kubernetes/pki/etcd/ca.crt 10.10.3.123:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/pki/etcd/ca.key 10.10.3.123:/etc/kubernetes/pki/etcd/
    
  4. Initialize the other master nodes#

kubeadm join future-k8s-vip:16443 --token ysl0xr.knx79yu06cldwiy1 --discovery-token-ca-cert-hash sha256:5dd8de94e08a560c7c2424dde0719a9f4e6ac4e5e5fe538ebbab0cbc5866b000 --control-plane --cri-socket=unix:///var/run/cri-dockerd.sock
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
  5. Initialize the worker nodes#

(For a fully HA setup, workers can also join through the VIP endpoint future-k8s-vip:16443.)

kubeadm join 10.10.3.121:6443 --token pzyo37.oaaqt1nrw3u7ijuj --discovery-token-ca-cert-hash sha256:b8067f74af04b63399af1de28644223178e5d63e8258c25d465e78aca515e887 --cri-socket=unix:///var/run/cri-dockerd.sock
  6. Allow scheduling Pods onto master nodes (optional)#

By default, Kubernetes does not schedule Pods onto master nodes. To use the k8s masters as worker nodes as well, remove the taint to enable scheduling.

# Check the default taint
kubectl describe node future-k8s-node2 |grep Taints

Taints: node-role.kubernetes.io/control-plane

# Remove the taint
kubectl taint nodes future-k8s-node2 node-role.kubernetes.io/control-plane-

Add the worker label:

# Add the worker label
kubectl label nodes future-k8s-node2 node-role.kubernetes.io/worker=
# Remove the worker label
kubectl label nodes future-k8s-node2 node-role.kubernetes.io/worker-

Task 6: Install the Network Plugin (master)#

Install calico

mkdir /data/calico
cd /data/calico
wget https://docs.tigera.io/archive/v3.25/manifests/calico.yaml
# Edit calico.yaml and find CALICO_IPV4POOL_CIDR
vi calico.yaml
############ Modified content ############
value: "10.244.0.0/16"
############ Modified content ############
# Install calico on the master node
kubectl apply -f calico.yaml

Check the node status

# List all nodes
kubectl get nodes
kubectl get nodes -o wide
# Check cluster health
kubectl get cs

Task 7: Install nginx for Testing#

# Create an nginx deployment
kubectl create deployment nginx --image=nginx
# Expose port 80
kubectl expose deployment nginx --port=80 --type=NodePort
# Check pod status
kubectl get pod
# Check service status
kubectl get service
##########################################################################
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        5d1h
nginx        NodePort    10.98.221.224   <none>        80:32743/TCP   23s
##########################################################################
# Access the page in a browser to test (use the NodePort reported by the service status above)
http://10.10.3.121:32743/
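The same test from the command line (substitute the NodePort your own service reports):

curl -I http://10.10.3.121:32743/   # expect an HTTP/1.1 200 OK response from nginx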

Task 8: Install the Dashboard UI#

  1. Download the yaml file
# Create a directory for it
mkdir -p /data/dashboard
cd /data/dashboard/
# v2.7
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
  2. Modify the yaml file
vi recommended.yaml
# Set the replicas to 2, and expose the Service as a NodePort
################# Modified content #######################
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 32009   # add this line; mind the indentation
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort          # add this line; mind the indentation
################# Modified content #######################
  3. Apply it, then check the pod and svc
# Install
kubectl apply -f recommended.yaml
# Check the pod and svc
kubectl get pod,svc -o wide -n kubernetes-dashboard
#########################################################
NAME                                             READY   STATUS              RESTARTS   AGE   IP       NODE    NOMINATED NODE   READINESS GATES
pod/dashboard-metrics-scraper-5cb4f4bb9c-mg569   0/1     ContainerCreating   0          9s    <none>   node1   <none>           <none>
pod/kubernetes-dashboard-6967859bff-2968p        0/1     ContainerCreating   0          9s    <none>   node1   <none>           <none>

NAME                                TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE   SELECTOR
service/dashboard-metrics-scraper   ClusterIP   10.100.129.191   <none>        8000/TCP        9s    k8s-app=dashboard-metrics-scraper
service/kubernetes-dashboard        NodePort    10.106.130.53    <none>        443:31283/TCP   9s    k8s-app=kubernetes-dashboard
########################################################

Access the Dashboard over HTTPS using the NodePort shown for the svc.
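For example, with the nodePort set to 32009 as above, the address would be https://10.10.3.121:32009/ (the browser will warn about the Dashboard's self-signed certificate).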

  4. Create the dashboard service account
# Create an admin-user service account and bind it to the cluster-admin role
vi dashboard-adminuser.yaml
################## Content ####################
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
  
---
# Create a secret that holds a long-lived bearer token for the service account
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
################## Content ####################

# Apply it
kubectl apply -f dashboard-adminuser.yaml
  5. Logging in

Option 1: Obtain a long-lived token

# Save it to the admin-user.token file under /data/dashboard/
cd /data/dashboard/
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d > admin-user.token

Script to fetch the long-lived token:

#!/bin/bash
# Author: Yun
############# Description #############
:<<!
Fetches a long-lived token
and stores it in the admin-user.token file
!
############# Description #############
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d > admin-user.token

echo -e "\033[1;32mToken created successfully; see the admin-user.token file\033[m"

Option 2: Log in with a kubeconfig file

# Define the token variable
DASH_TOKEN=$(kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d)
# Set the kubeconfig cluster entry
kubectl config set-cluster kubernetes --server=https://10.10.3.121:6443 --kubeconfig=/root/dashbord-admin.conf
# Set the kubeconfig user entry
kubectl config set-credentials admin-user --token=$DASH_TOKEN --kubeconfig=/root/dashbord-admin.conf
# Set the kubeconfig context entry
kubectl config set-context admin-user@kubernetes --cluster=kubernetes --user=admin-user --kubeconfig=/root/dashbord-admin.conf
# Set the kubeconfig current context
kubectl config use-context admin-user@kubernetes --kubeconfig=/root/dashbord-admin.conf

Copy the generated dashbord-admin.conf file to your local machine; when logging in, choose the Kubeconfig option and select this kubeconfig file.

Task 9: Install metrics-server#

Download the deployment file

wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml -O metrics-server-components.yaml

Modify the Deployment section of the yaml file

---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls  # add this line
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.6.4 # change the image
        imagePullPolicy: IfNotPresent

# Install
kubectl apply -f metrics-server-components.yaml

Check the metrics-server pod status

kubectl get pods --all-namespaces | grep metrics

After a short wait, the various monitoring graphs display successfully in the Dashboard.
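The same data is available from the command line once the metrics-server pod is Running:

kubectl top nodes
kubectl top pods -A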


Task 10: kubectl Command Auto-completion#

yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
echo 'source <(kubectl completion bash)' >>  ~/.bashrc
bash
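Optionally, completion can also be wired to a short alias (a common convenience from the kubectl documentation, not part of the original setup):

echo 'alias k=kubectl' >> ~/.bashrc
echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc
source ~/.bashrc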

Task 11: Install the ingress-nginx Controller#

# Download the yaml file
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.0.0/deploy/static/provider/baremetal/deploy.yaml
# Change the image addresses in the yaml file to mirrors
##################### Modified content ######################
willdockerhub/ingress-nginx-controller:v1.0.0
hzde0128/kube-webhook-certgen:v1.0
##################### Modified content ######################
# Change the Deployment to a DaemonSet
# Change the network mode to host network
##################### Modified content ######################
template:
  spec:
    hostNetwork: true
    dnsPolicy: ClusterFirstWithHostNet
    tolerations:  # tolerate the control-plane taint so it can run on all nodes
      - key: node-role.kubernetes.io/control-plane
        operator: Exists
        effect: NoSchedule
    nodeSelector:
      kubernetes.io/os: linux
      custem/ingress-controller-ready: 'true'
    containers:
      - name: controller
##################### Modified content ######################
# Label the worker nodes (required)
kubectl label nodes future-k8s-node0 custem/ingress-controller-ready=true
kubectl label nodes future-k8s-node1 custem/ingress-controller-ready=true
kubectl label nodes future-k8s-node2 custem/ingress-controller-ready=true
kubectl label nodes future-k8s-node3 custem/ingress-controller-ready=true

# Install
kubectl apply -f deploy.yaml

# Check the status
kubectl get pods -n ingress-nginx
################ Status ##################
NAME                                       READY   STATUS      RESTARTS   AGE
ingress-nginx-admission-create-2lz4v       0/1     Completed   0          5m46s
ingress-nginx-admission-patch-c6896        0/1     Completed   0          5m46s
ingress-nginx-controller-7575fb546-q29qn   1/1     Running     0          5m46s

Task 12: Configure the Dashboard Proxy#

echo '
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: k8s-dashboard
  namespace: kubernetes-dashboard
  labels:
    ingress: k8s-dashboard
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /  # rewrite path
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"  # redirect HTTP to HTTPS
    nginx.ingress.kubernetes.io/use-regex: "true"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"

spec:
  ingressClassName: nginx 
  rules:
    - host: k8s.yjs.51xueweb.cn
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: kubernetes-dashboard
                port:
                  number: 443
' > /data/dashboard/dashboard-ingress.yaml
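The manifest above only writes the file; it still has to be applied, and the host name must resolve to a node running the ingress controller (e.g. via a local hosts entry):

kubectl apply -f /data/dashboard/dashboard-ingress.yaml
kubectl get ingress -n kubernetes-dashboard
# then browse to https://k8s.yjs.51xueweb.cn/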