Binary Deployment of k8s 1.18.14 (High Availability)


I. Environment Preparation

1. Machine List

Hostname      IP              OS         Role              VM spec            Software installed
k8s-nginx     192.168.0.100   CentOS 7   nginx proxy       CPU: 1c, MEM: 1G   nginx
k8s-master1   192.168.0.101   CentOS 7   management node   CPU: 2c, MEM: 2G   docker, kube-apiserver, kube-scheduler, kube-controller-manager, etcd, flannel
k8s-master2   192.168.0.102   CentOS 7   management node   CPU: 2c, MEM: 2G   docker, kube-apiserver, kube-scheduler, kube-controller-manager, etcd, flannel
k8s-node1     192.168.0.111   CentOS 7   worker node       CPU: 2c, MEM: 2G   docker, kubelet, kube-proxy, flannel
k8s-node2     192.168.0.112   CentOS 7   worker node       CPU: 2c, MEM: 2G   docker, kubelet, kube-proxy, flannel
master        192.168.0.123   CentOS 7   control host      CPU: 1c, MEM: 1G   none required

 2. Version Information

docker: 19.03.13
etcd: 3.4.14
kubernetes: 1.18.14
pod network CIDR: 10.244.0.0/16
service network CIDR: 10.96.0.0/12
kubernetes internal (cluster) address: 10.96.0.1
flannel: 0.13.0
coredns: v1.6.7
coredns address: 10.96.0.10

3. Environment Initialization

  Note: the master host acts as the control machine and performs remote operations on the other machines.

  3.1 Passwordless SSH login

  Run the following two steps on the master control host.

[root@master ~]# ssh-keygen -t rsa
[root@master ~]# for i in 101 102 111 112; do ssh-copy-id 192.168.0.$i; done

  3.2 Add hosts entries on the master host

[root@master ~]# vim /etc/hosts
192.168.0.101   k8s-master1
192.168.0.102   k8s-master2
192.168.0.111   k8s-node1
192.168.0.112   k8s-node2

  Note: the following steps must be performed on all four k8s machines as well as on the load-balancer machine(s).

  3.3 Time synchronization

  Set the system time to the current time and write it to CMOS; otherwise the change will be lost after a reboot.

# Set the system time
[root@master ~]# date -s '2021-01-10 12:09:00'
Sun Jan 10 12:09:00 CST 2021
# Force the system time to be written to CMOS
[root@master ~]# clock -w
# Show the system time
[root@master ~]# date
Sun Jan 10 12:09:41 CST 2021
# Show the hardware clock
[root@master ~]# hwclock --show
Sun 10 Jan 2021 12:09:49 PM CST  -0.411765 seconds
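
  For ongoing synchronization (rather than a one-off manual setting), chrony can keep all machines aligned automatically; a minimal sketch, assuming the hosts have internet access and the default CentOS 7 chrony package:

# Install and enable chrony for continuous NTP synchronization
yum install -y chrony
systemctl enable chronyd && systemctl start chronyd
# Check the current offset against the configured NTP sources
chronyc tracking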

  3.4 Disable the firewall

systemctl stop firewalld  && systemctl disable firewalld

  3.5 Disable SELinux

setenforce 0 && sed -i 's/=enforcing/=disabled/g' /etc/selinux/config

  3.6 Disable swap

swapoff -a && sed -i '/swap/s/^/#/' /etc/fstab

   By default, kubelet refuses to run on a host that has an active swap partition. When planning new systems, consider installing without a swap partition. For hosts that already have swap, kubelet can be configured to ignore the swap restriction instead; otherwise kubelet will not start. In most cases simply disabling swap as above is enough, and this step is not required.

vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--fail-swap-on=false"
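
  A quick way to confirm swap really is off after the change (sketch):

# Both commands should show no swap devices in use
swapon -s
free -h | grep -i swap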

  3.7 Install Docker and configure a registry mirror

vim install_docker.sh
#!/bin/bash
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install docker-ce-19.03.13 -y
systemctl enable docker
systemctl start docker

vim /etc/docker/daemon.json
{
  "registry-mirrors": ["https://wyt8gbxw.mirror.aliyuncs.com"]
}

systemctl daemon-reload
systemctl restart docker
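
  To confirm the Docker version and that the registry mirror took effect (sketch):

# Show the daemon version and the configured registry mirrors
docker version --format '{{.Server.Version}}'
docker info | grep -iA1 'registry mirrors'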

II. Install nginx

  To provide high availability for the two master nodes, you can use a cloud provider's SLB, or two nginx instances plus keepalived.

  Here, since this is a lab environment, a single nginx instance acts as a layer-4 proxy for load balancing.

# Install nginx
[root@k8s-nginx ~]# yum install -y nginx
# Create a sub-configuration file
[root@k8s-nginx ~]# cd /etc/nginx/conf.d/
[root@k8s-nginx conf.d]# vim lb.tcp
stream {
    upstream master {
        hash $remote_addr consistent;
        server 192.168.0.101:6443 max_fails=3 fail_timeout=30;
        server 192.168.0.102:6443 max_fails=3 fail_timeout=30;
    }

    server {
        listen 6443;
        proxy_pass master;
    }
}
# Include this file at the top level of the main config (outside the http block), since stream{} cannot be nested inside http{}
[root@k8s-nginx ~]# cd /etc/nginx/
[root@k8s-nginx nginx]# vim nginx.conf
...
include /etc/nginx/conf.d/*.tcp;
...
# Enable nginx at boot and start it
[root@k8s-nginx nginx]# systemctl enable nginx
Created symlink from /etc/systemd/system/multi-user.target.wants/nginx.service to /usr/lib/systemd/system/nginx.service.
[root@k8s-nginx nginx]# systemctl start nginx
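
  Before moving on, it is worth confirming that the configuration parses and that nginx is actually listening on 6443 (sketch):

# Validate the configuration and confirm the layer-4 listener is up
[root@k8s-nginx nginx]# nginx -t
[root@k8s-nginx nginx]# ss -lntp | grep 6443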

III. Cluster Setup

  Note: all scripts and related files live under /root/k8s; create this directory on every node in advance.

1. Create the required directories

  1.1 For management nodes

[root@master mkdir]# vim mkdir_master.sh
#!/bin/bash
mkdir /opt/etcd/{bin,data,cfg,ssl} -p
mkdir /opt/kubernetes/{bin,cfg,ssl,logs}  -p
mkdir /opt/kubernetes/logs/{kubelet,kube-proxy,kube-scheduler,kube-apiserver,kube-controller-manager} -p

echo 'export PATH=$PATH:/opt/kubernetes/bin' >> /etc/profile
echo 'export PATH=$PATH:/opt/etcd/bin' >> /etc/profile
source /etc/profile

  1.2 For worker nodes

[root@master mkdir]# vim mkdir_node.sh
#!/bin/bash
mkdir /opt/kubernetes/{bin,cfg,ssl,logs}  -p
mkdir /opt/kubernetes/logs/{kubelet,kube-proxy} -p

echo 'export PATH=$PATH:/opt/kubernetes/bin' >> /etc/profile
source /etc/profile

  1.3 Send the scripts to the corresponding remote k8s hosts

[root@master mkdir]# ls
mkdir_master.sh  mkdir_node.sh
[root@master mkdir]# cd ..
[root@master k8s]# ls
install_docker.sh  mkdir
[root@master k8s]# for i in 1 2; do scp -r mkdir/ k8s-master$i:/root/k8s/; done
[root@master k8s]# for i in 1 2; do scp -r mkdir/ k8s-node$i:/root/k8s/; done

  1.4 Run the script on all k8s hosts

[root@k8s-master1 mkdir]# bash mkdir_master.sh
[root@k8s-master2 mkdir]# bash mkdir_master.sh
[root@k8s-node1 mkdir]# bash mkdir_node.sh
[root@k8s-node2 mkdir]# bash mkdir_node.sh

2. Generate certificates

  2.1 The certificate-generation script (certificate.sh) is as follows

#!/bin/bash

command_exists() {
    command -v "$@" > /dev/null 2>&1
}

if command_exists cfssl; then
    echo "命令已存在"
else
    # Download the cfssl certificate tools
    wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
    wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
    wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

    # Add execute permission
    chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64

    # Move the binaries into directories on the PATH
    mv cfssl_linux-amd64 /usr/local/bin/cfssl
    mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
    mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo
fi

# CA config file; certificates are signed for 10 years (87600h) by default
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

# Issuer / subject info: CN - Common Name, O - Organization, OU - Organizational Unit
cat > ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

#-----------------------
# Server certificate for the cluster
# hosts covers the master nodes, worker nodes, the nginx node, and the in-cluster kubernetes service IP and names
cat > server-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
      "127.0.0.1",
      "192.168.0.101",
      "192.168.0.102",
      "192.168.0.111",
      "192.168.0.112",
      "192.168.0.100",
      "10.96.0.1",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

#-----------------------
# Certificate issued to the administrator for managing the cluster
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

#-----------------------
# Certificate issued to kube-proxy
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

#-----------------------
# Check the validity period of each certificate
for item in $(ls *.pem |grep -v key) ;do echo ======================$item===================;openssl x509 -in $item -text -noout| grep Not;done

  2.2 Run the script

[root@master ssl]# bash certificate.sh
......
======================admin.pem===================
            Not Before: Jan 10 04:30:00 2021 GMT
            Not After : Jan  8 04:30:00 2031 GMT
======================ca.pem===================
            Not Before: Jan 10 04:30:00 2021 GMT
            Not After : Jan  9 04:30:00 2026 GMT
======================kube-proxy.pem===================
            Not Before: Jan 10 04:30:00 2021 GMT
            Not After : Jan  8 04:30:00 2031 GMT
======================server.pem===================
            Not Before: Jan 10 04:30:00 2021 GMT
            Not After : Jan  8 04:30:00 2031 GMT
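
  It is also worth confirming that the server certificate contains every planned IP and service name before distributing it; one way to check the SANs (sketch):

# List the Subject Alternative Names embedded in server.pem
[root@master ssl]# openssl x509 -in server.pem -noout -text | grep -A1 'Subject Alternative Name'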

  2.3 Distribute the certificates to the appropriate nodes

For the etcd cluster
[root@master ssl]# for i in 1 2; do scp ca.pem server.pem server-key.pem k8s-master$i:/opt/etcd/ssl; done

For the k8s cluster
[root@master ssl]# for i in 1 2; do scp *.pem k8s-master$i:/opt/kubernetes/ssl; done
[root@master ssl]# for i in 1 2; do scp *.pem k8s-node$i:/opt/kubernetes/ssl; done

3. Install the etcd cluster

  3.1 Download the etcd binary package, extract it, and send it to each management node

[root@master src]# wget https://github.com/etcd-io/etcd/releases/download/v3.4.14/etcd-v3.4.14-linux-amd64.tar.gz
[root@master src]# tar -zxf etcd-v3.4.14-linux-amd64.tar.gz
[root@master src]# cd etcd-v3.4.14-linux-amd64/
[root@master etcd-v3.4.14-linux-amd64]# for i in 1 2; do scp etcd etcdctl k8s-master$i:/opt/etcd/bin; done

  3.2 Start etcd

  The startup script is as follows

[root@master k8s]# vim etcd.sh
#!/bin/bash
# example: bash etcd.sh etcd01 192.168.0.101 etcd01=https://192.168.0.101:2380,etcd02=https://192.168.0.102:2380

ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3

ETCD_VERSION=3.4.14

cat <<EOF >/opt/etcd/cfg/etcd.yml
#etcd ${ETCD_VERSION}
name: ${ETCD_NAME}
data-dir: /opt/etcd/data
listen-peer-urls: https://${ETCD_IP}:2380
listen-client-urls: https://${ETCD_IP}:2379,https://127.0.0.1:2379

advertise-client-urls: https://${ETCD_IP}:2379
initial-advertise-peer-urls: https://${ETCD_IP}:2380
initial-cluster: ${ETCD_CLUSTER}
initial-cluster-token: etcd-cluster
initial-cluster-state: new
enable-v2: true

client-transport-security:
  cert-file: /opt/etcd/ssl/server.pem
  key-file: /opt/etcd/ssl/server-key.pem
  client-cert-auth: false
  trusted-ca-file: /opt/etcd/ssl/ca.pem
  auto-tls: false

peer-transport-security:
  cert-file: /opt/etcd/ssl/server.pem
  key-file: /opt/etcd/ssl/server-key.pem
  client-cert-auth: false
  trusted-ca-file: /opt/etcd/ssl/ca.pem
  auto-tls: false

debug: false
logger: zap
log-outputs: [stderr]
EOF

cat <<EOF >/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
Documentation=https://github.com/etcd-io/etcd
Conflicts=etcd.service
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
LimitNOFILE=65536
Restart=on-failure
RestartSec=5s
TimeoutStartSec=0
ExecStart=/opt/etcd/bin/etcd --config-file=/opt/etcd/cfg/etcd.yml

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable etcd
systemctl restart etcd

  Send the startup script to each management node

[root@master k8s]# for i in 1 2; do scp etcd.sh k8s-master$i:/root/k8s; done

  Run the startup script on each management node

[root@k8s-master1 k8s]# bash etcd.sh etcd01 192.168.0.101 etcd01=https://192.168.0.101:2380,etcd02=https://192.168.0.102:2380 
[root@k8s-master2 k8s]# bash etcd.sh etcd02 192.168.0.102 etcd01=https://192.168.0.101:2380,etcd02=https://192.168.0.102:2380 

Note: after running the script on etcd01 it will appear to hang; it is waiting for the other member to join. Simply continue and start etcd02.

  Verify that the cluster is healthy

[root@k8s-master1 k8s]# etcdctl --write-out="table" --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints=https://192.168.0.101:2379,https://192.168.0.102:2379 endpoint health       
+----------------------------+--------+-------------+-------+
|          ENDPOINT          | HEALTH |    TOOK     | ERROR |
+----------------------------+--------+-------------+-------+
| https://192.168.0.101:2379 |   true | 10.305523ms |       |
| https://192.168.0.102:2379 |   true | 12.624803ms |       |
+----------------------------+--------+-------------+-------+

  List the cluster members

[root@k8s-master1 k8s]# etcdctl --write-out="table" --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints=https://192.168.0.101:2379,https://192.168.0.102:2379 member list
+------------------+---------+--------+----------------------------+----------------------------+------------+
|        ID        | STATUS  |  NAME  |         PEER ADDRS         |        CLIENT ADDRS        | IS LEARNER |
+------------------+---------+--------+----------------------------+----------------------------+------------+
|  aeed280fbd4b48b | started | etcd01 | https://192.168.0.101:2380 | https://192.168.0.101:2379 |      false |
| 1e149427bfba9593 | started | etcd02 | https://192.168.0.102:2380 | https://192.168.0.102:2379 |      false |
+------------------+---------+--------+----------------------------+----------------------------+------------+
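
  A simple read/write smoke test (v3 API) confirms the cluster accepts writes and replicates them between members; a sketch using a throwaway key:

# Write a test key through member 1, read it back through member 2, then clean up
[root@k8s-master1 k8s]# etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints=https://192.168.0.101:2379 put /smoke-test hello
[root@k8s-master1 k8s]# etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints=https://192.168.0.102:2379 get /smoke-test
[root@k8s-master1 k8s]# etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints=https://192.168.0.102:2379 del /smoke-test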

4. Master (management node) deployment

  4.1 Download the kubernetes binary package, extract it, and send the relevant components to each node

[root@master src]# wget https://dl.k8s.io/v1.18.14/kubernetes-server-linux-amd64.tar.gz
[root@master src]# tar -zxf kubernetes-server-linux-amd64.tar.gz
[root@master src]# cd kubernetes/server/bin/
# Management nodes
[root@master bin]# for i in 1 2; do scp kube-apiserver kube-scheduler kube-controller-manager kubectl k8s-master$i:/opt/kubernetes/bin/; done
# Worker nodes
[root@master bin]# for i in 1 2; do scp kubelet kube-proxy k8s-node$i:/opt/kubernetes/bin/; done

  4.2 Generate the kubeconfig files

  Create the token file

[root@master kubeconfig]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
8e72984386f2ab78f0b769bb66d75b98
[root@master kubeconfig]# cat token.csv 
8e72984386f2ab78f0b769bb66d75b98,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
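
  The token.csv format is token,user,uid,"group". The token here must match the BOOTSTRAP_TOKEN hard-coded in kubeconfig.sh below; a sketch that writes the file in one step:

# Generate a random 16-byte token and write token.csv in one go
[root@master kubeconfig]# BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
[root@master kubeconfig]# echo "${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,\"system:kubelet-bootstrap\"" > token.csv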

  The script is as follows

[root@master kubeconfig]# vim kubeconfig.sh
#!/bin/bash

# TLS Bootstrapping Token
BOOTSTRAP_TOKEN=8e72984386f2ab78f0b769bb66d75b98

APISERVER=$1
SSL_DIR=$2

export KUBE_APISERVER="https://${APISERVER}:6443"

#----------------------

# Create the kubelet bootstrapping kubeconfig
# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=${SSL_DIR}/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig

# Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig

# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig

# Use the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

#----------------------

# Create the kube-proxy kubeconfig file
kubectl config set-cluster kubernetes \
  --certificate-authority=${SSL_DIR}/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate=${SSL_DIR}/kube-proxy.pem \
  --client-key=${SSL_DIR}/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

#----------------------

# Create the admin kubeconfig file
kubectl config set-cluster kubernetes \
  --certificate-authority=${SSL_DIR}/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=admin.kubeconfig

kubectl config set-credentials admin \
  --client-certificate=${SSL_DIR}/admin.pem \
  --client-key=${SSL_DIR}/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=admin.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=admin \
  --kubeconfig=admin.kubeconfig

kubectl config use-context default --kubeconfig=admin.kubeconfig

  Note: this sets the kube-apiserver access address. Because kube-apiserver is fronted by a highly available load balancer, the apiserver address here is the nginx node IP on port 6443.

  Send the token.csv file and the script to each management node

[root@master k8s]# for i in 1 2; do scp -r kubeconfig k8s-master$i:/root/k8s/; done

  Run the script on each management node; 192.168.0.100 is the nginx node IP and /opt/kubernetes/ssl is the certificate path

[root@k8s-master1 kubeconfig]# bash kubeconfig.sh 192.168.0.100 /opt/kubernetes/ssl
Cluster "kubernetes" set.
User "kubelet-bootstrap" set.
Context "default" created.
Switched to context "default".
Cluster "kubernetes" set.
User "kube-proxy" set.
Context "default" created.
Switched to context "default".
Cluster "kubernetes" set.
User "admin" set.
Context "default" created.
Switched to context "default".
[root@k8s-master1 kubeconfig]# ls
admin.kubeconfig  bootstrap.kubeconfig  kubeconfig.sh  kube-proxy.kubeconfig  token.csv
[root@k8s-master1 kubeconfig]# cp token.csv *config /opt/kubernetes/cfg/

[root@k8s-master2 kubeconfig]# bash kubeconfig.sh 192.168.0.100 /opt/kubernetes/ssl
[root@k8s-master2 kubeconfig]# ls
admin.kubeconfig  bootstrap.kubeconfig  kubeconfig.sh  kube-proxy.kubeconfig  token.csv
[root@k8s-master2 kubeconfig]# cp token.csv *config /opt/kubernetes/cfg/

  Send the config files to each worker node

[root@k8s-master1 kubeconfig]# for i in 111 112; do scp token.csv *config 192.168.0.$i:/opt/kubernetes/cfg/; done

  4.3 Start the components

  4.3.1 The startup scripts for each component are as follows

  kube-apiserver

[root@master master]# vim apiserver.sh
#!/bin/bash

MASTER_ADDRESS=$1
ETCD_SERVERS=$2

cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs/kube-apiserver \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.96.0.0/12 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \\
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem  \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \\
--requestheader-client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--runtime-config=api/all=true \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-truncate-enabled=true \\
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver

   kube-scheduler

[root@master master]# vim scheduler.sh
#!/bin/bash

MASTER_ADDRESS=$1

cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs/kube-scheduler \\
--master=${MASTER_ADDRESS}:8080 \\
--address=0.0.0.0 \\
--leader-elect"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler

  kube-controller-manager

[root@master master]# vim controller-manager.sh 
#!/bin/bash

MASTER_ADDRESS=$1

cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs/kube-controller-manager \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--bind-address=0.0.0.0 \\
--service-cluster-ip-range=10.96.0.0/12 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem  \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s \\
--feature-gates=RotateKubeletServerCertificate=true \\
--feature-gates=RotateKubeletClientCertificate=true \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager

  4.3.2 Send the scripts to each management node

[root@master k8s]# for i in 1 2; do scp -r master k8s-master$i:/root/k8s/; done

  4.3.3 Run the startup scripts on each management node; the apiserver arguments are the node IP and the etcd cluster endpoints

# Management node 1
cd /root/k8s/master
[root@k8s-master1 master]# bash apiserver.sh 192.168.0.101 https://192.168.0.101:2379,https://192.168.0.102:2379
[root@k8s-master1 master]# bash scheduler.sh 127.0.0.1
[root@k8s-master1 master]# bash controller-manager.sh 127.0.0.1
# Management node 2
cd /root/k8s/master
[root@k8s-master2 master]# bash apiserver.sh 192.168.0.102 https://192.168.0.101:2379,https://192.168.0.102:2379
[root@k8s-master2 master]# bash scheduler.sh 127.0.0.1
[root@k8s-master2 master]# bash controller-manager.sh 127.0.0.1

  4.3.4 Check the status of each component

# Management node 1
[root@k8s-master1 master]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
etcd-1               Healthy   {"health":"true"}
# Management node 2
[root@k8s-master2 master]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok                  
scheduler            Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-0               Healthy   {"health":"true"} 

  Output like the above means all components are running normally.

5. Worker node deployment

  5.1 Bind the kubelet-bootstrap user to the system cluster role

  When a node's kubelet starts, it automatically creates a CSR request. Binding the kubelet-bootstrap user to the system:node-bootstrapper cluster role grants it the permission needed to have certificates issued.

  Once the master apiserver enables TLS authentication, a node's kubelet must use a valid CA-signed certificate to communicate with the apiserver before it can join the cluster. With many nodes, signing these certificates by hand is tedious, so Kubernetes provides the TLS Bootstrapping mechanism: kubelet starts as a low-privilege user, automatically requests a certificate from the apiserver, and the certificate is signed dynamically on the apiserver side.

  Run the following command on any one management node

[root@k8s-master1 ~]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
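
  Optionally, instead of approving each node CSR by hand later (section 5.2.4), the CSR-approving cluster roles that ship with Kubernetes 1.18 can be bound so that bootstrap and renewal requests are approved automatically; a sketch, assuming the standard built-in role names:

# Auto-approve the initial bootstrap CSRs submitted by the kubelet-bootstrap user
kubectl create clusterrolebinding auto-approve-csrs-for-bootstrap \
  --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient \
  --user=kubelet-bootstrap
# Auto-approve client-certificate renewal CSRs submitted by existing nodes
kubectl create clusterrolebinding auto-approve-renewals-for-nodes \
  --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient \
  --group=system:nodes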

  5.2 Start the components

  5.2.1 The startup scripts for each component are as follows

  kubelet

[root@master node]# vim kubelet.sh
#!/bin/bash

NODE_ADDRESS=$1

cat <<EOF >/opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=2 \\
--log-dir=/opt/kubernetes/logs/kubelet \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.config \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF

cat <<EOF >/opt/kubernetes/cfg/kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
  - 10.96.0.10
clusterDomain: cluster.local.
failSwapOn: false

# Authentication
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem

# Authorization
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s

# Node resource reservation / eviction thresholds
evictionHard:
  imagefs.available: 15%
  memory.available: 300Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s

# Image garbage collection policy
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s

# Certificate rotation
rotateCertificates: true # rotate the kubelet client certificate
featureGates:
  RotateKubeletServerCertificate: true
  RotateKubeletClientCertificate: true

maxOpenFiles: 1000000
maxPods: 110
EOF

cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet

  kube-proxy

[root@master node]# vim kube-proxy.sh
#!/bin/bash

NODE_ADDRESS=$1

cat <<EOF >/opt/kubernetes/cfg/kube-proxy.conf
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs/kube-proxy \\
--config=/opt/kubernetes/cfg/kube-proxy-config.yml"
EOF

cat <<EOF >/opt/kubernetes/cfg/kube-proxy-config.yml
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
address: 0.0.0.0 
clientConnection:
  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig 
hostnameOverride: ${NODE_ADDRESS} 
clusterCIDR: 10.244.0.0/16
mode: iptables 
EOF

cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy.conf
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy

  5.2.2 Send the scripts to each worker node

[root@master k8s]# for i in 1 2; do scp -r node k8s-node$i:/root/k8s/; done

  5.2.3 Run the startup scripts on each worker node

# Worker node 1
cd /root/k8s/node
[root@k8s-node1 node]# bash kubelet.sh 192.168.0.111
[root@k8s-node1 node]# bash kube-proxy.sh 192.168.0.111
# Worker node 2
cd /root/k8s/node
[root@k8s-node2 node]# bash kubelet.sh 192.168.0.112
[root@k8s-node2 node]# bash kube-proxy.sh 192.168.0.112

  5.2.4 Manually approve CSRs on a master node

  After kubelet starts, the node has not yet joined the cluster; its CSR must be approved manually before it can join.

[root@k8s-master1 master]# kubectl get node
No resources found in default namespace.
[root@k8s-master1 master]# kubectl get csr
NAME        AGE    SIGNERNAME                                    REQUESTOR           CONDITION
csr-2xx8v   2m2s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending
csr-6n8bw   4s     kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Pending

  Before approval the CSRs are in the Pending state; once approved they change to Approved,Issued.

[root@k8s-master1 master]# kubectl certificate approve csr-2xx8v
certificatesigningrequest.certificates.k8s.io/csr-2xx8v approved
[root@k8s-master1 master]# kubectl certificate approve csr-6n8bw
certificatesigningrequest.certificates.k8s.io/csr-6n8bw approved
[root@k8s-master1 master]# kubectl get csr
NAME        AGE    SIGNERNAME                                    REQUESTOR           CONDITION
csr-2xx8v   8m7s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
csr-6n8bw   6m9s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued 
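
  When several nodes join at the same time, every pending CSR can be approved in one go (sketch):

# Approve all CSRs that are still in the Pending state
[root@k8s-master1 master]# kubectl get csr | grep Pending | awk '{print $1}' | xargs -r kubectl certificate approve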

  After a short wait, the nodes show up in the Ready state.

[root@k8s-master1 master]# kubectl get node
NAME            STATUS   ROLES    AGE   VERSION
192.168.0.111   Ready    <none>   82s   v1.18.14
192.168.0.112   Ready    <none>   76s   v1.18.14

6. Install the flannel network plugin

  6.1 Download the flannel binary package, extract it, and send it to each node

# Download
[root@master flannel]# wget https://github.com/coreos/flannel/releases/download/v0.13.0/flannel-v0.13.0-linux-amd64.tar.gz
[root@master flannel]# tar -zxf flannel-v0.13.0-linux-amd64.tar.gz
# Management nodes
[root@master flannel]# for i in 1 2; do scp flanneld k8s-master$i:/opt/kubernetes/bin/; done
# Worker nodes
[root@master flannel]# for i in 1 2; do scp flanneld k8s-node$i:/opt/kubernetes/bin/; done

  6.2 Write the pod network segment into etcd

  flannel stores its own subnet information in etcd, so make sure etcd is reachable and write the predefined network segment in advance.

[root@k8s-master1 ~]# ETCDCTL_API=2 etcdctl --ca-file=/opt/kubernetes/ssl/ca.pem --cert-file=/opt/kubernetes/ssl/server.pem --key-file=/opt/kubernetes/ssl/server-key.pem --endpoints=https://192.168.0.101:2379,https://192.168.0.102:2379 set /coreos.com/network/config  '{"Network": "10.244.0.0/16", "Backend": {"Type": "vxlan"}}'
{"Network": "10.244.0.0/16", "Backend": {"Type": "vxlan"}}

  6.3 The startup script is as follows

[root@master flannel]# vim flannel.sh
#!/bin/bash

ETCD_ENDPOINTS=$1

cat <<EOF >/opt/kubernetes/cfg/flanneld
FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
-etcd-cafile=/opt/kubernetes/ssl/ca.pem \
-etcd-certfile=/opt/kubernetes/ssl/server.pem \
-etcd-keyfile=/opt/kubernetes/ssl/server-key.pem"
EOF

cat <<EOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld

  6.4 Send the script to each node

# Management nodes
[root@master flannel]# for i in 1 2; do scp flannel.sh k8s-master$i:/root/k8s/; done
# Worker nodes
[root@master flannel]# for i in 1 2; do scp flannel.sh k8s-node$i:/root/k8s/; done

  6.5 Run the script on each node

[root@k8s-master1 k8s]# bash flannel.sh https://192.168.0.101:2379,https://192.168.0.102:2379
[root@k8s-master1 k8s]# ifconfig flannel.1
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 10.244.30.0  netmask 255.255.255.255  broadcast 10.244.30.0
        inet6 fe80::ec32:5bff:fee7:a854  prefixlen 64  scopeid 0x20<link>
        ether ee:32:5b:e7:a8:54  txqueuelen 0  (Ethernet)
        RX packets 2  bytes 168 (168.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 2  bytes 168 (168.0 B)
        TX errors 0  dropped 20 overruns 0  carrier 0  collisions 0

  As shown above, a flannel.1 virtual interface is created; if the flannel.1 addresses on all nodes can ping each other, flannel has been installed successfully.
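
  The per-node subnet leases can also be listed straight out of etcd, which helps when a node's flannel.1 address looks wrong (sketch, v2 keyspace again):

# Each node running flanneld should own one /24 lease under this prefix
[root@k8s-master1 ~]# ETCDCTL_API=2 etcdctl --ca-file=/opt/kubernetes/ssl/ca.pem --cert-file=/opt/kubernetes/ssl/server.pem --key-file=/opt/kubernetes/ssl/server-key.pem --endpoints=https://192.168.0.101:2379,https://192.168.0.102:2379 ls /coreos.com/network/subnets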

7. Install coredns

  Note: see the version mapping between k8s and coredns:

       https://github.com/coredns/deployment/blob/master/kubernetes/CoreDNS-k8s_version.md

  7.1 The YAML manifest is as follows

  Note: set the clusterIP (10.96.0.10) and the image version (1.6.7) to match this deployment.

[root@master k8s]# vim coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
         podAntiAffinity:
           preferredDuringSchedulingIgnoredDuringExecution:
           - weight: 100
             podAffinityTerm:
               labelSelector:
                 matchExpressions:
                   - key: k8s-app
                     operator: In
                     values: ["kube-dns"]
               topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.6.7
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.96.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP

  7.2 Send the YAML file to each management node and create the resources with kubectl on any one of them

[root@master k8s]# for i in 1 2; do scp coredns.yaml k8s-master$i:/root/k8s/; done

[root@k8s-master1 k8s]# kubectl apply -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created

[root@k8s-master1 k8s]# kubectl get service -A
NAMESPACE     NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
default       kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP                  67m
kube-system   kube-dns     ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   2m7s

  7.3 Verify that DNS is working

# Create a busybox pod as a test client
[root@k8s-master1 k8s]# kubectl create -f https://k8s.io/examples/admin/dns/busybox.yaml
pod/busybox created
# Resolve the kubernetes service
[root@k8s-master1 k8s]#  kubectl exec -it busybox -- nslookup kubernetes
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
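
  Resolving a namespaced service name is another quick check that the cluster domain search path is wired up correctly (sketch):

# Resolve the kube-dns service by its namespaced short name
[root@k8s-master1 k8s]# kubectl exec -it busybox -- nslookup kube-dns.kube-system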

 

Reference: https://www.cnblogs.com/liyongjian5179/p/13198143.html

