Installing Kubernetes from binaries: one master node, two nodes (revised)


Installing Kubernetes v1.11 from binaries on CentOS 7.4


Installation

Configure system parameters
Run the following on all nodes.

# Temporarily disable SELinux
# To disable it permanently, edit /etc/sysconfig/selinux
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux
setenforce 0

# Temporarily disable swap
# To disable it permanently, comment out the swap line(s) in /etc/fstab (see the sed one-liner below)
swapoff -a
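
One way to comment out the swap entry in a single step (assuming the usual fstab layout, where the swap line contains the word "swap" between whitespace):

sed -i '/[[:space:]]swap[[:space:]]/ s/^/#/' /etc/fstab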

# Enable forwarding
# Docker changed its default firewall rules starting with 1.13:
# the FORWARD chain of the iptables filter table is set to DROP,
# which breaks cross-node pod communication in a Kubernetes cluster.

iptables -P FORWARD ACCEPT

# Configure forwarding-related kernel parameters; without them, later steps may fail
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF
sysctl --system
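
If sysctl reports "No such file or directory" for the net.bridge.* keys, the br_netfilter module is not loaded yet (a common CentOS 7 gotcha); load it first and re-run sysctl:

modprobe br_netfilter
sysctl --system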

# Load the ipvs kernel modules
# They must be reloaded after every reboot (see the persistence sketch below)
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
lsmod | grep ip_vs
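
modprobe does not persist across reboots; a minimal sketch to load the modules at boot via systemd-modules-load (standard on CentOS 7):

cat <<EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF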

Configure hosts resolution
[root@host-10-1-1-8 k8s]# hostnamectl set-hostname lab1
[root@host-10-1-1-68 ~]# hostnamectl set-hostname lab2
[root@host-10-1-1-111 ~]# hostnamectl set-hostname lab3
Reboot all three nodes.

Run the following on all nodes.

cat >>/etc/hosts<<EOF
10.1.1.8 lab1
10.1.1.68 lab2
10.1.1.111 lab3
EOF
Install and configure Docker
Kubernetes v1.11.0 recommends Docker v17.03;
Docker 1.11, 1.12 and 1.13 also work, but newer Docker versions may not work correctly.
Testing showed 17.09 does not work properly: resource limits (memory/CPU) cannot be applied.

Run the following on all nodes.

Install Docker
# Remove any existing Docker, then install the pinned docker-ce version
yum remove -y docker-ce docker-ce-selinux container-selinux
yum install -y --setopt=obsoletes=0 \
docker-ce-17.03.1.ce-1.el7.centos \
docker-ce-selinux-17.03.1.ce-1.el7.centos
Start Docker
systemctl enable docker && systemctl restart docker








Install CFSSL

Run only on lab1.

# Download
# Baidu Cloud link: https://pan.baidu.com/s/1kgV40nwHy1IKnnLD6zH4cQ   password: alyj
mkdir -pv /server/software/k8s
cd /server/software/k8s
yum install -y wget
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

# Install
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl*
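
To confirm the binaries are installed and on PATH:

cfssl version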







Configure the CA
Run only on lab1.

This CA configuration is needed later when setting up both etcd and Kubernetes.

mkdir -pv $HOME/ssl && cd $HOME/ssl
cat >ca-config.json<<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF








Configure the etcd cluster
Generate the etcd CA
Run only on lab1.

# Write the CSR config
cat >etcd-ca-csr.json<<EOF
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "etcd",
      "OU": "Etcd Security"
    }
  ]
}
EOF

# Generate the etcd root CA
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca

cat >etcd-csr.json<<EOF
{
    "CN": "etcd",
    "hosts": [
      "127.0.0.1",
      "10.1.1.8",
      "10.1.1.68",
      "10.1.1.111"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "etcd",
            "OU": "Etcd Security"
        }
    ]
}
EOF

# Generate the etcd server certificate
cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json \
-profile=kubernetes etcd-csr.json | cfssljson -bare etcd
mkdir -pv /etc/etcd/ssl
cp etcd*.pem /etc/etcd/ssl
ls /etc/etcd/ssl/etcd*.pem

# Copy to the other nodes
cd /etc/etcd && tar cvzf etcd-ssl.tgz ssl/
scp /etc/etcd/etcd-ssl.tgz lab2:~/
scp /etc/etcd/etcd-ssl.tgz lab3:~/
ssh lab2 'mkdir -pv /etc/etcd && tar xf etcd-ssl.tgz -C /etc/etcd && ls -l /etc/etcd/ssl'
ssh lab3 'mkdir -pv /etc/etcd && tar xf etcd-ssl.tgz -C /etc/etcd && ls -l /etc/etcd/ssl'
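
Before going further, it is worth confirming that the certificate's SANs cover all three node IPs (plain openssl, preinstalled on CentOS 7):

openssl x509 -in /etc/etcd/ssl/etcd.pem -noout -text | grep -A1 'Subject Alternative Name'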








Install and start etcd

Run the following on all nodes.
Note: the certificates were generated on the master and copied to the node machines above; the etcd software itself must also be installed on the nodes.

# Install
# Baidu Cloud link: https://pan.baidu.com/s/1IVHyMqiJrlq9gmbF49Ly3Q   password: w5nx
mkdir -pv /server/software/k8s
cd /server/software/k8s
yum install -y wget
wget https://github.com/coreos/etcd/releases/download/v3.2.18/etcd-v3.2.18-linux-amd64.tar.gz
tar xf etcd-v3.2.18-linux-amd64.tar.gz
mv etcd-v3.2.18-linux-amd64 /usr/local/etcd-v3.2.18
ln -sv /usr/local/etcd-v3.2.18 /usr/local/etcd
cd /usr/local/etcd && mkdir bin && mv etcd etcdctl bin
/usr/local/etcd/bin/etcd --version
cd $HOME

# Startup script
# The heredoc below is unquoted, so ETCD_NAME, INTERNAL_IP and ETCD_CLUSTER
# are expanded when the unit file is written; each node thus gets its own
# name and IPs (hostnames and /etc/hosts were set up earlier)
export ETCD_NAME=$(hostname)
export INTERNAL_IP=$(hostname -i | awk '{print $NF}')
export ETCD_CLUSTER='lab1=https://10.1.1.8:2380,lab2=https://10.1.1.68:2380,lab3=https://10.1.1.111:2380'
mkdir -pv /data/etcd
cat > /etc/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/data/etcd
EnvironmentFile=-/etc/etcd/etcd.conf
ExecStart=/usr/local/etcd/bin/etcd \\
  --name ${ETCD_NAME} \\
  --cert-file=/etc/etcd/ssl/etcd.pem \\
  --key-file=/etc/etcd/ssl/etcd-key.pem \\
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \\
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \\
  --trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem \\
  --initial-advertise-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-peer-urls https://${INTERNAL_IP}:2380 \\
  --listen-client-urls https://${INTERNAL_IP}:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls https://${INTERNAL_IP}:2379 \\
  --initial-cluster-token my-etcd-token \\
  --initial-cluster ${ETCD_CLUSTER} \\
  --initial-cluster-state new \\
  --data-dir=/data/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF




Create the etcd config file /etc/etcd/etcd.conf
(The unit file loads it with EnvironmentFile=-, so the file is optional, and the
same settings are already passed as command-line flags above; see the notes at
the end of this document.)

vi /etc/etcd/etcd.conf

# [member]
ETCD_NAME=etcd1
ETCD_DATA_DIR="/var/lib/etcd"
ETCD_LISTEN_PEER_URLS="https://10.1.1.8:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.1.1.8:2379"

# [cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.1.1.8:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://10.1.1.8:2379"







# Start, and enable at boot
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
Check the etcd cluster status
/usr/local/etcd/bin/etcdctl --endpoints "https://127.0.0.1:2379" \
  --ca-file=/etc/etcd/ssl/etcd-ca.pem \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  cluster-health
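
etcdctl 3.2 defaults to the v2 API; the v3 API that Kubernetes itself uses can be checked as well, noting that the TLS flags are named differently there:

ETCDCTL_API=3 /usr/local/etcd/bin/etcdctl \
  --endpoints "https://127.0.0.1:2379" \
  --cacert=/etc/etcd/ssl/etcd-ca.pem \
  --cert=/etc/etcd/ssl/etcd.pem \
  --key=/etc/etcd/ssl/etcd-key.pem \
  endpoint health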







Generate the Kubernetes cluster CA
# Enter the working directory
cd $HOME/ssl

# Configure the root CA
cat >ca-csr.json<<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ],
  "ca": {
     "expiry": "87600h"
  }
}
EOF

# Generate the root CA
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
ls ca*.pem
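
The generated root certificate can be inspected with the cfssl-certinfo binary installed earlier:

cfssl-certinfo -cert ca.pem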




# Configure the kube-apiserver certificate
# 10.96.0.1 is the first IP of the service-cluster-ip-range passed to kube-apiserver
cat >kube-apiserver-csr.json<<EOF
{
    "CN": "kube-apiserver",
    "hosts": [
      "127.0.0.1",
      "10.1.1.8",
      "10.1.1.68",
      "10.1.1.111",
      "10.96.0.1",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

# Generate the kube-apiserver certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
ls kube-apiserver*.pem




# Configure the kube-controller-manager certificate
cat >kube-controller-manager-csr.json<<EOF
{
    "CN": "system:kube-controller-manager",
    "hosts": [
      "127.0.0.1",
      "10.1.1.8",
      "10.1.1.68",
      "10.1.1.111"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "system:kube-controller-manager",
            "OU": "System"
        }
    ]
}
EOF

# Generate the kube-controller-manager certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
ls kube-controller-manager*.pem






# Configure the kube-scheduler certificate
cat >kube-scheduler-csr.json<<EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
      "127.0.0.1",
      "10.1.1.8",
      "10.1.1.68",
      "10.1.1.111"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "system:kube-scheduler",
            "OU": "System"
        }
    ]
}
EOF






# Generate the kube-scheduler certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
ls kube-scheduler*.pem

# Configure the kube-proxy certificate
cat >kube-proxy-csr.json<<EOF
{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "system:kube-proxy",
            "OU": "System"
        }
    ]
}
EOF





# Generate the kube-proxy certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
ls kube-proxy*.pem

# Configure the admin certificate
cat >admin-csr.json<<EOF
{
    "CN": "admin",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "system:masters",
            "OU": "System"
        }
    ]
}
EOF

# Generate the admin certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
ls admin*.pem






# Copy the generated certificates
mkdir -pv /etc/kubernetes/pki
cp ca*.pem admin*.pem kube-proxy*.pem kube-scheduler*.pem kube-controller-manager*.pem kube-apiserver*.pem /etc/kubernetes/pki
cd /etc/kubernetes && tar cvzf pki.tgz pki/
scp /etc/kubernetes/pki.tgz lab2:~/
scp /etc/kubernetes/pki.tgz lab3:~/
ssh lab2 'mkdir -pv /etc/kubernetes && tar xf pki.tgz -C /etc/kubernetes && ls -l /etc/kubernetes/pki'
ssh lab3 'mkdir -pv /etc/kubernetes && tar xf pki.tgz -C /etc/kubernetes && ls -l /etc/kubernetes/pki'
cd $HOME








Install the Kubernetes binaries (the node machines need them too)

# Download
# The direct download needs access across the GFW; if that is unavailable, use the link below
# Link: https://pan.baidu.com/s/1OI9Q4BRp7jNJUmsA8IAkbA   password: tnx5
cd /server/software/k8s
wget https://dl.k8s.io/v1.11.0/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
mkdir -pv /usr/local/kubernetes-v1.11.0/bin
cp kube-apiserver kube-controller-manager kube-scheduler kube-proxy kubelet kubectl /usr/local/kubernetes-v1.11.0/bin
ln -sv /usr/local/kubernetes-v1.11.0 /usr/local/kubernetes
cp /usr/local/kubernetes/bin/kubectl /usr/local/bin/kubectl
kubectl version   # an error here can be ignored (the apiserver is not up yet); continue
cd $HOME







Generate the kubeconfig files
# Use TLS Bootstrapping
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /etc/kubernetes/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
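
The resulting file follows the apiserver's static token file format, token,user,uid,"group,...", and is wired in below via --token-auth-file:

cat /etc/kubernetes/token.csv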

# Create the kubelet bootstrapping kubeconfig
cd /etc/kubernetes
export KUBE_APISERVER="https://10.1.1.8:6443"
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kubelet-bootstrap.conf
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=kubelet-bootstrap.conf
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=kubelet-bootstrap.conf
kubectl config use-context default --kubeconfig=kubelet-bootstrap.conf

# Create the kube-controller-manager kubeconfig
export KUBE_APISERVER="https://10.1.1.8:6443"
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-controller-manager.conf
kubectl config set-credentials kube-controller-manager \
  --client-certificate=/etc/kubernetes/pki/kube-controller-manager.pem \
  --client-key=/etc/kubernetes/pki/kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.conf
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-controller-manager \
  --kubeconfig=kube-controller-manager.conf
kubectl config use-context default --kubeconfig=kube-controller-manager.conf

# Create the kube-scheduler kubeconfig
export KUBE_APISERVER="https://10.1.1.8:6443"
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-scheduler.conf
kubectl config set-credentials kube-scheduler \
  --client-certificate=/etc/kubernetes/pki/kube-scheduler.pem \
  --client-key=/etc/kubernetes/pki/kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.conf
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-scheduler \
  --kubeconfig=kube-scheduler.conf
kubectl config use-context default --kubeconfig=kube-scheduler.conf

# Create the kube-proxy kubeconfig
export KUBE_APISERVER="https://10.1.1.8:6443"
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.conf
kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
  --client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.conf
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.conf
kubectl config use-context default --kubeconfig=kube-proxy.conf

# Create the admin kubeconfig
export KUBE_APISERVER="https://10.1.1.8:6443"
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/pki/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=admin.conf
kubectl config set-credentials admin \
  --client-certificate=/etc/kubernetes/pki/admin.pem \
  --client-key=/etc/kubernetes/pki/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=admin.conf
kubectl config set-context default \
  --cluster=kubernetes \
  --user=admin \
  --kubeconfig=admin.conf
kubectl config use-context default --kubeconfig=admin.conf
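
A quick sanity check of any generated kubeconfig; the embedded certificates show up as redacted data:

kubectl config view --kubeconfig=/etc/kubernetes/admin.conf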

# Copy kubelet-bootstrap.conf and kube-proxy.conf to the other nodes
scp kubelet-bootstrap.conf kube-proxy.conf lab2:/etc/kubernetes
scp kubelet-bootstrap.conf kube-proxy.conf lab3:/etc/kubernetes
cd $HOME









Configure the master components

Run only on lab1.

Configure and start kube-apiserver
# Copy the etcd CA and certificates
mkdir -pv /etc/kubernetes/pki/etcd
cd /etc/etcd/ssl
cp etcd-ca.pem etcd-key.pem etcd.pem /etc/kubernetes/pki/etcd

# Generate the service account key pair
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
ls /etc/kubernetes/pki/sa.*
cd $HOME

# Unit file
cat >/etc/systemd/system/kube-apiserver.service<<EOF
[Unit]
Description=Kubernetes API Service
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/local/kubernetes/bin/kube-apiserver \\
        \$KUBE_LOGTOSTDERR \\
        \$KUBE_LOG_LEVEL \\
        \$KUBE_ETCD_ARGS \\
        \$KUBE_API_ADDRESS \\
        \$KUBE_SERVICE_ADDRESSES \\
        \$KUBE_ADMISSION_CONTROL \\
        \$KUBE_APISERVER_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# This config file is shared by kube-apiserver, kube-controller-manager,
# kube-scheduler, kubelet and kube-proxy
cat >/etc/kubernetes/config<<EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=2"
EOF

cat >/etc/kubernetes/apiserver<<EOF
KUBE_API_ADDRESS="--advertise-address=10.1.1.8"
KUBE_ETCD_ARGS="--etcd-servers=https://10.1.1.8:2379,https://10.1.1.68:2379,https://10.1.1.111:2379 --etcd-cafile=/etc/kubernetes/pki/etcd/etcd-ca.pem --etcd-certfile=/etc/kubernetes/pki/etcd/etcd.pem --etcd-keyfile=/etc/kubernetes/pki/etcd/etcd-key.pem"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.96.0.0/12"
KUBE_ADMISSION_CONTROL="--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
KUBE_APISERVER_ARGS="--allow-privileged=true --authorization-mode=Node,RBAC --enable-bootstrap-token-auth=true --token-auth-file=/etc/kubernetes/token.csv --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/pki/kube-apiserver.pem --tls-private-key-file=/etc/kubernetes/pki/kube-apiserver-key.pem --client-ca-file=/etc/kubernetes/pki/ca.pem --service-account-key-file=/etc/kubernetes/pki/sa.pub --enable-swagger-ui=true --secure-port=6443 --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname --anonymous-auth=false --kubelet-client-certificate=/etc/kubernetes/pki/admin.pem --kubelet-client-key=/etc/kubernetes/pki/admin-key.pem"
EOF

# Start
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver

# Access test (a plain browser request gets 401 because anonymous auth is
# disabled; see the authenticated curl sketch below)
https://10.1.1.8:6443/swaggerapi
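
Since the apiserver runs with --anonymous-auth=false, command-line checks need a client certificate; a sketch using the admin certificate generated earlier:

curl --cacert /etc/kubernetes/pki/ca.pem \
  --cert /etc/kubernetes/pki/admin.pem \
  --key /etc/kubernetes/pki/admin-key.pem \
  https://10.1.1.8:6443/healthz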









Configure and start kube-controller-manager
# Unit file
cat >/etc/systemd/system/kube-controller-manager.service<<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/local/kubernetes/bin/kube-controller-manager \\
        \$KUBE_LOGTOSTDERR \\
        \$KUBE_LOG_LEVEL \\
        \$KUBECONFIG \\
        \$KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

cat >/etc/kubernetes/controller-manager<<EOF
KUBECONFIG="--kubeconfig=/etc/kubernetes/kube-controller-manager.conf"
KUBE_CONTROLLER_MANAGER_ARGS="--address=127.0.0.1 --cluster-cidr=10.244.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem --service-account-private-key-file=/etc/kubernetes/pki/sa.key --root-ca-file=/etc/kubernetes/pki/ca.pem --leader-elect=true --use-service-account-credentials=true --node-monitor-grace-period=10s --pod-eviction-timeout=10s --allocate-node-cidrs=true --controllers=*,bootstrapsigner,tokencleaner"
EOF

# Start
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager









Configure and start kube-scheduler
cat >/etc/systemd/system/kube-scheduler.service<<EOF
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/local/kubernetes/bin/kube-scheduler \\
            \$KUBE_LOGTOSTDERR \\
            \$KUBE_LOG_LEVEL \\
            \$KUBECONFIG \\
            \$KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

cat >/etc/kubernetes/scheduler<<EOF
KUBECONFIG="--kubeconfig=/etc/kubernetes/kube-scheduler.conf"
KUBE_SCHEDULER_ARGS="--leader-elect=true --address=127.0.0.1"
EOF

# Start
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler









Configure kubectl on the master
rm -rf $HOME/.kube
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get no   # errors can be ignored for now

Configure kubectl on node1 (lab2)
# run the scp on lab1, then the rest on lab2
scp /etc/kubernetes/admin.conf lab2:/etc/kubernetes/
rm -rf $HOME/.kube
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get no   # errors can be ignored for now

Configure kubectl on node2 (lab3)
# run the scp on lab1, then the rest on lab3
scp /etc/kubernetes/admin.conf lab3:/etc/kubernetes/
rm -rf $HOME/.kube
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get no   # errors can be ignored for now

Check component status
kubectl get componentstatuses




Configure kubelet TLS bootstrap
# Grant the kubelet-bootstrap user from the bootstrap token file the system:node-bootstrapper cluster role
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap




Configure the node components
Run the following on all nodes, including the master.

Install CNI
# Install the CNI plugins
# Baidu Cloud link: https://pan.baidu.com/s/1-PputObLs5jouXLnuBCI6Q   password: tzqm
cd /server/software/k8s
wget https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz
mkdir -pv /opt/cni/bin
tar xf cni-plugins-amd64-v0.7.1.tgz -C /opt/cni/bin
ls -l /opt/cni/bin
cd $HOME





Configure and start kubelet
# Unit file
mkdir -pv /data/kubelet
cat >/etc/systemd/system/kubelet.service<<EOF
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/data/kubelet
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/local/kubernetes/bin/kubelet \\
            \$KUBE_LOGTOSTDERR \\
            \$KUBE_LOG_LEVEL \\
            \$KUBELET_CONFIG \\
            \$KUBELET_HOSTNAME \\
            \$KUBELET_POD_INFRA_CONTAINER \\
            \$KUBELET_ARGS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

cat >/etc/kubernetes/config<<EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=2"
EOF



# Note: adjust the IP below; configure this on the nodes too, using each node's own IP

cat >/etc/kubernetes/kubelet<<EOF
KUBELET_HOSTNAME="--hostname-override=10.1.1.8"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1"
KUBELET_CONFIG="--config=/etc/kubernetes/kubelet-config.yml"
KUBELET_ARGS="--bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.conf --kubeconfig=/etc/kubernetes/kubelet.conf --cert-dir=/etc/kubernetes/pki --network-plugin=cni --cni-bin-dir=/opt/cni/bin --cni-conf-dir=/etc/cni/net.d"
EOF

# Note: adjust the IP; configure this on the nodes too, using each node's own IP

# lab1, lab2, lab3 each use their own IP (a sed sketch for this follows the block below)
cat >/etc/kubernetes/kubelet-config.yml<<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 10.1.1.8
port: 10250
cgroupDriver: cgroupfs
clusterDNS:
  - 10.96.0.10
clusterDomain: cluster.local.
hairpinMode: promiscuous-bridge
serializeImagePulls: false
authentication:
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
EOF
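
A minimal sketch to fill in each node's own address automatically, assuming hostname -i resolves to the node's internal IP via /etc/hosts as set up earlier:

export INTERNAL_IP=$(hostname -i | awk '{print $NF}')
sed -i "s/^address: .*/address: ${INTERNAL_IP}/" /etc/kubernetes/kubelet-config.yml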

# Start
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet






Approve the certificate requests
# Run the following on a node where kubectl is configured

# List the CSRs
kubectl get csr

# Approve: run this for each result of the previous step, covering all nodes including the master; the long string below is an example
kubectl certificate approve node-csr-Yiiv675wUCvQl3HH11jDr0cC9p3kbrXWrxvG3EjWGoE
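
To approve every pending CSR in one go (convenient in a lab; approve individually in production):

kubectl get csr -o name | xargs kubectl certificate approve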

# Check the nodes
# At this point the node status is NotReady
kubectl get nodes

# On the node machines, check the generated files
ls -l /etc/kubernetes/kubelet.conf
ls -l /etc/kubernetes/pki/kubelet*








Configure and start kube-proxy
# Install dependencies
yum install -y conntrack-tools

# Unit file
cat >/etc/systemd/system/kube-proxy.service<<EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/local/kubernetes/bin/kube-proxy \\
        \$KUBE_LOGTOSTDERR \\
        \$KUBE_LOG_LEVEL \\
        \$KUBECONFIG \\
        \$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# Note: adjust the IPs
# lab1, lab2, lab3 each use their own IP
# The 1.11.0 ipvs mode has a bug on CentOS 7 and cannot be used reliably,
# so this walkthrough uses iptables mode (this cluster runs CentOS 7.4)
cat >/etc/kubernetes/proxy<<EOF
KUBECONFIG="--kubeconfig=/etc/kubernetes/kube-proxy.conf"
KUBE_PROXY_ARGS="--bind-address=10.1.1.8 --proxy-mode=iptables --hostname-override=10.1.1.8 --cluster-cidr=10.244.0.0/16"
EOF

# Start
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
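
Once kube-proxy is running in iptables mode, the service rules it programs can be inspected; the KUBE-SERVICES chain in the nat table is created by kube-proxy:

iptables -t nat -nL KUBE-SERVICES | head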








Set cluster roles
# Label lab1 as master
kubectl label nodes 10.1.1.8 node-role.kubernetes.io/master=

# Label lab2 and lab3 as nodes
kubectl label nodes 10.1.1.68 node-role.kubernetes.io/node=
kubectl label nodes 10.1.1.111 node-role.kubernetes.io/node=

# By default the master should not accept workloads
kubectl taint nodes 10.1.1.8 node-role.kubernetes.io/master=true:NoSchedule

To let the master run pods:
kubectl taint nodes 10.1.1.8 node-role.kubernetes.io/master-
To stop the master from running pods:
kubectl taint nodes 10.1.1.8 node-role.kubernetes.io/master=:NoSchedule



# Check the nodes
# Node status is still NotReady at this point
kubectl get no








Configure the flannel network
Run on lab1.
Note: set the --iface argument below to the actual NIC name.

# Download the manifest
mkdir flannel && cd flannel
wget https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml

# Edit the manifest
# This network must match the --cluster-cidr used for kube-controller-manager and kube-proxy above
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }

# Switch to a reachable image mirror
image: registry.cn-shanghai.aliyuncs.com/gcr-k8s/flannel:v0.10.0-amd64

# If a node has multiple NICs, see flannel issue 39701:
# https://github.com/kubernetes/kubernetes/issues/39701
# The --iface argument in kube-flannel.yml currently has to name the host's
# internal NIC, otherwise DNS may fail to resolve and containers may be unable
# to communicate; download kube-flannel.yml and add --iface=<iface-name> to the flanneld arguments:
    containers:
      - name: kube-flannel
        image: registry.cn-shanghai.aliyuncs.com/gcr-k8s/flannel:v0.10.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=eth1

# Deploy
kubectl apply -f kube-flannel.yml

# Check
kubectl get pods -n kube-system
kubectl get svc -n kube-system

# Check node status
# Once all flannel pods are running, the nodes become Ready
kubectl get no
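
With the vxlan backend, each Ready node should now have a flannel.1 interface, and flanneld records the node's subnet lease on disk; a quick check:

ip -d link show flannel.1
cat /run/flannel/subnet.env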








Configure CoreDNS
Run on lab1.

Note: CoreDNS 1.2.0 is used below.

# Install
# 10.96.0.10 is the clusterDNS address configured for the kubelet
cd $HOME && mkdir coredns && cd coredns
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh
chmod +x deploy.sh
./deploy.sh -i 10.96.0.10 > coredns.yaml
kubectl apply -f coredns.yaml

Note: verify that 10.96.0.10 actually ended up in the generated file.



# Check
kubectl get pods -n kube-system
kubectl get svc -n kube-system





Testing
Start a test deployment
kubectl run nginx --replicas=2 --image=nginx:alpine --port=80
kubectl expose deployment nginx --type=NodePort --name=example-service-nodeport
kubectl expose deployment nginx --name=example-service
kubectl scale --replicas=3 deployment/nginx
Check status
kubectl get deploy -o wide
kubectl get pods -o wide
kubectl get svc -o wide
kubectl describe svc example-service




DNS resolution test
kubectl run -it --rm --image=infoblox/dnstools dns-client
nslookup kubernetes
nslookup example-service
curl example-service


Access test
# Use the CLUSTER-IP reported by kubectl get svc for the service (10.107.91.153 in this run)
curl "10.107.91.153:80"

# 32223 is the NodePort reported by kubectl get svc
http://10.1.1.8:32223/
http://10.1.1.68:32223/
http://10.1.1.111:32223/


Cleanup
kubectl delete svc example-service example-service-nodeport
kubectl delete deploy nginx



Special notes:

1. After installation there was no /etc/etcd/etcd.conf.
   Shanghai Will said this has no effect (the unit file loads it with
   EnvironmentFile=-, which makes the file optional), but others say it
   should be added.

2. Every export in this document is temporary and disappears after a reboot.
   The exports are only used while writing config files (the values are
   expanded into the files at write time, e.g. in the etcd startup script):

   export ETCD_NAME=$(hostname)
   export INTERNAL_IP=$(hostname -i | awk '{print $NF}')

   For production, double-check the generated files and write the values out
   explicitly rather than relying on the shell environment.




References:
http://www.maogx.win/
https://juejin.im/user/59ffa2836fb9a0451c39c64f/posts
https://k8smeetup.github.io/docs/concepts/



 

