Download the Kubernetes server components on the master:
wget https://storage.googleapis.com/kubernetes-release/release/v1.9.2/kubernetes-server-linux-amd64.tar.gz
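The later master steps assume the server tarball has been unpacked under /root/master_pkg (the directory name is taken from the pwd output further down); a minimal sketch of that unpack step, with the download path assumed to be /root:
# unpack the server tarball downloaded above (download path assumed)
mkdir -p /root/master_pkg && cd /root/master_pkg
tar xvf /root/kubernetes-server-linux-amd64.tar.gz
ls kubernetes/server/bin/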
Download the Kubernetes node components on the nodes:
wget https://dl.k8s.io/v1.9.2/kubernetes-node-linux-amd64.tar.gz
Deploy the master components
On the master:
Copy the binaries into /opt/kubernetes/bin
[root@master bin]# pwd
/root/master_pkg/kubernetes/server/bin
[root@master bin]# cp kube-controller-manager kube-scheduler kube-apiserver /opt/kubernetes/bin/
[root@master bin]# chmod +x /opt/kubernetes/bin/*
Create the apiserver.sh script

#!/bin/bash

MASTER_ADDRESS=${1:-"192.168.1.195"}
ETCD_SERVERS=${2:-"http://127.0.0.1:2379"}

cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--insecure-bind-address=127.0.0.1 \\
--bind-address=${MASTER_ADDRESS} \\
--insecure-port=8080 \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.10.10.0/24 \\
--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/kubernetes/ssl/ca.pem \\
--etcd-certfile=/opt/kubernetes/ssl/server.pem \\
--etcd-keyfile=/opt/kubernetes/ssl/server-key.pem"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
Run the apiserver.sh script:
[root@master bin]# ./apiserver.sh 192.168.1.101 https://192.168.1.101:2379,https://192.168.1.102:2379,https://192.168.1.103:2379
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
Copy token.csv into the cfg directory
cp /opt/kubernetes/ssl/token.csv /opt/kubernetes/cfg/
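token.csv is the static bootstrap token file that --token-auth-file points at; each line has the form token,user,uid,"group". A hypothetical sketch of how such a file can be generated (the token value must match the one embedded in bootstrap.kubeconfig later):
# generate a random token and write token.csv in the format token,user,uid,"group"
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/kubernetes/ssl/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF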
Start kube-apiserver
[root@master bin]# systemctl start kube-apiserver
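A quick sanity check that the API server is up, using the ports configured in apiserver.sh above (secure 6443, insecure 8080 bound to 127.0.0.1):
# confirm the listening ports and hit the unauthenticated health endpoint
ss -tlnp | grep -E ':6443|:8080'
curl http://127.0.0.1:8080/healthz    # should return "ok"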
Create the controller-manager.sh script

#!/bin/bash

MASTER_ADDRESS=${1:-"127.0.0.1"}

cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.10.10.0/24 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager
Run the script:
[root@master bin]# ./controller-manager.sh 127.0.0.1
Check whether the service is running
[root@master bin]# ps -ef | grep controller-manager
root     16464     1 10 14:34 ?        00:00:01 /opt/kubernetes/bin/kube-controller-manager --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect=true --address=127.0.0.1 --service-cluster-ip-range=10.10.10.0/24 --cluster-name=kubernetes --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem
Create the scheduler.sh script

#!/bin/bash

MASTER_ADDRESS=${1:-"127.0.0.1"}

cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler
Run the script
[root@master bin]# ./scheduler.sh 127.0.0.1
Check whether the service is running
[root@master bin]# ps -ef | grep scheduler
root     16531     1  4 14:37 ?        00:00:00 /opt/kubernetes/bin/kube-scheduler --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect
Check the cluster component status
[root@master bin]# kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health": "true"}
etcd-2               Healthy   {"health": "true"}
etcd-1               Healthy   {"health": "true"}
Deploy the node components
Copy the kubeconfig files generated on the master to the cfg directory on both nodes
/opt/kubernetes/ssl
[root@master ssl]# scp *kubeconfig root@192.168.1.102:/opt/kubernetes/cfg/
[root@master ssl]# scp *kubeconfig root@192.168.1.103:/opt/kubernetes/cfg/
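For reference, bootstrap.kubeconfig is typically generated on the master with kubectl config before this point (this guide does not show that step); a sketch assuming the API server address and the bootstrap token from earlier, with kube-proxy.kubeconfig built the same way but using the kube-proxy client certificate instead of a token:
# assumed values: API server address and the token written to token.csv
KUBE_APISERVER="https://192.168.1.101:6443"
BOOTSTRAP_TOKEN="<token from /opt/kubernetes/cfg/token.csv>"

kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig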
On node1:
Unpack the kubernetes-node-linux-amd64.tar.gz archive
[root@node1 node_pkg]# tar xvf kubernetes-node-linux-amd64.tar.gz
Copy the extracted binaries into /opt/kubernetes/bin
[root@node1 bin]# cp kubelet kube-proxy /opt/kubernetes/bin/
[root@node1 bin]# chmod +x /opt/kubernetes/bin/*
Create the kubelet.sh script

#!/bin/bash

NODE_ADDRESS=${1:-"192.168.1.196"}
DNS_SERVER_IP=${2:-"10.10.10.2"}

cat <<EOF >/opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--address=${NODE_ADDRESS} \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--cert-dir=/opt/kubernetes/ssl \\
--allow-privileged=true \\
--cluster-dns=${DNS_SERVER_IP} \\
--cluster-domain=cluster.local \\
--fail-swap-on=false \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF

cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
Run the script
[root@node1 bin]# ./kubelet.sh 192.168.1.102 10.10.10.2
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
Note: 192.168.1.102 is the IP of the current node and 10.10.10.2 is the cluster DNS address.
Check whether kubelet started
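The unit status and journal show whether it came up (a routine check; systemd and journald as used throughout this guide):
systemctl status kubelet -l
journalctl -u kubelet --no-pager | tail -n 20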
The log shows an error: permission denied when creating the certificate signing request
error: failed to run Kubelet: cannot create certificate signing request: certificatesigningrequests.certificates.k8s.io is forbidden: User "kubelet-bootstrap" cannot create certificatesigningrequests.certificates.k8s.io at the cluster scope: clusterrole.rbac.authorization.k8s.io "system:node-bootstrap" not found
Solution
On the master, create the RBAC binding that grants the kubelet-bootstrap user permission to create CSRs:
[root@master ssl]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
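Optionally confirm the binding exists before going back to the node:
kubectl get clusterrolebinding kubelet-bootstrap -o yaml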
Restart kubelet on the node.
Create the proxy.sh script

#!/bin/bash

NODE_ADDRESS=${1:-"192.168.1.200"}

cat <<EOF >/opt/kubernetes/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=${NODE_ADDRESS} \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
Run the script
[root@node1 ssl]# ./proxy.sh 192.168.1.102
Note: 192.168.1.102 is the address of the current node.
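A quick check that kube-proxy is running with the expected options:
ps -ef | grep kube-proxy
systemctl status kube-proxy -l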
On the master, check the node certificate request:
[root@master ssl]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-iVbj9CKPaWhh7VAQfqK16Xz9in4-Byb_XZaDJLz3zfw   11m       kubelet-bootstrap   Pending
Approve the certificate signing request
[root@master ssl]# kubectl certificate approve node-csr-iVbj9CKPaWhh7VAQfqK16Xz9in4-Byb_XZaDJLz3zfw
Check the request again:
[root@master ssl]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-iVbj9CKPaWhh7VAQfqK16Xz9in4-Byb_XZaDJLz3zfw   14m       kubelet-bootstrap   Approved,Issued
Check that the node is in Ready state
[root@master ssl]# kubectl get node
NAME            STATUS    ROLES     AGE       VERSION
192.168.1.102   Ready     <none>    1m        v1.9.2
On node2:
Copy the files from node1 to node2, or repeat the node1 steps
[root@node1 ssl]# scp -r /opt/kubernetes/bin root@192.168.1.103:/opt/kubernetes
[root@node1 ssl]# scp -r /opt/kubernetes/cfg root@192.168.1.103:/opt/kubernetes
[root@node1 ssl]# scp /usr/lib/systemd/system/kubelet.service root@192.168.1.103:/usr/lib/systemd/system/
[root@node1 ssl]# scp /usr/lib/systemd/system/kube-proxy.service root@192.168.1.103:/usr/lib/systemd/system/
On node2, edit the kubelet config file under cfg and change the IP to the current node's IP
KUBELET_OPTS="--logtostderr=true \
--v=4 \
--address=192.168.1.103 \
--hostname-override=192.168.1.103 \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--cert-dir=/opt/kubernetes/ssl \
--allow-privileged=true \
--cluster-dns=10.10.10.2 \
--cluster-domain=cluster.local \
--fail-swap-on=false \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
On node2, edit the kube-proxy config file under cfg and change the IP to the current node's IP
KUBE_PROXY_OPTS="--logtostderr=true --v=4 --hostname-override=192.168.1.103 --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
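As an alternative to editing both files by hand, the node1 address can be rewritten in place with sed (a sketch, assuming the files were copied verbatim from node1):
# replace the node1 IP with the node2 IP in both copied config files
sed -i 's/192\.168\.1\.102/192.168.1.103/g' /opt/kubernetes/cfg/kubelet /opt/kubernetes/cfg/kube-proxy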
Start the services
[root@node2 cfg]# systemctl start kubelet
[root@node2 cfg]# systemctl start kube-proxy
On the master, check for the new certificate request
[root@master ssl]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-OPWss8__QdJqP6QmudtkaVQWeDh278BxzP35hdeAkZI   17s       kubelet-bootstrap   Pending
node-csr-iVbj9CKPaWhh7VAQfqK16Xz9in4-Byb_XZaDJLz3zfw   28m       kubelet-bootstrap   Approved,Issued
Approve the certificate request
[root@master ssl]# kubectl certificate approve node-csr-OPWss8__QdJqP6QmudtkaVQWeDh278BxzP35hdeAkZI
Check the nodes
[root@master ssl]# kubectl get node
NAME            STATUS    ROLES     AGE       VERSION
192.168.1.102   Ready     <none>    15m       v1.9.2
192.168.1.103   Ready     <none>    12s       v1.9.2
Test example
Create an nginx deployment with three replicas:
[root@master ssl]# kubectl run nginx --image=nginx --replicas=3
Check the Pods
[root@master ssl]# kubectl get pod
NAME                   READY     STATUS              RESTARTS   AGE
nginx-8586cf59-7r4zq   0/1       ContainerCreating   0          10s
nginx-8586cf59-9wpwr   0/1       ContainerCreating   0          10s
nginx-8586cf59-h2n5h   0/1       ContainerCreating   0          10s
List all resource objects
[root@master ssl]# kubectl get all
NAME                       READY     STATUS              RESTARTS   AGE
pod/nginx-8586cf59-7r4zq   0/1       ContainerCreating   0          1m
pod/nginx-8586cf59-9wpwr   0/1       ContainerCreating   0          1m
pod/nginx-8586cf59-h2n5h   0/1       ContainerCreating   0          1m

NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
service/kubernetes   ClusterIP   10.10.10.1   <none>        443/TCP   1h

NAME                          DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deployment.extensions/nginx   3         3         3            0           1m

NAME                                     DESIRED   CURRENT   READY     AGE
replicaset.extensions/nginx-8586cf59     3         3         0         1m

NAME                    DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nginx   3         3         3            0           1m

NAME                               DESIRED   CURRENT   READY     AGE
replicaset.apps/nginx-8586cf59     3         3         0         1m
Check which node each Pod is running on
[root@master ssl]# kubectl get pod -o wide
NAME                   READY     STATUS             RESTARTS   AGE       IP            NODE
nginx-8586cf59-7r4zq   0/1       ImagePullBackOff   0          7m        172.17.47.2   192.168.1.103
nginx-8586cf59-9wpwr   1/1       Running            0          7m        172.17.47.3   192.168.1.103
nginx-8586cf59-h2n5h   1/1       Running            0          7m        172.17.45.2   192.168.1.102
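One pod is stuck in ImagePullBackOff, which usually means that node failed to pull the nginx image from the registry; the pull events can be inspected with kubectl describe (a routine check):
kubectl describe pod nginx-8586cf59-7r4zq | tail -n 20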
Expose a service externally
[root@master ssl]# kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort
[root@master ssl]# kubectl get svc
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.10.10.1     <none>        443/TCP        2h
nginx        NodePort    10.10.10.130   <none>        88:34986/TCP   13s
Note: port 88 is the service port, reachable from the nodes via the ClusterIP.
34986 is the randomly assigned NodePort, used for access from outside the cluster.
Access port 88 from a node:
[root@node1 ssl]# curl -I 10.10.10.130:88
HTTP/1.1 200 OK
Server: nginx/1.15.2
Date: Wed, 08 Aug 2018 08:54:09 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 24 Jul 2018 13:02:29 GMT
Connection: keep-alive
ETag: "5b572365-264"
Accept-Ranges: bytes
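To verify access from outside the cluster, the same service is reachable on any node IP at the NodePort shown above (34986 in this run; the port is assigned randomly per service):
curl -I http://192.168.1.102:34986
curl -I http://192.168.1.103:34986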