
Project repository: https://github.com/easzlab/kubeasz
#: Set up Harbor first.
#: Install docker with a script.
root@k8s-harbor1:~# vim docker_install.sh
#!/bin/bash
sudo apt-get update
sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get -y update
sudo apt install -y docker-ce=5:18.09.9~3-0~ubuntu-bionic docker-ce-cli=5:18.09.9~3-0~ubuntu-bionic
root@k8s-harbor1:~# bash docker_install.sh

#: Configure a registry mirror (accelerator).
root@k8s-harbor1:~# sudo mkdir -p /etc/docker
root@k8s-harbor1:~# sudo tee /etc/docker/daemon.json <<-'EOF'
> {
>   "registry-mirrors": ["https://5zw40ihv.mirror.aliyuncs.com"]
> }
> EOF
{
  "registry-mirrors": ["https://5zw40ihv.mirror.aliyuncs.com"]
}
root@k8s-harbor1:~# sudo systemctl daemon-reload
root@k8s-harbor1:~# sudo systemctl restart docker

#: Install docker-compose.
root@k8s-harbor1:~# apt install -y docker-compose

#: Download the Harbor package, extract it, and create a symlink.
root@k8s-harbor1:/usr/local/src# ls
harbor-offline-installer-v1.7.5.tgz
root@k8s-harbor1:/usr/local/src# tar xf harbor-offline-installer-v1.7.5.tgz
root@k8s-harbor1:/usr/local/src# ln -sv /usr/local/src/harbor /usr/local/harbor

#: Prepare the certificates referenced later in the Harbor configuration.
root@k8s-harbor1:/usr/local/harbor# mkdir /usr/local/src/harbor/certs   #: directory to hold the certificates
root@k8s-harbor1:/usr/local/harbor# cd /usr/local/src/harbor/certs
root@k8s-harbor1:/usr/local/src/harbor/certs# openssl genrsa -out /usr/local/src/harbor/certs/harbor-ca.key 2048   #: generate the private key
root@k8s-harbor1:/usr/local/src/harbor/certs# openssl req -x509 -new -nodes -key /usr/local/src/harbor/certs/harbor-ca.key -subj "/CN=harbor.magedu.net" -days 7120 -out /usr/local/src/harbor/certs/harbor-ca.crt
#: Note: adjust the domain; this CN must be exactly the same as the hostname in the Harbor configuration.
#: When generating the self-signed certificate on Ubuntu, the following error may appear:
Can't load /root/.rnd into RNG
139879360623040:error:2406F079:random number generator:RAND_load_file:Cannot open file:../crypto/rand/randfile.c:88:Filename=/root/.rnd
#: Create the file as hinted, then run the command again.
root@k8s-harbor1:/usr/local/src/harbor/certs# touch /root/.rnd

#: Edit the Harbor configuration file.
root@k8s-harbor1:/usr/local/src/harbor/certs# cd /usr/local/harbor
root@k8s-harbor1:/usr/local/harbor# vim harbor.cfg
hostname = harbor.magedu.net
ui_url_protocol = https   #: use HTTPS here
ssl_cert = /usr/local/src/harbor/certs/harbor-ca.crt
ssl_cert_key = /usr/local/src/harbor/certs/harbor-ca.key   #: the certificate and key generated above
harbor_admin_password = 123456   #: Harbor login password

#: Install Harbor.
root@k8s-harbor1:/usr/local/harbor# ./install.sh
#: Test it.
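#: Optional sanity check (not part of the original notes): after install.sh finishes, it can be worth confirming that the certificate CN really matches the hostname in harbor.cfg and that the Harbor containers came up. A minimal sketch, assuming the paths used above:
root@k8s-harbor1:/usr/local/harbor# openssl x509 -noout -subject -in /usr/local/src/harbor/certs/harbor-ca.crt   #: the subject CN should be harbor.magedu.net
root@k8s-harbor1:/usr/local/harbor# docker-compose ps   #: run from the harbor directory; all containers should be Up (healthy)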
#: Configure master1 so it can push and pull images.
#: Install docker with the script first.
root@k8s-master1:~# bash docker_install.sh
#: Create a directory named exactly after the Harbor access name to hold the certificate (the name must match, otherwise pushing and pulling images will fail).
root@k8s-master1:~# mkdir -p /etc/docker/certs.d/harbor.magedu.net
#: Copy Harbor's public certificate to the server that will push images.
root@k8s-harbor1:~# scp /usr/local/src/harbor/certs/harbor-ca.crt 192.168.5.101:/etc/docker/certs.d/harbor.magedu.net
#: Restart docker.
root@k8s-master1:~# systemctl restart docker
#: Configure name resolution.
root@k8s-master1:~# vim /etc/hosts
192.168.5.103 harbor.magedu.net
#: Test the login.
root@k8s-master1:~# docker login harbor.magedu.net
#: Create a project in the Harbor web UI and make it public.
#: Pull a small image, retag it, and push it as a test.
root@k8s-master1:~# docker pull alpine
root@k8s-master1:~# docker tag 961769676411 harbor.magedu.net/linux37/alpine:v1
root@k8s-master1:~# docker push harbor.magedu.net/linux37/alpine:v1

#: Configure master2 so it can push and pull images.
#: Install docker on master2 with the script.
root@k8s-master2:~# bash docker_install.sh
#: Since master2 also needs to push images, copy the login credentials to it manually.
root@k8s-master1:~# scp -r /root/.docker 192.168.5.102:/root
#: Use a script to copy master1's public key to master2 and the etcd and node hosts for passwordless SSH.
root@k8s-master1:~# vim scp.sh
#!/bin/bash
IP="
192.168.5.101
192.168.5.102
192.168.5.104
192.168.5.105
192.168.5.106
192.168.5.107
192.168.5.108
192.168.5.109
"
for node in ${IP};do
  sshpass -p centos ssh-copy-id -o StrictHostKeyChecking=no ${node}
  if [ $? -eq 0 ];then
    echo "${node} key copied successfully"
  else
    echo "${node} key copy failed"
  fi
done
#: Install the sshpass command.
root@k8s-master1:~# apt install sshpass
#: Generate a key pair on master1.
root@k8s-master1:~# ssh-keygen
#: Run the script.
root@k8s-master1:~# bash scp.sh
#: Edit the script again so it copies the certificates, credentials, hosts file, and resource limits to every host.
root@k8s-master1:~# vim scp.sh
#!/bin/bash
IP="
192.168.5.102
192.168.5.104
192.168.5.105
192.168.5.106
192.168.5.107
192.168.5.108
192.168.5.109
"
for node in ${IP};do
  # sshpass -p centos ssh-copy-id -o StrictHostKeyChecking=no ${node}
  # if [ $? -eq 0 ];then
  #   echo "${node} key copied successfully"
  # else
  #   echo "${node} key copy failed"
  # fi
  scp docker_install.sh ${node}:/root
  scp -r /etc/docker/certs.d ${node}:/etc/docker
  scp /etc/hosts ${node}:/etc/
  scp /etc/security/limits.conf ${node}:/etc/security/limits.conf
  scp /etc/sysctl.conf ${node}:/etc/sysctl.conf
  ssh ${node} "reboot"
  echo "${node} rebooted"
done
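#: Optional check (hypothetical, not from the original notes): before relying on the distribution script, confirm that key-based login really works to every host, so the scp/reboot loop cannot hang on a password prompt.
root@k8s-master1:~# for node in 192.168.5.102 192.168.5.104 192.168.5.105 192.168.5.106 192.168.5.107 192.168.5.108 192.168.5.109; do
>   ssh -o BatchMode=yes ${node} hostname || echo "${node} still requires a password"
> done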
#: Tune kernel parameters and resource limits.
root@k8s-master1:~# vim /etc/sysctl.conf
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
net.ipv4.tcp_tw_reuse = 0
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_tw_recycle = 0
root@k8s-master1:~# vim /etc/security/limits.conf
* soft core unlimited
* hard core unlimited
* soft nproc 1000000
* hard nproc 1000000
* soft nofile 1000000
* hard nofile 1000000
* soft memlock 32000
* hard memlock 32000
* soft msgqueue 8192000
* hard msgqueue 8192000
#: Reboot this host.
root@k8s-master1:~# reboot
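#: Optional (not in the original notes): if a full reboot is inconvenient, the sysctl values can be loaded immediately; the limits.conf values only take effect for new login sessions, so re-log-in and verify. A minimal sketch:
root@k8s-master1:~# sysctl -p        #: load /etc/sysctl.conf into the running kernel
root@k8s-master1:~# ulimit -n        #: in a fresh session this should print 1000000 (the nofile limit above)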
#: Set up haproxy + keepalived.
#: Install haproxy and keepalived.
root@k8s-ha1:~# apt install -y haproxy keepalived
#: Configure keepalived.
root@k8s-ha1:~# find / -name keepalived.conf*
root@k8s-ha1:~# cp /usr/share/doc/keepalived/samples/keepalived.conf.vrrp /etc/keepalived/keepalived.conf
root@k8s-ha1:~# vim /etc/keepalived/keepalived.conf
    virtual_ipaddress {
        192.168.5.248 dev eth0 label eth0:0
    }
#: Configure haproxy.
root@k8s-ha1:~# vim /etc/haproxy/haproxy.cfg
listen k8s-api-6443
  bind 192.168.5.248:6443
  mode tcp
  server 192.168.5.101 192.168.5.101:6443 check fall 3 rise 3 inter 3s
  server 192.168.5.102 192.168.5.102:6443 check fall 3 rise 3 inter 3s
#: Restart the services.
root@k8s-ha1:~# systemctl restart haproxy
root@k8s-ha1:~# systemctl restart keepalived
#: Enable them at boot.
root@k8s-ha1:~# systemctl enable haproxy
root@k8s-ha1:~# systemctl enable keepalived
#: Configure the second HA host the same way, then test.
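#: Optional verification (hypothetical, not from the original notes): confirm the VIP is actually held by the active node and that haproxy is listening on it. Note that binding 192.168.5.248 on the backup node only works because net.ipv4.ip_nonlocal_bind=1 was set above.
root@k8s-ha1:~# ip addr show eth0 | grep 192.168.5.248   #: the VIP should appear as eth0:0 on the MASTER node
root@k8s-ha1:~# ss -tnlp | grep 6443                     #: haproxy should be listening on 192.168.5.248:6443
root@k8s-harbor1:~# telnet 192.168.5.248 6443            #: from another host, the frontend port should accept connections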
#: Configure ansible on master1.
#: Install ansible.
root@k8s-master1:/etc/ansible# apt install -y ansible
#: Clone the project; we use release 0.6.1: https://github.com/easzlab/kubeasz/tree/0.6.1
root@k8s-master1:/etc/ansible# cd /opt/
root@k8s-master1:/opt# git clone -b 0.6.1 https://github.com/easzlab/kubeasz.git
#: Move the files ansible installed by default out of the way, then copy everything from the clone into the ansible configuration directory.
root@k8s-master1:/opt# mv /etc/ansible/* /tmp   #: if there is nothing else in there, the contents can simply be deleted
root@k8s-master1:/opt# cp -rf kubeasz/* /etc/ansible/
#: If your version needs different startup parameters, they can be changed in the templates below.
root@k8s-master1:/etc/ansible/roles/kube-master/templates# cd /etc/ansible/roles/kube-master/templates/
root@k8s-master1:/etc/ansible/roles/kube-master/templates# ls
aggregator-proxy-csr.json.j2  kube-apiserver.service.j2       kube-controller-manager.service.j2  kube-scheduler.service.j2
basic-auth.csv.j2             kube-apiserver-v1.8.service.j2  kubernetes-csr.json.j2
#: Choose the deployment layout: single node or multi-master. We use multi-master.
root@k8s-master1:/etc/ansible# cd /etc/ansible/
root@k8s-master1:/etc/ansible# ll example/
total 40
drwxr-xr-x  2 root root 4096 Oct  6 13:42 ./
drwxr-xr-x 10 root root 4096 Oct  6 13:42 ../
-rw-r--r--  1 root root 2207 Oct  6 13:42 hosts.allinone.example
-rw-r--r--  1 root root 2241 Oct  6 13:42 hosts.allinone.example.en
-rw-r--r--  1 root root 2397 Oct  6 13:42 hosts.cloud.example
-rw-r--r--  1 root root 2325 Oct  6 13:42 hosts.cloud.example.en
-rw-r--r--  1 root root 2667 Oct  6 13:42 hosts.m-masters.example      #: multi-master deployment, Chinese version
-rw-r--r--  1 root root 2626 Oct  6 13:42 hosts.m-masters.example.en   #: multi-master deployment, English version
-rw-r--r--  1 root root 2226 Oct  6 13:42 hosts.s-master.example
-rw-r--r--  1 root root 2258 Oct  6 13:42 hosts.s-master.example.en
#: Since we deploy multiple masters, copy the multi-master example into place as the hosts file.
root@k8s-master1:/etc/ansible# cp example/hosts.m-masters.example ./hosts
#: Deploy k8s with ansible.
#: Follow the official documentation: https://github.com/easzlab/kubeasz/blob/0.6.1/docs/setup/00-planning_and_overall_intro.md
#: Refresh the apt index.
root@k8s-master1:/etc/ansible# apt-get update
#: Install python2.7.
root@k8s-master1:/etc/ansible# apt-get install python2.7
#: Create the symlink.
root@k8s-master2:~# ln -s /usr/bin/python2.7 /usr/bin/python
#: Install Python and create the symlink on the node and etcd hosts as well.
#: Download the k8s binaries and move them into /etc/ansible/bin.
root@k8s-master1:/usr/local/src# tar xf k8s.1-13-5.tar.gz
root@k8s-master1:/usr/local/src# ls
bin  k8s.1-13-5.tar.gz
root@k8s-master1:/usr/local/src# mv bin/* /etc/ansible/bin/
#: Test: this must print the current version.
root@k8s-master1:/etc/ansible/bin# ./kube-apiserver --version
Kubernetes v1.13.5
#: Go back up and edit hosts as needed.
root@k8s-master1:/etc/ansible/bin# cd ..
root@k8s-master1:/etc/ansible# vim hosts   #: this is the deployment layout file chosen above, renamed to hosts
[deploy]
192.168.5.101 NTP_ENABLED=no   #: the IP of this host

# For the etcd cluster provide NODE_NAME as below; note the etcd cluster must have an odd number of nodes (1, 3, 5, 7, ...)
[etcd]
192.168.5.104 NODE_NAME=etcd1
192.168.5.105 NODE_NAME=etcd2
192.168.5.106 NODE_NAME=etcd3

[new-etcd] # reserved group for adding etcd nodes later
#192.168.1.x NODE_NAME=etcdx

[kube-master]
192.168.5.101

[new-master] # reserved group for adding master nodes later
192.168.5.102   #: deliberately left here to test adding a master later

[kube-node]
192.168.5.108

[new-node] # reserved group for adding node hosts later
192.168.5.109

K8S_VER="v1.13"                                 #: mind the version number
MASTER_IP="192.168.5.248"                       #: this is the VIP address
KUBE_APISERVER="https://{{ MASTER_IP }}:6443"   #: note this is port 6443
CLUSTER_NETWORK="calico"                        #: we use the calico network plugin
SERVICE_CIDR="10.20.0.0/16"                     #: the service network; make sure it does not conflict with the internal network
CLUSTER_CIDR="172.31.0.0/16"                    #: the network assigned to containers (pods)
CLUSTER_KUBERNETES_SVC_IP="10.20.0.1"           #: the first address of the service network defined above
CLUSTER_DNS_SVC_IP="10.20.254.254"              #: the DNS address; we use one of the last addresses of the service network
CLUSTER_DNS_DOMAIN="linux37.local."             #: the DNS domain
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="123456"                        #: the cluster password
bin_dir="/usr/bin"                              #: binaries are usually placed here; otherwise it has to be changed again before running
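#: Optional pre-flight check (hypothetical, not from the original notes; assumes ansible 2.4 or newer): confirm that ansible parses the hosts file and that the key variables are consistent, e.g. CLUSTER_DNS_SVC_IP must fall inside SERVICE_CIDR and MASTER_IP must be the VIP.
root@k8s-master1:/etc/ansible# ansible-inventory -i hosts --graph   #: show the parsed group layout
root@k8s-master1:/etc/ansible# grep -E "SERVICE_CIDR|CLUSTER_CIDR|CLUSTER_DNS_SVC_IP|MASTER_IP" hosts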
#: Test it.
root@k8s-master1:/etc/ansible# ansible all -m ping
#: Install step by step following the official docs.
root@k8s-master1:/etc/ansible# ansible-playbook 01.prepare.yml
#: For step 02, if you want a different etcd version, download a newer release and extract it.
root@k8s-master1:/opt# tar xf etcd-v3.3.15-linux-amd64.tar.gz
#: Enter the extracted directory and test it.
root@k8s-master1:/opt/etcd-v3.3.15-linux-amd64# ./etcd --version
#: Then move the binaries into the ansible bin directory.
root@k8s-master1:/opt/etcd-v3.3.15-linux-amd64# mv etcd* /etc/ansible/bin/
#: Deploy step 02.
root@k8s-master1:/etc/ansible# ansible-playbook 02.etcd.yml
#: Run the following on any etcd server to verify the etcd service (every endpoint must report "successfully").
root@k8s-etcd1:~# export NODE_IPS="192.168.5.104 192.168.5.105 192.168.5.106"
root@k8s-etcd1:~# for ip in ${NODE_IPS}; do ETCDCTL_API=3 /usr/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem endpoint health;done
https://192.168.5.104:2379 is healthy: successfully committed proposal: took = 10.453066ms
https://192.168.5.105:2379 is healthy: successfully committed proposal: took = 11.483075ms
https://192.168.5.106:2379 is healthy: successfully committed proposal: took = 11.542092ms
#: Docker is already installed, so step 03 can be skipped.
#: Deploy step 04.
root@k8s-master1:/etc/ansible# ansible-playbook 04.kube-master.yml
#: From another host, test whether port 6443 on the VIP is reachable.
root@k8s-harbor1:~# telnet 192.168.5.248 6443
#: Now kubectl get node works on master1; check that the status is Ready.
root@k8s-master1:/etc/ansible# kubectl get node
NAME            STATUS                     ROLES    AGE    VERSION
192.168.5.101   Ready,SchedulingDisabled   master   2m9s   v1.13.5
#: Deploy step 05 (join the node hosts to the masters).
root@k8s-master1:/etc/ansible# ansible-playbook 05.kube-node.yml
TASK [kube-node : 開啟kubelet 服務] *****************************************************************************************************
fatal: [192.168.5.108]: FAILED! => {"changed": true, "cmd": "systemctl daemon-reload && systemctl restart kubelet", "delta": "0:00:00.249926", "end": "2019-10-06 15:40:48.272879", "msg": "non-zero return code", "rc": 5, "start": "2019-10-06 15:40:48.022953", "stderr": "Failed to restart kubelet.service: Unit docker.service not found.", "stderr_lines": ["Failed to restart kubelet.service: Unit docker.service not found."], "stdout": "", "stdout_lines": []}
#: This fails because docker is not installed on the node hosts (install docker on both node1 and node2 this time).
root@k8s-node1:~# bash docker_install.sh
#: Run it again.
root@k8s-master1:/etc/ansible# ansible-playbook 05.kube-node.yml
#: Check.
root@k8s-master1:/etc/ansible# kubectl get node
NAME            STATUS                     ROLES    AGE   VERSION
192.168.5.101   Ready,SchedulingDisabled   master   18m   v1.13.5
192.168.5.108   Ready                      node     17s   v1.13.5
#: Deploy step 06 (network plugin).
#: The images need to be prepared first. Which ones depends on the calico version being installed (ours is 3.4; the exact version is defined in the calico role's defaults/main.yml).
root@k8s-master1:/etc/ansible# vim roles/calico/templates/calico-v3.4.yaml.j2
#: Search for "image" in this file to find the images that need to be downloaded.
#: Then find the latest calico 3.4 release on GitHub and download it.
#: kubelet also needs an image (the pause/sandbox image), referenced in its unit template:
root@k8s-master1:/etc/ansible# vim roles/kube-node/templates/kubelet.service.j2
--pod-infra-container-image={{ SANDBOX_IMAGE }} \
#: It is referenced through a variable, so find where that image is defined.
root@k8s-master1:/etc/ansible# grep pod-infra-container-image* ./* -R
root@k8s-master1:/etc/ansible# grep mirrorgooglecontainers* ./* -R
./roles/kube-node/defaults/main.yml:SANDBOX_IMAGE: "mirrorgooglecontainers/pause-amd64:3.1"
#: That shows where it lives; open the file.
root@k8s-master1:/etc/ansible# vim ./roles/kube-node/defaults/main.yml
SANDBOX_IMAGE: "mirrorgooglecontainers/pause-amd64:3.1"
#: Pick any host, pull this image, retag it, and push it to Harbor.
root@k8s-node1:~# docker pull mirrorgooglecontainers/pause-amd64:3.1
root@k8s-node1:~# docker tag mirrorgooglecontainers/pause-amd64:3.1 harbor.magedu.net/linux37/pause-amd64:3.1
root@k8s-node1:~# docker push harbor.magedu.net/linux37/pause-amd64:3.1
#: On the master host, change the image address.
root@k8s-master1:/etc/ansible# vim ./roles/kube-node/defaults/main.yml
SANDBOX_IMAGE: "harbor.magedu.net/linux37/pause-amd64:3.1"
#: Then re-run step 05.
root@k8s-master1:/etc/ansible# ansible-playbook 05.kube-node.yml
#: Check on the node host.
root@k8s-node1:~# ps aux |grep kubelet
--pod-infra-container-image=harbor.magedu.net/linux37/pause-amd64:3.1
#: Change it on the master as well.
root@k8s-master1:/etc/ansible# vim /etc/systemd/system/kubelet.service
--pod-infra-container-image=harbor.magedu.net/linux37/pause-amd64:3.1 \
--max-pods=110 \   #: in production this should be set much higher; it is the maximum number of pods this host may run
#: Then restart kubelet.
root@k8s-master1:/etc/ansible# systemctl daemon-reload
root@k8s-master1:/etc/ansible# systemctl restart kubelet
#: Check.
root@k8s-master1:/etc/ansible# kubectl get nodes
#: Continue preparing the network images.
#: Upload the downloaded calico package to the server and extract it; it contains three images.
root@k8s-master1:/opt# tar xf release-v3.4.4_\(1\).tgz
root@k8s-master1:/opt# cd release-v3.4.4/
root@k8s-master1:/opt/release-v3.4.4# cd images/
#: First load the cni image, retag it, and push it to Harbor.
root@k8s-master1:/opt/release-v3.4.4/images# docker load -i calico-cni.tar
root@k8s-master1:/opt/release-v3.4.4/images# docker tag f5e5bae3eb87 harbor.magedu.net/linux37/calico-cni:v3.4.4
root@k8s-master1:/opt/release-v3.4.4/images# docker push harbor.magedu.net/linux37/calico-cni:v3.4.4
#: Then change the image address.
root@k8s-master1:/etc/ansible# vim roles/calico/templates/calico-v3.4.yaml.j2
        - name: install-cni
          image: harbor.magedu.net/linux37/calico-cni:v3.4.4
#: Load the node image, retag it, and push it to Harbor.
root@k8s-master1:/opt/release-v3.4.4/images# docker load -i calico-node.tar
root@k8s-master1:/opt/release-v3.4.4/images# docker tag a8dbf15bbd6f harbor.magedu.net/linux37/calico-node:v3.4.4
root@k8s-master1:/opt/release-v3.4.4/images# docker push harbor.magedu.net/linux37/calico-node:v3.4.4
#: Then change the image address.
root@k8s-master1:/etc/ansible# vim roles/calico/templates/calico-v3.4.yaml.j2
        - name: calico-node
          image: harbor.magedu.net/linux37/calico-node:v3.4.4
#: Load the kube-controllers image, retag it, and push it to Harbor.
root@k8s-master1:/opt/release-v3.4.4/images# docker load -i calico-kube-controllers.tar
root@k8s-master1:/opt/release-v3.4.4/images# docker tag 0030ff291350 harbor.magedu.net/linux37/calico-kube-controllers:v3.4.4
root@k8s-master1:/opt/release-v3.4.4/images# docker push harbor.magedu.net/linux37/calico-kube-controllers:v3.4.4
#: Then change the image address.
root@k8s-master1:/etc/ansible# vim roles/calico/templates/calico-v3.4.yaml.j2
      containers:
        - name: calico-kube-controllers
          image: harbor.magedu.net/linux37/calico-kube-controllers:v3.4.4
#: Deploy step 06.
root@k8s-master1:/etc/ansible# ansible-playbook 06.network.yml
#: Check.
root@k8s-master1:/etc/ansible# calicoctl node status
Calico process is running.

IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS  |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+---------------+-------------------+-------+----------+-------------+
| 192.168.5.108 | node-to-node mesh | up    | 08:57:09 | Established |
+---------------+-------------------+-------+----------+-------------+
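#: Optional verification (hypothetical, not from the original notes): confirm the calico pods are running and actually use the retagged images from Harbor rather than the upstream registries.
root@k8s-master1:/etc/ansible# kubectl get pod -n kube-system -o wide | grep calico
root@k8s-master1:/etc/ansible# kubectl get pod -n kube-system -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[*].image}{"\n"}{end}'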
#: Add node and master hosts.
#: First add the node to be joined in the configuration file.
root@k8s-master1:/etc/ansible# vim hosts
[new-node] # reserved group for adding node hosts later
192.168.5.109
#: Run the add-node playbook.
root@k8s-master1:/etc/ansible# ansible-playbook 20.addnode.yml
#: Check.
root@k8s-master1:/etc/ansible# kubectl get node
NAME            STATUS                     ROLES    AGE   VERSION
192.168.5.101   Ready,SchedulingDisabled   master   93m   v1.13.5
192.168.5.108   Ready                      node     75m   v1.13.5
192.168.5.109   Ready                      node     62s   v1.13.5
#: The docker version the playbook installs does not match ours, so replace the binaries in the ansible bin directory.
root@k8s-master1:/etc/ansible# docker version
Client:
 Version:           18.09.9
 API version:       1.39
 Go version:        go1.11.13
 Git commit:        039a7df9ba
 Built:             Wed Sep  4 16:57:28 2019
 OS/Arch:           linux/amd64
 Experimental:      false

Server: Docker Engine - Community
 Engine:
  Version:          18.09.9
  API version:      1.39 (minimum version 1.12)
  Go version:       go1.11.13
  Git commit:       039a7df
  Built:            Wed Sep  4 16:19:38 2019
  OS/Arch:          linux/amd64
  Experimental:     false
root@k8s-master1:/etc/ansible# cp /usr/bin/docker* /etc/ansible/bin/
root@k8s-master1:/etc/ansible# cp /usr/bin/containerd* /etc/ansible/bin/
#: Adding it again directly would fail because the node has already been added, so adjust the entry in the configuration file and run again.
root@k8s-master1:/etc/ansible# vim hosts
[new-node] # reserved group for adding node hosts later
192.168.5.109
#: Run it again.
root@k8s-master1:/etc/ansible# ansible-playbook 20.addnode.yml
#: Check.
root@k8s-master1:/etc/ansible# kubectl get nodes
#: Check on the node host.
root@k8s-node2:~# calicoctl node status
Calico process is running.

IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS  |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+---------------+-------------------+-------+----------+-------------+
| 192.168.5.101 | node-to-node mesh | up    | 09:13:07 | Established |
| 192.168.5.108 | node-to-node mesh | up    | 09:13:07 | Established |
+---------------+-------------------+-------+----------+-------------+
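#: Optional check (hypothetical, not from the original notes): since the binaries in /etc/ansible/bin were just replaced, confirm the docker version actually deployed on the newly added node matches the one distributed earlier.
root@k8s-master1:/etc/ansible# ssh 192.168.5.109 "docker version --format '{{.Server.Version}}'"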
#: Add a master.
#: Add the master to be joined in the configuration file.
root@k8s-master1:/etc/ansible# vim hosts
[new-master] # reserved group for adding master hosts later
192.168.5.102
#: Comment out the lb section.
root@k8s-master1:/etc/ansible# vim 21.addmaster.yml
# reconfigure and restart the haproxy service
#- hosts: lb
#  roles:
#  - lb
#: Run the add-master playbook.
root@k8s-master1:/etc/ansible# ansible-playbook 21.addmaster.yml
#: Check.
root@k8s-master1:/etc/ansible# kubectl get node
NAME            STATUS                     ROLES    AGE     VERSION
192.168.5.101   Ready,SchedulingDisabled   master   113m    v1.13.5
192.168.5.102   Ready,SchedulingDisabled   master   5m58s   v1.13.5
192.168.5.108   Ready                      node     95m     v1.13.5
192.168.5.109   Ready                      node     20m     v1.13.5
#: Check on a node host (the INFO column must show Established).
root@k8s-node1:~# calicoctl node status
Calico process is running.

IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS  |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+---------------+-------------------+-------+----------+-------------+
| 192.168.5.101 | node-to-node mesh | up    | 08:57:10 | Established |
| 192.168.5.109 | node-to-node mesh | up    | 09:13:08 | Established |
| 192.168.5.102 | node-to-node mesh | up    | 09:22:28 | Established |
+---------------+-------------------+-------+----------+-------------+
#: Run a few containers as a test.
root@k8s-master1:/etc/ansible# kubectl run net-test --image=alpine --replicas=4 sleep 36000
root@k8s-master1:/etc/ansible# kubectl get pod
NAME                        READY   STATUS    RESTARTS   AGE
net-test-7d5ddd7497-9zmfs   1/1     Running   0          62s
net-test-7d5ddd7497-l2b28   1/1     Running   0          62s
net-test-7d5ddd7497-strk6   1/1     Running   0          62s
net-test-7d5ddd7497-vwsh7   1/1     Running   0          62s
#: Check the pod addresses.
root@k8s-master1:/etc/ansible# kubectl get pod -o wide
NAME                        READY   STATUS    RESTARTS   AGE    IP              NODE            NOMINATED NODE   READINESS GATES
net-test-7d5ddd7497-9zmfs   1/1     Running   0          112s   172.31.58.65    192.168.5.108   <none>           <none>
net-test-7d5ddd7497-l2b28   1/1     Running   0          112s   172.31.58.66    192.168.5.108   <none>           <none>
net-test-7d5ddd7497-strk6   1/1     Running   0          112s   172.31.13.129   192.168.5.109   <none>           <none>
net-test-7d5ddd7497-vwsh7   1/1     Running   0          112s   172.31.13.130   192.168.5.109   <none>           <none>
#: Exec into a container and test connectivity.
root@k8s-master1:/etc/ansible# kubectl exec -it net-test-7d5ddd7497-9zmfs sh
/ # ping 172.31.13.129
PING 172.31.13.129 (172.31.13.129): 56 data bytes
64 bytes from 172.31.13.129: seq=0 ttl=62 time=2.312 ms
^C
--- 172.31.13.129 ping statistics ---
1 packets transmitted, 1 packets received, 0% packet loss
round-trip min/avg/max = 2.312/2.312/2.312 ms
/ # ping 223.6.6.6
PING 223.6.6.6 (223.6.6.6): 56 data bytes
64 bytes from 223.6.6.6: seq=0 ttl=127 time=41.006 ms
^C
--- 223.6.6.6 ping statistics ---
1 packets transmitted, 1 packets received, 0% packet loss
round-trip min/avg/max = 41.006/41.006/41.006 ms
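#: Note (hypothetical check, not in the original notes): at this point pods can reach each other and the outside world by IP, but in-cluster name resolution does not work yet because no DNS add-on has been deployed. A quick way to confirm before moving on, using one of the test pods above:
root@k8s-master1:/etc/ansible# kubectl exec -it net-test-7d5ddd7497-9zmfs -- nslookup kubernetes
#: expected to fail or time out until the DNS service in the next section is running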
#: Deploy DNS.
#: Upload the downloaded DNS images to the ansible directory reserved for third-party components.
root@k8s-master1:/etc/ansible/manifests# cd /etc/ansible/manifests/
#: Create a dns directory.
root@k8s-master1:/etc/ansible/manifests# mkdir dns
#: Because two DNS implementations may be covered later, create a subdirectory for this one and put the files there.
root@k8s-master1:/etc/ansible/manifests# cd dns
root@k8s-master1:/etc/ansible/manifests/dns# mkdir kube-dns
root@k8s-master1:/etc/ansible/manifests/dns# cd kube-dns/
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# ll
total 136996
drwxr-xr-x 3 root root     4096 Oct  6 21:34 ./
drwxr-xr-x 3 root root     4096 Oct  6 21:33 ../
-rw-r--r-- 1 root root  3983872 Oct  6 21:34 busybox-online.tar.gz
-rw-r--r-- 1 root root      277 Oct  6 21:34 busybox.yaml
drwxr-xr-x 2 root root     4096 Oct  6 21:34 heapster/
-rw-r--r-- 1 root root 41687040 Oct  6 21:34 k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz
-rw-r--r-- 1 root root 51441152 Oct  6 21:34 k8s-dns-kube-dns-amd64_1.14.13.tar.gz
-rw-r--r-- 1 root root 43140608 Oct  6 21:34 k8s-dns-sidecar-amd64_1.14.13.tar.gz
-rw-r--r-- 1 root root     6305 Oct  6 21:34 kube-dns.yaml
#: We use the kube-dns.yaml file in this directory together with the images above.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# vim kube-dns.yaml
clusterIP: 10.20.254.254   #: the DNS address; it must match the DNS address set in the ansible hosts file
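#: Optional double-check (not part of the original notes): confirm the clusterIP in kube-dns.yaml matches CLUSTER_DNS_SVC_IP in the ansible hosts file before applying it.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# grep CLUSTER_DNS_SVC_IP /etc/ansible/hosts
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# grep clusterIP kube-dns.yaml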
#: Next, change the images referenced in this file.
#: First load the images from this directory and push them to Harbor.
#: Load the first one and push it to Harbor.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker load -i k8s-dns-kube-dns-amd64_1.14.13.tar.gz
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker tag 82f954458b31 harbor.magedu.net/linux37/k8s-dns-kube-dns-amd64:v1.14.13
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker push harbor.magedu.net/linux37/k8s-dns-kube-dns-amd64:v1.14.13
#: Update the image address in the file.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# vim kube-dns.yaml
containers:
- name: kubedns
image: harbor.magedu.net/linux37/k8s-dns-kube-dns-amd64:v1.14.13
#: Load the second one and push it to Harbor.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker load -i k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker tag 7b15476a7228 harbor.magedu.net/linux37/k8s-dns-dnsmasq-nanny-amd64:v1.14.13
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker push harbor.magedu.net/linux37/k8s-dns-dnsmasq-nanny-amd64:v1.14.13
#: Update the image address in the file.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# vim kube-dns.yaml
- name: dnsmasq
image: harbor.magedu.net/linux37/k8s-dns-dnsmasq-nanny-amd64:v1.14.13
#: Load the third one and push it to Harbor.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker load -i k8s-dns-sidecar-amd64_1.14.13.tar.gz
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker tag 333fb0833870 harbor.magedu.net/linux37/k8s-dns-sidecar-amd64:v1.14.13
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker push harbor.magedu.net/linux37/k8s-dns-sidecar-amd64:v1.14.13
#: Update the image address in the file.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# vim kube-dns.yaml
- name: sidecar
image: harbor.magedu.net/linux37/k8s-dns-sidecar-amd64:v1.14.13
limits:
memory: 256Mi   #: change this as well; in production it can be set to 4Gi
args:
- --domain=linux37.local   #: must match the domain in the ansible hosts file
- --server=/linux37.local/127.0.0.1#10053   #: must match the domain in the ansible hosts file
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.linux37.local,5,SRV
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.linux37.local,5,SRV
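#: Optional consistency check (hypothetical, not from the original notes): the domain in these args must agree with CLUSTER_DNS_DOMAIN in the ansible hosts file (minus the trailing dot).
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# grep CLUSTER_DNS_DOMAIN /etc/ansible/hosts
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# grep -n "linux37.local" kube-dns.yaml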
#: Create the DNS service.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# kubectl apply -f kube-dns.yaml
#: Check.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# kubectl get pod -n kube-system
#: Use busybox to test DNS; load the image first and push it to Harbor.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker load -i busybox-online.tar.gz
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker tag 747e1d7f6665 harbor.magedu.net/linux37/busybox:latest
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# docker push harbor.magedu.net/linux37/busybox:latest
#: Update the image address.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# vim busybox.yaml
spec:
containers:
- image: harbor.magedu.net/linux37/busybox:latest
#: Create the busybox pod.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# kubectl apply -f busybox.yaml
#: Check.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# kubectl get pod
#: Verify.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# kubectl get service --all-namespaces
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes ClusterIP 10.20.0.1 <none> 443/TCP 6h51m
kube-system kube-dns ClusterIP 10.20.254.254 <none> 53/UDP,53/TCP 17m
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# kubectl exec busybox nslookup kube-dns.kube-system.svc.linux37.local
Server: 10.20.254.254
Address 1: 10.20.254.254 kube-dns.kube-system.svc.linux37.local
Name: kube-dns.kube-system.svc.linux37.local
Address 1: 10.20.254.254 kube-dns.kube-system.svc.linux37.local
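#: Optional extra check (hypothetical, not from the original notes): besides the kube-dns service itself, confirm that an ordinary Service name resolves too, e.g. the kubernetes API service that the sidecar probes reference above.
root@k8s-master1:/etc/ansible/manifests/dns/kube-dns# kubectl exec busybox nslookup kubernetes.default.svc.linux37.local
#: should resolve to 10.20.0.1, the first address of the service network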
#: Deploy the dashboard.
#: Download the dashboard package and extract it.
root@k8s-master1:~# cd /etc/ansible/manifests/dashboard/
#: Create a directory named after the dashboard version and move the archive into it.
root@k8s-master1:/etc/ansible/manifests/dashboard# mkdir 1.10.1
root@k8s-master1:/etc/ansible/manifests/dashboard# mv kubernetes-dashboard-amd64-v1.10.1.tar.gz 1.10.1/
root@k8s-master1:/etc/ansible/manifests/dashboard# cd 1.10.1/
root@k8s-master1:/etc/ansible/manifests/dashboard/1.10.1# tar xf kubernetes-dashboard-amd64-v1.10.1.tar.gz
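#: The notes end here. Following the same pattern used for the DNS and calico images, the next step would presumably be to load the dashboard image, retag it for Harbor, and push it. A hedged sketch (file and image names below are illustrative, not from the original notes; use whatever "docker load" actually reports):
root@k8s-master1:/etc/ansible/manifests/dashboard/1.10.1# ls   #: see what the archive contained, typically a manifest plus an image tarball
root@k8s-master1:/etc/ansible/manifests/dashboard/1.10.1# docker load -i kubernetes-dashboard-amd64-v1.10.1.tar.gz
root@k8s-master1:/etc/ansible/manifests/dashboard/1.10.1# docker tag <loaded-image-id> harbor.magedu.net/linux37/kubernetes-dashboard-amd64:v1.10.1
root@k8s-master1:/etc/ansible/manifests/dashboard/1.10.1# docker push harbor.magedu.net/linux37/kubernetes-dashboard-amd64:v1.10.1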
