1.7: Kubernetes application environment:
1.7.1: dashboard (1.10.1)
Deploy dashboard, the Kubernetes web management UI.
1.7.1.1: Detailed steps:
1. Import the dashboard image and push it to the local Harbor server
# tar xvf dashboard-yaml_image-1.10.1.tar.gz
# docker load -i kubernetes-dashboard-amd64-v1.10.1.tar.gz
# docker tag gcr.io/google-containers/kubernetes-dashboard-amd64:v1.10.1 harbor1.dexter.com/baseimages/kubernetes-dashboard-amd64:v1.10.1
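The step above says the image is uploaded to the local Harbor, but the push command itself is not shown; presumably it is simply:
# docker push harbor1.dexter.com/baseimages/kubernetes-dashboard-amd64:v1.10.1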
2. Change the dashboard image address in kubernetes-dashboard.yaml to the local Harbor address
root@ansible-vm1:~# cd /etc/ansible/manifests/dashboard/
root@ansible-vm1:/etc/ansible/manifests/dashboard# mkdir -pv 1.10.1
root@ansible-vm1:/etc/ansible/manifests/dashboard# cp ./*.yaml 1.10.1/
root@ansible-vm1:/etc/ansible/manifests/dashboard/1.10.1# vim kubernetes-dashboard.yaml
        image: harbor1.dexter.com/baseimages/kubernetes-dashboard-amd64:v1.10.1
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
    - port: 443
      nodePort: 30001
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
Make the following changes to this yaml file:
1) Comment out the Dashboard Secret section (see the sketch below); otherwise the browser later reports the page as insecure because the bundled certificate is expired, and we generate our own certificate instead.
2) Because I access the dashboard through a NodePort, set the Service type field to NodePort and specify nodePort as 30001, as in the Service definition above.
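For reference, a sketch of what the commented-out Dashboard Secret block looks like (the exact content of the v1.10.1 manifest may differ slightly):
# ------------------- Dashboard Secret ------------------- #
#apiVersion: v1
#kind: Secret
#metadata:
#  labels:
#    k8s-app: kubernetes-dashboard
#  name: kubernetes-dashboard-certs
#  namespace: kube-system
#type: Opaque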
3. Create the services
# kubectl create -f .
serviceaccount/admin-user created
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
secret/kubernetes-dashboard-certs created
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
deployment.apps/kubernetes-dashboard created
service/kubernetes-dashboard created
serviceaccount/dashboard-read-user created
clusterrolebinding.rbac.authorization.k8s.io/dashboard-read-binding created
clusterrole.rbac.authorization.k8s.io/dashboard-read-clusterrole created
clusterrole.rbac.authorization.k8s.io/ui-admin created
rolebinding.rbac.authorization.k8s.io/ui-admin-binding created
clusterrole.rbac.authorization.k8s.io/ui-read created
rolebinding.rbac.authorization.k8s.io/ui-read-binding created
4. Verify that the dashboard has started:
# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-d59797cd7-dqkcd 1/1 Running 0 170m
calico-node-7nwn8 2/2 Running 2 2d
calico-node-9sdfq 2/2 Running 4 2d
calico-node-m9zkv 2/2 Running 6 2d
calico-node-tdzv6 2/2 Running 6 2d
kubernetes-dashboard-665997f648-zfqrk 1/1 Running 0 14m
# kubectl get service -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes-dashboard NodePort 10.20.112.241 <none> 443:30001/TCP 42m
# kubectl cluster-info    # view cluster information
Kubernetes master is running at https://172.16.99.148:6443
kubernetes-dashboard is running at https://172.16.99.148:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

Access the dashboard directly using a node IP plus the NodePort, i.e. https://<node-ip>:30001/
Related error:
The error is the browser reporting the connection as insecure because the certificate is expired:

Solution:
Generate our own certificate:
mkdir key && cd key
# generate the certificate
openssl genrsa -out dashboard.key 2048
# I use my own node1 IP here because I access the dashboard through the NodePort; if you access it through the apiserver, you can use your master node IP instead
openssl req -new -out dashboard.csr -key dashboard.key -subj '/CN=172.16.99.123'
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
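Optionally, the generated certificate can be inspected to confirm the CN and validity period (a quick sanity check, not part of the original steps):
openssl x509 -in dashboard.crt -noout -subject -dates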
# delete the existing certificate secret
kubectl delete secret kubernetes-dashboard-certs -n kube-system
# create the new certificate secret
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kube-system
# list the pods
kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-d59797cd7-dqkcd 1/1 Running 0 155m
calico-node-7nwn8 2/2 Running 2 47h
calico-node-9sdfq 2/2 Running 4 47h
calico-node-m9zkv 2/2 Running 6 47h
calico-node-tdzv6 2/2 Running 6 2d
kubernetes-dashboard-665997f648-cb9jm 1/1 Running 0 27m
# restart the pod (delete it so the Deployment recreates it with the new secret)
kubectl delete pod kubernetes-dashboard-665997f648-cb9jm -n kube-system
Create the dashboard pod again (re-apply the manifest):
kubectl apply -f kubernetes-dashboard.yaml
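To confirm the dashboard now serves the self-signed certificate, the certificate presented on the NodePort can be checked (a sketch, assuming node 172.16.99.123 and port 30001 as above):
echo | openssl s_client -connect 172.16.99.123:30001 2>/dev/null | openssl x509 -noout -subject -dates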

1.7.1.2: Log in to the dashboard with a token:
# kubectl -n kube-system get secret | grep admin-user
admin-user-token-gpqv8 kubernetes.io/service-account-token 3 61m
# kubectl -n kube-system describe secret admin-user-token-gpqv8
Name: admin-user-token-gpqv8
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: admin-user
kubernetes.io/service-account.uid: 06b222ae-59ee-11ea-bc2b-fa163e62a670
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1346 bytes
namespace: 11 bytes
token:
eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWdwcXY4Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIwNmIyMjJhZS01OWVlLTExZWEtYmMyYi1mYTE2M2U2MmE2NzAiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.PC9rHsfA9KiCSGAUnw0HkhIEQYw9RCZltC09uxTzaEGpzG3zzLj82dqhyIrKbXRrOeQimzmgHSDPlXbNvZDO3KudFPXAVx7ZYpTAnGb76HSMIB9QWQFycog5Zne4dzNByt5PqwzlyAKlul-_yljP3ZX6zZyQW7ZDeB99OHx_8b_yCRkBfqAzJrm9ssCcYaUYIK870oI8a-6ozySUIn7jsFgFU7iAVM4B9-btQ0O37YlscJa6vPE7slB7AN3UfCaqnKUGdlnrQisJynIFhNawDEYe-LgCc1CQZICABYzMsuEB9X0IClSHjivg5tFPw6nDmIjT531WkUre_LP1lyDiVw
Log in with this token.


Note: we do not strictly need the NodePort; the dashboard can also be reached directly at the address reported by kubectl cluster-info:
https://172.16.99.148:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy. Keep in mind that
if your Kubernetes cluster runs on OpenStack, the firewall is a constant obstacle; we could only reach the page below after crudely running iptables -F on the relevant OpenStack compute nodes.

1.7.1.3: Log in with a kubeconfig
Create the kubeconfig file
# kubectl -n kube-system get secret | grep admin
admin-user-token-zncmg kubernetes.io/service-account-token 3 19h
# kubectl -n kube-system describe secret admin-user-token-zncmg
Name: admin-user-token-zncmg
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: admin-user
kubernetes.io/service-account.uid: 8bd97c05-5a00-11ea-bc2b-fa163e62a670
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1346 bytes
namespace: 11 bytes
token:
eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXpuY21nIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI4YmQ5N2MwNS01YTAwLTExZWEtYmMyYi1mYTE2M2U2MmE2NzAiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.oVxI3GlCzRcMQoqnGqfyMBXWPO7oD0u9gOqsz8qiue2qIp-SISvyF9LyIznB_g0i3aXQHv_b-1Jr07NvH042aG02T_zJVC-_xC_WzvJ9xf_jyJkimFyjF6ZRwMsT6QJ0KaIcAxbhDCUD5MmcihQYg6EMtnYxkOFUn77eFJiaogslB-gVmeEz4EVsWPHX8NggXp8DA0gnLnQ2L6jq_zSoKNXe9synvj9LITo-6Zf2YrnmKhERVU2wqJxloI_VIzpQDQtYq9tdBUEiZ1ELdUCXw_2pYQ3qkphZiTXz8XoqUorwiB8xdjPHgI97e6tPLupyRljRkgbHwbKiHOWBiZD-4A
Copy the existing kubeconfig and add the admin-user token to its user entry (see the token field at the end of the file below):
# cp /root/.kube/config /opt/kubeconfig
root@ansible-vm1:~/.kube# cat /opt/kubeconfig
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR0akNDQXA2Z0F3SUJBZ0lVVDMwT2p1MVhJaE9HSVZrMVNIZklQdktWVHprd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1lURUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdUQ0VoaGJtZGFhRzkxTVFzd0NRWURWUVFIRXdKWQpVekVNTUFvR0ExVUVDaE1EYXpoek1ROHdEUVlEVlFRTEV3WlRlWE4wWlcweEV6QVJCZ05WQkFNVENtdDFZbVZ5CmJtVjBaWE13SGhjTk1qQXdNakkyTURNME5EQXdXaGNOTXpVd01qSXlNRE0wTkRBd1dqQmhNUXN3Q1FZRFZRUUcKRXdKRFRqRVJNQThHQTFVRUNCTUlTR0Z1WjFwb2IzVXhDekFKQmdOVkJBY1RBbGhUTVF3d0NnWURWUVFLRXdOcgpPSE14RHpBTkJnTlZCQXNUQmxONWMzUmxiVEVUTUJFR0ExVUVBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKCktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUxwbGwyZkRzdklZcmZ5bC95RENkZCszdlRURytob00KejJ5S1ZHUnhUaXhuengrd1ZwRXhEWVg1dTcwVmFjNmVkVFZEaTQ4RjNRaDNnUUhtcFRQaGczT3poOEROdnJqVgpVUm91UTdBMU9MMG9KY2hDZVh1TGNrY3pkckpYVjhTSHA5TmlCZURPUGxIbnpxMFU3T0pHUThRY1hkcHNaUW12CmluN0M0bTMybVpqUVdlYTYydlJGWHowN1UwRzg1QmVSUXdBTS92eWdYMWdRWmNuRE5VbldRZjJkOTFhMkRCT2IKT2RLdTBFV0tRSlFVcEJSUWl2S2gxTkRwQ2xTamFvYjhENjROcG1LNmhzcDR5WjRrNEFpb0NZVnF2alVWWWVIbgpyOStPc1JmZmRNZ00xc3NSTXoyM1RwQytCVUlKQnFGa0J6aGkzc3h0Q1JjZUJQeVdEV3RXaTlrQ0F3RUFBYU5tCk1HUXdEZ1lEVlIwUEFRSC9CQVFEQWdFR01CSUdBMVVkRXdFQi93UUlNQVlCQWY4Q0FRSXdIUVlEVlIwT0JCWUUKRkpQZi9MTnZ0YzRTRFNQRDQ0Sm5UYXlSSXorY01COEdBMVVkSXdRWU1CYUFGSlBmL0xOdnRjNFNEU1BENDRKbgpUYXlSSXorY01BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQlVQVjdzcGszR24yNEZwYlhlZGJnQXRBeU9nUFdHClBONGtYbFdLZjRmQ2I0WVVOei9YVjlvLzdsL3NwR0NjcjR2U1ZsbVRQMWdVcGZPcVgyajJFSXBXSlAzMUttV1IKek1HL0hXQ0RNNlFLaUFkUDYxTWNtNThzTGtuelFaY25jQWNaNjRMdEREVU5DeWZlS21wMUI1U2pKaEovWXk1QwpEcktGbjhHWUJRR2NNRklFZXY1UExoYUIyR1k3cVBnb0pjVFo0Y0g1WmRIOGIyckR2WmlqMTF0RFZqNVErR1NHClM3NnU1UVJYYVc0WnAyd2J3WWVFTFg3QkpDVkNGL1ZOVENNWTRkbnk5eGhSYnRtQjFhSDlCajBCcUF0V2dsT08KeXA4b1V0MngveVZ3WDJOVUhZYmk5SE4zMnBMMzdWS2VEZlVwQkZnZmlKZ3phRFRTU0docy93engKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
server: https://172.16.99.148:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: admin
name: kubernetes
current-context: kubernetes
kind: Config
preferences: {}
users:
- name: admin
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQxVENDQXIyZ0F3SUJBZ0lVQnlkTVpQcDhKOVkwZDlOUE11M3lnUFIyb2JBd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1lURUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdUQ0VoaGJtZGFhRzkxTVFzd0NRWURWUVFIRXdKWQpVekVNTUFvR0ExVUVDaE1EYXpoek1ROHdEUVlEVlFRTEV3WlRlWE4wWlcweEV6QVJCZ05WQkFNVENtdDFZbVZ5CmJtVjBaWE13SGhjTk1qQXdNakkyTURVd01EQXdXaGNOTXpBd01qSXpNRFV3TURBd1dqQm5NUXN3Q1FZRFZRUUcKRXdKRFRqRVJNQThHQTFVRUNCTUlTR0Z1WjFwb2IzVXhDekFKQmdOVkJBY1RBbGhUTVJjd0ZRWURWUVFLRXc1egplWE4wWlcwNmJXRnpkR1Z5Y3pFUE1BMEdBMVVFQ3hNR1UzbHpkR1Z0TVE0d0RBWURWUVFERXdWaFpHMXBiakNDCkFTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTGhlOXQvNG91WTRBNys3VHNBVzYza3gKQ3JINzRyZnppWStnbEt3YWVCOXZKU3RBOWhVR2lOTGQzY040VS82VUF3eXVRSFhucWVOYVVKb0Ntb0dTR1MxbwphN1VOeHVWVEo2NXMrQnArSVNOQUFHMW1kRnhzbkk5MG9iWk5mem1XNWxnRDA5N3VvRnBXVFovZXVucG00NFQyCm50S053Wm8zVjJCZXVHYU9TRnV2WkdzWUpjbDNSYW5XK1QzSWJsSm9RdG1JNWZJZm1aZG93emVLM2l0YzVJbXEKYjJRRk5NQjJveHdWalgySkJXZk1WNmpYemk3SFIwT0UxWkJQWGo3Vm5oeGl3V1RYbEhOMW5rVFZZNVpBZUwwUQprTWFqN1pGcDRnUHVESkJxVkhBZ3hRWWJtVzhDYzZmYXdyOThzK2dONzNBckxnMXF5blAxUnBnMzU4Q2dNYVVDCkF3RUFBYU4vTUgwd0RnWURWUjBQQVFIL0JBUURBZ1dnTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQkJnZ3IKQmdFRkJRY0RBakFNQmdOVkhSTUJBZjhFQWpBQU1CMEdBMVVkRGdRV0JCUmxVYmp1OGhxQmJVeFVjNkFJZThWbwptOHZSdmpBZkJnTlZIU01FR0RBV2dCU1QzL3l6YjdYT0VnMGp3K09DWjAyc2tTTS9uREFOQmdrcWhraUc5dzBCCkFRc0ZBQU9DQVFFQUFiRHViNHZYR2s5UzFYS1UyZVhDd2FsQmRrbDBkQTd6eDBkTzBBc1hIL1NTMUQ5OTZKcFgKSy9vWU1pejRLWVRNMC9wRUFBQTFHWWV1RkRFdjJvTEhNZ3MvMHBMSXZqdm1uTUcxMG1mSHkrVWtoNHlrdDNvVgpEQlNuMXR1ZGhzeU1LS3JiRktHblJSNHlSSDVpcUJaQ3JmY0RmNUl3VUp5cnRhamN2TGJqVlJRSFh4N0JuMTI2ClkwRE1LOXJyV1JuT2J1ZTRnYy9PWVVPNkJqdERnWjkrQXVCN2NKWVhtb3liTUwwZFRWRUpVYk5uc1Q2YWFLNTEKMnRHd0N4M1pzMDlSSzY5K01VTnZSTEZDdytGMTNSTVI3TmFFWXlKMkpqelVsemQzZFFaQ3lpVng0cEdFalY1TgpheHRGL1I1UFBnc0VacGM3ZWpPS3ZvekNHVXlpSVVXdGF3PT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBdUY3MjMvaWk1amdEdjd0T3dCYnJlVEVLc2Z2aXQvT0pqNkNVckJwNEgyOGxLMEQyCkZRYUkwdDNkdzNoVC9wUURESzVBZGVlcDQxcFFtZ0thZ1pJWkxXaHJ0UTNHNVZNbnJtejRHbjRoSTBBQWJXWjAKWEd5Y2ozU2h0azEvT1pibVdBUFQzdTZnV2xaTm45NjZlbWJqaFBhZTBvM0JtamRYWUY2NFpvNUlXNjlrYXhnbAp5WGRGcWRiNVBjaHVVbWhDMllqbDhoK1psMmpETjRyZUsxemtpYXB2WkFVMHdIYWpIQldOZllrRlo4eFhxTmZPCkxzZEhRNFRWa0U5ZVB0V2VIR0xCWk5lVWMzV2VSTlZqbGtCNHZSQ1F4cVB0a1duaUErNE1rR3BVY0NERkJodVoKYndKenA5ckN2M3l6NkEzdmNDc3VEV3JLYy9WR21EZm53S0F4cFFJREFRQUJBb0lCQVFDeGZXSm12U0o5Uk1GLwpLNStsVnFzN2tWVzlnRUtEV2lVOHFwZFZrMm0rd1Mza0ZQYVJ5U2VnOEV2VUtKdWJ3ZnVwa25xbHh1NksyMkFxCjA0VFFaY2h0S1ZBL0RWTkRZNmtZeHZpVjhJU1FQY1hyaTYxTGFKZlRsckV6SWludlUvRE9IR2t6L1Q5TG1EZkUKUnhQNFQrS0tGeTFRZjMwNHJEd21ueWtnT2FzNDd0MFpUWHdGQlFWemxLTU9SU25GdWpDTmxvN0YvNUtsRjcwbAp0OStlQjNpQlJMVzRDeHc1WW9VTi9LcFYyY2ZUVnZGTmZOdis5NnI0WGw1UzQ0cGZaWmlwdzlweXBESXgvSWt1Ck5qRzBGeEZ1OGJmckpLVjVzVWpicG9ZKzFyVTFvV1M3eXFGemQ3UlNtV0hiTllrWHc0RVQ5TEpSRDlXWktpOEUKV3FsdlFuNkJBb0dCQU85UHZKK3hsS2luY1A0MEJpODI3ZnJIYUw1OUx2VXo2bUJiK2dHbW9kUVUxWHd2U0dodApUYU5NVmhvVFlzbE8rRUZmM0ZTRE56U0FLQStaUGZybDcxOFE4MFdOMW9lbmpqcXQvVUlTekVLREczd3U3bnd4CkpXVTBKWlJCOW90c0VHN003VktsV2tnQURqNnlZY2lJVDNxaTRBOE40aGl0TUhsdFFKekF2VXZoQW9HQkFNVTYKYVltYy8rdHYxeVpBbEo1SGprejlVMk5zR2VDU29qbnJHMFFOcXRQWlBiamYzM0gycEtWS3RSY2tXNDJucG1BUApKdjNNcktiRTVodnY2U3J6VkRNZ1BhenJRTXpTdWRBaXpYZkIzWVIwRXYxak9KUTFuVndQQ0NtNm5Oa20xZFFPCjBFVzdlcHFyeDlidkhBdlZkaWRxdnlYZmJ2VlEwb295MFoxYWduNUZBb0dCQUl0M211UXVxQWFLWHUybkFCdXcKRlkxYmZZM1dndkZnS2kyeUxNZWRoeDZFYmM2TDk5VDBMcFVHdmY5QVlRZ1ZQOVZKdXF4K05FUWlsRFpUQnE0YwpKeDd1VC9pdkt1R3dJdEhMNkpjRFFZdFp3VURrVVJTTHg5RnRUS0ZVdUF5VkZCYWUwNGlnMlRhdzRaeGtkVnhiCkpJYkNPWFpNandIMm5ST0hPbXFnWVRIQkFvR0FDSDFVTDZVL2F0MzhqOXYxeWI1Z3hMV2Uwa2ZEOFdPK2NlbkoKMmFzUThGK0loWjIxVzQxM1Z0b1pZMjZnTmovQ0xKNWFXbEJtR2lPZG1CUkNvQ09yT3l3bkczdGc1YkFvYVdvbQpHQUtUUzNGSG8vcVNZK2JPNkRpSmJHcG85L3Z3OWxqUTVEK0dyb080YldzTGRRTHlQQTRmUGowWTVKeGZBNjNlClVmeWtZMVVDZ1lFQXR4MUpYOEhZOUtvMFB0S2N5TktTZHU1dkdGMW4wdFNvTU8vVjNTdVBQcWRQYU8vSHhxMFQKMUlnUndWWDF0RGhwZFBPaWlRUzhrRisyWTYvRDhCU0hCMWs5dTlsbzhwTzE2L2R5RnE5Yk1yK1MzMzJ3bi90MApmM2RiQ1hONm1nbFRTU1p3eHlFUERqMFlpdWZmWk1BSEZKQlk3cTNDSGIrVFJHemVzR1poZzBVPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXpuY21nIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI4YmQ5N2MwNS01YTAwLTExZWEtYmMyYi1mYTE2M2U2MmE2NzAiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.oVxI3GlCzRcMQoqnGqfyMBXWPO7oD0u9gOqsz8qiue2qIp-SISvyF9LyIznB_g0i3aXQHv_b-1Jr07NvH042aG02T_zJVC-_xC_WzvJ9xf_jyJkimFyjF6ZRwMsT6QJ0KaIcAxbhDCUD5MmcihQYg6EMtnYxkOFUn77eFJiaogslB-gVmeEz4EVsWPHX8NggXp8DA0gnLnQ2L6jq_zSoKNXe9synvj9LITo-6Zf2YrnmKhERVU2wqJxloI_VIzpQDQtYq9tdBUEiZ1ELdUCXw_2pYQ3qkphZiTXz8XoqUorwiB8xdjPHgI97e6tPLupyRljRkgbHwbKiHOWBiZD-4A
# sz /opt/kubeconfig    # download the kubeconfig to the local machine (lrzsz) for use on the dashboard login page
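Before using it on the login page, the file can be sanity-checked with kubectl (a quick check, not in the original steps):
# kubectl --kubeconfig=/opt/kubeconfig get nodes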

1.7.1.4: Switch kube-proxy from iptables to ipvs and set the scheduling algorithm:
On each node, edit the kube-proxy unit file and add the following flags to the kube-proxy command line:
root@s6:~# vim /etc/systemd/system/kube-proxy.service
  --proxy-mode=ipvs \
  --ipvs-scheduler=sh
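A minimal sketch of the resulting ExecStart section (the binary path and the remaining flags are whatever the existing unit file already has), followed by the restart:
ExecStart=/usr/bin/kube-proxy \
  ... \
  --proxy-mode=ipvs \
  --ipvs-scheduler=sh
# the ip_vs kernel modules and ipset must be available on the node
root@s6:~# systemctl daemon-reload && systemctl restart kube-proxy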
1.7.1.5: Set the session duration for token logins
# vim dashboard/kubernetes-dashboard.yaml
        image: harbor1.dexter.com/baseimages/kubernetes-dashboard-amd64:v1.10.1
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          - --token-ttl=43200
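After editing, re-apply the manifest so the dashboard pod is recreated with the new argument:
# kubectl apply -f dashboard/kubernetes-dashboard.yaml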
1.7.1.6: Session affinity:
  sessionAffinity: ClientIP
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800
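These fields sit under the Service spec; a minimal sketch of where they go in the dashboard Service:
spec:
  type: NodePort
  sessionAffinity: ClientIP
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800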
1.8: DNS service:
The two DNS components in common use today are kube-dns and coredns.
1.8.1: Deploy coredns:
root@ansible-vm1:/etc/ansible/manifests# mkdir -pv dns/{kube-dns,coredns}
root@ansible-vm1:/etc/ansible/manifests# cd dns/kube-dns/
Upload the required files to the kube-dns directory; the specific files are shown in the figure below.

Note: these files can be found in the Kubernetes GitHub project and usually ship inside its binary release package. The Kubernetes binary release can be downloaded from:
https://github.com/kubernetes/kubernetes/releases
# docker load -i busybox-online.tar.gz
# docker tag quay.io/prometheus/busybox:latest harbor1.dexter.com/baseimages/busybox:latest
# docker push harbor1.dexter.com/baseimages/busybox:latest
Edit busybox.yaml
root@ansible-vm1:/etc/ansible/manifests/dns/kube-dns# cat busybox.yaml
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default    # DNS test pod in the default namespace
spec:
  containers:
  - image: harbor1.dexter.com/baseimages/busybox:latest
    command:
      - sleep
      - "3600"
    imagePullPolicy: Always
    name: busybox
  restartPolicy: Always
Start busybox
root@ansible-vm1:/etc/ansible/manifests/dns/kube-dns# kubectl create -f busybox.yaml
root@ansible-vm1:/etc/ansible/manifests/dns/kube-dns# kubectl get pods
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 0 14m
# docker tag gcr.io/google-containers/coredns:1.2.6 harbor1.dexter.com/baseimages/coredns:1.2.6
# docker push harbor1.dexter.com/baseimages/coredns:1.2.6
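The image reference in coredns.yaml then also has to point at the local Harbor copy; a sketch that swaps whatever registry the upstream manifest references for the local one:
# sed -i 's#image: .*coredns.*#image: harbor1.dexter.com/baseimages/coredns:1.2.6#g' coredns.yaml
# grep "image:" coredns.yaml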
1.8.2: Deploy kube-dns:
1. skyDNS / kube-dns / coreDNS
kube-dns: resolves service-name domains
dns-dnsmasq: provides DNS caching, reducing the load on kube-dns and improving performance
dns-sidecar: periodically checks the health of kube-dns and dnsmasq
2. Import the images and push them to the local Harbor
# docker load -i k8s-dns-kube-dns-amd64_1.14.13.tar.gz
# docker tag gcr.io/google-containers/k8s-dns-kube-dns-amd64:1.14.13 harbor1.dexter.com/baseimages/k8s-dns-kube-dns-amd64:1.14.13
# docker push harbor1.dexter.com/baseimages/k8s-dns-kube-dns-amd64:1.14.13
# docker load -i k8s-dns-sidecar-amd64_1.14.13.tar.gz
# docker tag gcr.io/google-containers/k8s-dns-sidecar-amd64:1.14.13 harbor1.dexter.com/baseimages/k8s-dns-sidecar-amd64:1.14.13
# docker push harbor1.dexter.com/baseimages/k8s-dns-sidecar-amd64:1.14.13
# docker load -i k8s-dns-dnsmasq-nanny-amd64_1.14.13.tar.gz
# docker tag gcr.io/google-containers/k8s-dns-dnsmasq-nanny-amd64:1.14.13 harbor1.dexter.com/baseimages/k8s-dns-dnsmasq-nanny-amd64:1.14.13
# docker push harbor1.dexter.com/baseimages/k8s-dns-dnsmasq-nanny-amd64:1.14.13
3. Change the image addresses in the yaml file to the local Harbor addresses
Note the kubelet's --cluster-dns value below (10.20.254.254); the Service clusterIP in kube-dns.yaml must match it.
root@k8s-n1:~# ps -ef | grep dns | grep -v grep
root 1007 1 5 11:42 ? 00:16:08 /usr/bin/kubelet --address=172.16.99.123 --allow-privileged=true --anonymous-auth=false --authentication-token-webhook --authorization-mode=Webhook --client-ca-file=/etc/kubernetes/ssl/ca.pem --cluster-dns=10.20.254.254 --cluster-domain=cluster.local. --cni-bin-dir=/usr/bin --cni-conf-dir=/etc/cni/net.d --fail-swap-on=false --hairpin-mode hairpin-veth --hostname-override=172.16.99.123 --kubeconfig=/etc/kubernetes/kubelet.kubeconfig --max-pods=110 --network-plugin=cni --pod-infra-container-image=harbor1.dexter.com/baseimages/pause-amd64:3.1 --register-node=true --root-dir=/var/lib/kubelet --tls-cert-file=/etc/kubernetes/ssl/kubelet.pem --tls-private-key-file=/etc/kubernetes/ssl/kubelet-key.pem --v=2
root@ansible-vm1:/etc/ansible/manifests/dns/kube-dns# vim kube-dns.yaml
...
  clusterIP: 10.20.254.254
...
      - name: kubedns
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local.
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
...
      - name: dnsmasq
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --no-negcache
        - --dns-loop-detect
        - --log-facility=-
        - --server=/dexter.com/172.20.100.23#53
        - --server=/cluster.local/127.0.0.1#10053
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
...
      - name: sidecar
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV
        ports:
        - containerPort: 10054
4. Create the service
# kubectl apply -f kube-dns.yaml
5. Check that the pods are running properly
root@ansible-vm1:/etc/ansible/manifests/dns/kube-dns# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-d59797cd7-dqkcd 1/1 Running 0 6h5m
calico-node-7nwn8 2/2 Running 2 2d3h
calico-node-9sdfq 2/2 Running 4 2d3h
calico-node-m9zkv 2/2 Running 6 2d3h
calico-node-tdzv6 2/2 Running 6 2d3h
heapster-7f4864f77-jzk78 1/1 Running 0 115m
kube-dns-569c979454-j579m 3/3 Running 0 67s
kubernetes-dashboard-5d6c5449c8-lr7p7 1/1 Running 0 97m
monitoring-grafana-685557648b-9qr74 1/1 Running 0 115m
monitoring-influxdb-5cc945bc5c-kt8qn 1/1 Running 0 115m
1.8.3: DNS test:
# vim coredns.yaml
# kubectl apply -f coredns.yaml
# kubectl exec busybox nslookup kubernetes
Server: 10.20.254.254
Address 1: 10.20.254.254 kube-dns.kube-system.svc.cluster.local
Name: kubernetes
Address 1: 10.20.0.1 kubernetes.default.svc.cluster.local
# kubectl exec busybox nslookup kubernetes.default.svc.cluster.local
Server: 10.20.254.254
Address 1: 10.20.254.254 kube-dns.kube-system.svc.cluster.local
Name: kubernetes.default.svc.cluster.local
Address 1: 10.20.0.1 kubernetes.default.svc.cluster.local
1.8.4: Monitoring component heapster:
heapster: data collection; influxdb: data storage; grafana: web presentation
1. Import the corresponding images
docker pull mirrorgooglecontainers/heapster-grafana-amd64:v5.0.4
docker pull mirrorgooglecontainers/heapster-amd64:v1.5.4
docker pull mirrorgooglecontainers/heapster-influxdb-amd64:v1.5.2
docker tag mirrorgooglecontainers/heapster-grafana-amd64:v5.0.4 harbor1.dexter.com/baseimages/heapster-grafana-amd64:v5.0.4
docker tag mirrorgooglecontainers/heapster-amd64:v1.5.4 harbor1.dexter.com/baseimages/heapster-amd64:v1.5.4
docker tag mirrorgooglecontainers/heapster-influxdb-amd64:v1.5.2 harbor1.dexter.com/baseimages/heapster-influxdb-amd64:v1.5.2
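The retagged images presumably also need to be pushed to Harbor before the manifests can pull them:
docker push harbor1.dexter.com/baseimages/heapster-grafana-amd64:v5.0.4
docker push harbor1.dexter.com/baseimages/heapster-amd64:v1.5.4
docker push harbor1.dexter.com/baseimages/heapster-influxdb-amd64:v1.5.2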
2. Change the image addresses in the yaml files
mkdir -pv heapster
cd heapster
Modify the image lines (before and after the change):
# cat *.yaml | grep image
image: k8s.gcr.io/heapster-grafana-amd64:v5.0.4
image: k8s.gcr.io/heapster-amd64:v1.5.4
imagePullPolicy: IfNotPresent
image: k8s.gcr.io/heapster-influxdb-amd64:v1.5.2
# sed -i 's#k8s.gcr.io#harbor1.dexter.com/baseimages#g' *.yaml
# cat *.yaml | grep image
image: harbor1.dexter.com/baseimages/heapster-grafana-amd64:v5.0.4
image: harbor1.dexter.com/baseimages/heapster-amd64:v1.5.4
imagePullPolicy: IfNotPresent
image: harbor1.dexter.com/baseimages/heapster-influxdb-amd64:v1.5.2
3. Create the services
kubectl create -f .
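The new pods can then be verified the same way as before (a quick check):
kubectl get pods -n kube-system | grep -E 'heapster|grafana|influxdb'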
Note 1: heapster-grafana-amd64:v4.4.3 is the more usable version and is recommended; its information can be viewed in Grafana directly, whereas heapster-grafana-amd64:v5.0.4 does not create the dashboards out of the box.


Note 2: the resulting images in the Harbor registry are shown in the figure below
