集群環境:C1(master01),C2(node02)
1、安裝dokcer並啟動
sudo apt-get install docker.io
sudo service docker start
2、關閉firewilld和selinux
查看selinux狀態:/usr/sbin/sestatus -v
臨時關閉:setenforce 0
永久關閉:修改/etc/selinux/config文件中設置SELINUX=disabled ,然后重啟服務器。
顯示防火牆和端口的偵聽狀態:ufw status
關閉的防火牆 :ufw disable
開啟防火牆:ufw enable
3、配置hosts並同步時鍾
hostnamectl set-hostname master01
hostnamectl set-hostname node01
echo -e "192.168.25.30 master01\n192.168.25.31 node01" >> /etc/hosts
4、待docker安裝完成,配置docker.service(每個節點)
vi /lib/systemd/system/docker.service
#找到ExecStart=xxx,在這行上面加入一行,內容如下:(k8s的網絡需要)
ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT
5、重讀systemctl並啟動docker
systemctl daemon-reload
systemctl start docker
6、設置系統參數 - 允許路由轉發,不對bridge的數據進行處理
#寫入配置文件
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
#生效配置文件
sysctl -p /etc/sysctl.d/k8s.conf
7、這里k8s采用二進制安裝方式
#安裝cfssl(所有節點) wget -q --timestamping \ https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \ https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 mv cfssl_linux-amd64 /usr/local/bin/cfssl mv cfssljson_linux-amd64 /usr/local/bin/cfssljson cfssl version #生成根證書(主節點) mkdir -p /etc/kubernetes/ca vim /etc/kubernetes/ca/ca-config.json #輸入一下內容 根據官網模板修改 { "signing": { "default": { "expiry": "87600h" }, "profiles": { "kubernetes": { "usages": [ "signing", "key encipherment", "server auth", "client auth" ], "expiry": "87600h" } } } vim /etc/kubernetes/ca/ca-csr.json #輸入一下內容 根據官網模板修改 { "CN": "kubernetes", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Beijing", "L": "XS", "O": "k8s", "OU": "System" } ] } #生成證書和秘鑰 cd /etc/kubernetes/ca cfssl gencert -initca ca-csr.json | cfssljson -bare ca ls ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem #部署etcd #生成etcd證書 mkdir -p /var/lib/etcd #工作目錄 mkdir -p /etc/kubernetes/ca/etcd vim /etc/kubernetes/ca/etcd/etcd-csr.json #輸入一下內容 根據官網模板修改 { "CN": "etcd", "hosts": [ "127.0.0.1", "192.168.66.135" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Beijing", "L": "XS", "O": "k8s", "OU": "System" } ] } cd /etc/kubernetes/ca/etcd/ cfssl gencert \ -ca=/etc/kubernetes/ca/ca.pem \ -ca-key=/etc/kubernetes/ca/ca-key.pem \ -config=/etc/kubernetes/ca/ca-config.json \ -profile=kubernetes etcd-csr.json | cfssljson -bare etcd ls etcd.csr etcd-csr.json etcd-key.pem etcd.pem #配置etcd為系統服務 vim /lib/systemd/system/etcd.service #輸入如下內容,注意修改主機ip和etcd路徑以及ca證書位置 [Unit] Description=Etcd Server After=network.target After=network-online.target Wants=network-online.target Documentation=https://github.com/coreos [Service] Type=notify WorkingDirectory=/var/lib/etcd/ ExecStart=/root/bin/etcd \ --name=192.168.66.135 \ --listen-client-urls=https://192.168.66.135:2379,http://127.0.0.1:2379 \ --advertise-client-urls=https://192.168.66.135:2379 \ --data-dir=/var/lib/etcd \ --listen-peer-urls=https://192.168.66.135:2380 \ --initial-advertise-peer-urls=https://192.168.66.135:2380 \ --cert-file=/etc/kubernetes/ca/etcd/etcd.pem \ --key-file=/etc/kubernetes/ca/etcd/etcd-key.pem \ --peer-cert-file=/etc/kubernetes/ca/etcd/etcd.pem \ --peer-key-file=/etc/kubernetes/ca/etcd/etcd-key.pem \ --trusted-ca-file=/etc/kubernetes/ca/ca.pem \ --peer-trusted-ca-file=/etc/kubernetes/ca/ca.pem Restart=on-failure RestartSec=5 LimitNOFILE=65536 [Install] WantedBy=multi-user.target #啟動服務 systemctl daemon-reload systemctl start etcd #驗證etcd ETCDCTL_API=3 etcdctl \ --endpoints=https://192.168.66.135:2379 \ --cacert=/etc/kubernetes/ca/ca.pem \ --cert=/etc/kubernetes/ca/etcd/etcd.pem \ --key=/etc/kubernetes/ca/etcd/etcd-key.pem \ endpoint health #出現類似如下結果,說明配置成功 https://192.168.66.135:2379 is healthy: successfully committed proposal: took = 5.194485ms #如果有異常可以查看日志 journalctl -f -u etcd.service # 部署APIServer(主節點) #生成證書 mkdir -p /etc/kubernetes/ca/kubernetes #准備csr,類似etcd cat kubernetes-csr.json { "CN": "kubernetes", "hosts": [ "127.0.0.1", "192.168.66.135", "10.68.0.1", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Beijing", "L": "XS", "O": "k8s", "OU": "System" } ] } cd /etc/kubernetes/ca/kubernetes/ #使用根證書(ca.pem)簽發kubernetes證書 cfssl gencert \ -ca=/etc/kubernetes/ca/ca.pem \ -ca-key=/etc/kubernetes/ca/ca-key.pem \ -config=/etc/kubernetes/ca/ca-config.json \ -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes ls kubernetes.csr kubernetes-csr.json kubernetes-key.pem kubernetes.pem #生成隨機token head -c 16 /dev/urandom | od -An -t x | tr -d ' ' 11f4d7eafcf06965e413a409d35c3893 #按照固定格式寫入token.csv,注意替換token內容 echo "11f4d7eafcf06965e413a409d35c3893,kubelet-bootstrap,10001,\"system:kubelet-bootstrap\"" > /etc/kubernetes/ca/kubernetes/token.csv #配置apiserver為系統服務 vim /lib/systemd/system/kube-apiserver.service #輸入如下內容,注意修改ip和相關路徑 [Unit] Description=Kubernetes API Server Documentation=https://github.com/GoogleCloudPlatform/kubernetes After=network.target [Service] ExecStart=/root/bin/kube-apiserver \ --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \ --insecure-bind-address=127.0.0.1 \ --kubelet-https=true \ --bind-address=192.168.66.135 \ --authorization-mode=Node,RBAC \ --runtime-config=rbac.authorization.k8s.io/v1 \ --enable-bootstrap-token-auth \ --token-auth-file=/etc/kubernetes/ca/kubernetes/token.csv \ --tls-cert-file=/etc/kubernetes/ca/kubernetes/kubernetes.pem \ --tls-private-key-file=/etc/kubernetes/ca/kubernetes/kubernetes-key.pem \ --client-ca-file=/etc/kubernetes/ca/ca.pem \ --service-account-key-file=/etc/kubernetes/ca/ca-key.pem \ --etcd-cafile=/etc/kubernetes/ca/ca.pem \ --etcd-certfile=/etc/kubernetes/ca/kubernetes/kubernetes.pem \ --etcd-keyfile=/etc/kubernetes/ca/kubernetes/kubernetes-key.pem \ --service-cluster-ip-range=10.68.0.0/16 \ --service-node-port-range=20000-40000 \ --etcd-servers=https://192.168.66.135:2379 \ --enable-swagger-ui=true \ --allow-privileged=true \ --audit-log-maxage=30 \ --audit-log-maxbackup=3 \ --audit-log-maxsize=100 \ --audit-log-path=/var/lib/audit.log \ --event-ttl=1h \ --v=2 Restart=on-failure RestartSec=5 Type=notify LimitNOFILE=65536 [Install] WantedBy=multi-user.target #啟動服務 systemctl daemon-reload systemctl start kube-apiserver #部署CalicoNode(所有節點) #准備證書 vim /etc/kubernetes/ca/calico/calico-csr.json { "CN": "calico", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Beijing", "L": "XS", "O": "k8s", "OU": "System" } ] cd /etc/kubernetes/ca/calico/ cfssl gencert \ -ca=/etc/kubernetes/ca/ca.pem \ -ca-key=/etc/kubernetes/ca/ca-key.pem \ -config=/etc/kubernetes/ca/ca-config.json \ -profile=kubernetes calico-csr.json | cfssljson -bare calico #配置為系統服務 vim /lib/systemd/system/kube-calico.service [Unit] Description=calico node After=docker.service Requires=docker.service [Service] User=root PermissionsStartOnly=true ExecStart=/usr/bin/docker run --net=host --privileged --name=calico-node \ -e ETCD_ENDPOINTS=https://192.168.66.135:2379 \ -e ETCD_CA_CERT_FILE=/etc/kubernetes/ca/ca.pem \ -e ETCD_CERT_FILE=/etc/kubernetes/ca/calico/calico.pem \ -e ETCD_KEY_FILE=/etc/kubernetes/ca/calico/calico-key.pem \ -e CALICO_LIBNETWORK_ENABLED=true \ -e CALICO_NETWORKING_BACKEND=bird \ -e CALICO_DISABLE_FILE_LOGGING=true \ -e CALICO_IPV4POOL_CIDR=172.20.0.0/16 \ -e CALICO_IPV4POOL_IPIP=off \ -e FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT \ -e FELIX_IPV6SUPPORT=false \ -e FELIX_LOGSEVERITYSCREEN=info \ -e FELIX_IPINIPMTU=1440 \ -e FELIX_HEALTHENABLED=true \ -e IP= \ -v /etc/kubernetes/ca:/etc/kubernetes/ca \ -v /var/run/calico:/var/run/calico \ -v /lib/modules:/lib/modules \ -v /run/docker/plugins:/run/docker/plugins \ -v /var/run/docker.sock:/var/run/docker.sock \ -v /var/log/calico:/var/log/calico \ registry.cn-hangzhou.aliyuncs.com/imooc/calico-node:v2.6.2 ExecStop=/usr/bin/docker rm -f calico-node Restart=always RestartSec=10 [Install] WantedBy=multi-user.target #啟動服務 systemctl enable kube-calico.service systemctl start kube-calico.service #部署ControllerManager(主節點) #配置ControllerManager為系統服務 vim /lib/systemd/system/kube-controller-manager.service #輸入如下內容,注意修改主機ip和相關路徑 [Unit] Description=Kubernetes Controller Manager Documentation=https://github.com/GoogleCloudPlatform/kubernetes [Service] ExecStart=/root/bin/kube-controller-manager \ --address=127.0.0.1 \ --master=http://127.0.0.1:8080 \ --allocate-node-cidrs=true \ --service-cluster-ip-range=10.68.0.0/16 \ --cluster-cidr=172.20.0.0/16 \ --cluster-name=kubernetes \ --leader-elect=true \ --cluster-signing-cert-file=/etc/kubernetes/ca/ca.pem \ --cluster-signing-key-file=/etc/kubernetes/ca/ca-key.pem \ --service-account-private-key-file=/etc/kubernetes/ca/ca-key.pem \ --root-ca-file=/etc/kubernetes/ca/ca.pem \ --v=2 Restart=on-failure RestartSec=5 [Install] WantedBy=multi-user.target #啟動服務 systemctl daemon-reload systemctl start kube-controller-manager # 部署Scheduler(主節點) #配置Scheduler為系統服務 vim /lib/systemd/system/kube-scheduler.service #輸入如下內容,注意修改主機ip和相關路徑 [Unit] Description=Kubernetes Scheduler Documentation=https://github.com/GoogleCloudPlatform/kubernetes [Service] ExecStart=/root/bin/kube-scheduler \ --address=127.0.0.1 \ --master=http://127.0.0.1:8080 \ --leader-elect=true \ --v=2 Restart=on-failure RestartSec=5 [Install] WantedBy=multi-user.target #啟動服務 systemctl enable kube-scheduler.service systemctl start kube-scheduler.service #配置kubectl mkdir -p /etc/kubernetes/ca/admin #准備csr.json,內容如下 { "CN": "admin", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Beijing", "L": "XS", "O": "system:masters", "OU": "System" } ] } cd /etc/kubernetes/ca/admin/ cfssl gencert \ -ca=/etc/kubernetes/ca/ca.pem \ -ca-key=/etc/kubernetes/ca/ca-key.pem \ -config=/etc/kubernetes/ca/ca-config.json \ -profile=kubernetes admin-csr.json | cfssljson -bare admin #指定apiserver的地址和證書位置 kubectl config set-cluster kubernetes \ --certificate-authority=/etc/kubernetes/ca/ca.pem \ --embed-certs=true \ --server=https://192.168.66.135:6443 #設置客戶端認證參數,指定admin證書和秘鑰 kubectl config set-credentials admin \ --client-certificate=/etc/kubernetes/ca/admin/admin.pem \ --embed-certs=true \ --client-key=/etc/kubernetes/ca/admin/admin-key.pem #關聯用戶和集群 kubectl config set-context kubernetes \ --cluster=kubernetes --user=admin #設置當前上下文 kubectl config use-context kubernetes #部署kubelet #創建角色綁定(主節點) kubectl -n kube-system get clusterrole kubectl create clusterrolebinding kubelet-bootstrap \ --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap #創建bootstrap.kubeconfig(工作節點) #設置集群參數(注意替換ip) kubectl config set-cluster kubernetes \ --certificate-authority=/etc/kubernetes/ca/ca.pem \ --embed-certs=true \ --server=https://192.168.66.135:6443 \ --kubeconfig=bootstrap.kubeconfig #設置客戶端認證參數(注意替換token) kubectl config set-credentials kubelet-bootstrap \ --token=11f4d7eafcf06965e413a409d35c3893\ --kubeconfig=bootstrap.kubeconfig #設置上下文 kubectl config set-context default \ --cluster=kubernetes \ --user=kubelet-bootstrap \ --kubeconfig=bootstrap.kubeconfig #選擇上下文 kubectl config use-context default --kubeconfig=bootstrap.kubeconfig mv bootstrap.kubeconfig /etc/kubernetes/ #准備cni配置 [root@C2 ~]# cat /etc/cni/net.d/10-calico.conf { "name": "calico-k8s-network", "cniVersion": "0.1.0", "type": "calico", "etcd_endpoints": "https://192.168.66.135:2379", "etcd_key_file": "/etc/kubernetes/ca/calico/calico-key.pem", "etcd_cert_file": "/etc/kubernetes/ca/calico/calico.pem", "etcd_ca_cert_file": "/etc/kubernetes/ca/ca.pem", "log_level": "info", "ipam": { "type": "calico-ipam" }, "kubernetes": { "kubeconfig": "/etc/kubernetes/kubelet.kubeconfig" } } #配置為系統服務並啟動 [root@C2 ~]# cat /lib/systemd/system/kubelet.service [Unit] Description=Kubernetes Kubelet Documentation=https://github.com/GoogleCloudPlatform/kubernetes After=docker.service Requires=docker.service [Service] WorkingDirectory=/var/lib/kubelet ExecStart=/root/bin/kubelet \ --address=192.168.66.136 \ --hostname-override=192.168.66.136 \ --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/imooc/pause-amd64:3.0 \ --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \ --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \ --cert-dir=/etc/kubernetes/ca \ --hairpin-mode hairpin-veth \ --network-plugin=cni \ --cni-conf-dir=/etc/cni/net.d \ --cni-bin-dir=/root/bin \ --cluster-dns=10.68.0.2 \ --cluster-domain=cluster.local. \ --allow-privileged=true \ --fail-swap-on=false \ --logtostderr=true \ --v=2 #kubelet cAdvisor 默認在所有接口監聽 4194 端口的請求, 以下iptables限制內網訪問 ExecStartPost=/sbin/iptables -A INPUT -s 10.0.0.0/8 -p tcp --dport 4194 -j ACCEPT ExecStartPost=/sbin/iptables -A INPUT -s 172.16.0.0/12 -p tcp --dport 4194 -j ACCEPT ExecStartPost=/sbin/iptables -A INPUT -s 192.168.0.0/16 -p tcp --dport 4194 -j ACCEPT ExecStartPost=/sbin/iptables -A INPUT -p tcp --dport 4194 -j DROP Restart=on-failure RestartSec=5 [Install] WantedBy=multi-user.target #啟動kubelet之后到master節點允許worker加入(批准worker的tls證書請求) #--------*在主節點執行*--------- $ kubectl get csr|grep 'Pending' | awk '{print $1}'| xargs kubectl certificate approve #----------------------------- #部署kube-proxy(工作節點) #生成proxy證書 mkdir -p /etc/kubernetes/ca/kube-proxy #准備proxy證書配置 - proxy只需客戶端證書,因此證書請求中 hosts 字段可以為空。 #CN 指定該證書的 User 為 system:kube-proxy,預定義的 ClusterRoleBinding system:node-proxy 將User system:kube-proxy 與 Role system:node-proxier 綁定,授予了調用 kube-api-server proxy的相關 API 的權限 cat /etc/kubernetes/ca/kube-proxy/kube-proxy-csr.json [root@C2 ~]# cat /etc/kubernetes/ca/kube-proxy/kube-proxy-csr.json { "CN": "system:kube-proxy", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "Beijing", "L": "XS", "O": "k8s", "OU": "System" } ] } cd /etc/kubernetes/ca/kube-proxy/ cfssl gencert \ -ca=/etc/kubernetes/ca/ca.pem \ -ca-key=/etc/kubernetes/ca/ca-key.pem \ -config=/etc/kubernetes/ca/ca-config.json \ -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy #同上配置系統服務並啟動 [root@C2 ~]# cat /lib/systemd/system/kube kube-calico.service kubelet.service kube-proxy.service [root@C2 ~]# cat /lib/systemd/system/kube-proxy.service [Unit] Description=Kubernetes Kube-Proxy Server Documentation=https://github.com/GoogleCloudPlatform/kubernetes After=network.target [Service] WorkingDirectory=/var/lib/kube-proxy ExecStart=/root/bin/kube-proxy \ --bind-address=192.168.66.136 \ --hostname-override=192.168.66.136 \ --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \ --logtostderr=true \ --v=2 Restart=on-failure RestartSec=5 LimitNOFILE=65536 [Install] WantedBy=multi-user.target ps:因為需要依賴conntrack-tools,所以在啟動之前,執行yum install conntrack-tools -y #部署kube-dns(k8s app) #在主節點准備如下文件 cat kube-dns.yaml --- apiVersion: v1 kind: ConfigMap metadata: name: kube-dns namespace: kube-system labels: addonmanager.kubernetes.io/mode: EnsureExists --- apiVersion: v1 kind: ServiceAccount metadata: name: kube-dns namespace: kube-system labels: addonmanager.kubernetes.io/mode: Reconcile --- apiVersion: v1 kind: Service metadata: name: kube-dns namespace: kube-system labels: k8s-app: kube-dns addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "KubeDNS" spec: selector: k8s-app: kube-dns clusterIP: 10.68.0.2 ports: - name: dns port: 53 protocol: UDP - name: dns-tcp port: 53 protocol: TCP --- apiVersion: apps/v1 kind: Deployment metadata: name: kube-dns namespace: kube-system labels: k8s-app: kube-dns addonmanager.kubernetes.io/mode: Reconcile spec: strategy: rollingUpdate: maxSurge: 10% maxUnavailable: 0 selector: matchLabels: k8s-app: kube-dns template: metadata: labels: k8s-app: kube-dns annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: tolerations: - key: "CriticalAddonsOnly" operator: "Exists" volumes: - name: kube-dns-config configMap: name: kube-dns optional: true containers: - name: kubedns image: registry.cn-hangzhou.aliyuncs.com/imooc/k8s-dns-kube-dns-amd64:1.14.5 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in # guaranteed class. Currently, this container falls into the # "burstable" category so the kubelet doesn't backoff from restarting it. limits: memory: 170Mi requests: cpu: 100m memory: 70Mi livenessProbe: httpGet: path: /healthcheck/kubedns port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 readinessProbe: httpGet: path: /readiness port: 8081 scheme: HTTP # we poll on pod startup for the Kubernetes master service and # only setup the /readiness HTTP server once that's available. initialDelaySeconds: 3 timeoutSeconds: 5 args: - --domain=cluster.local. - --dns-port=10053 - --config-dir=/kube-dns-config - --v=2 env: - name: PROMETHEUS_PORT value: "10055" ports: - containerPort: 10053 name: dns-local protocol: UDP - containerPort: 10053 name: dns-tcp-local protocol: TCP - containerPort: 10055 name: metrics protocol: TCP volumeMounts: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq image: registry.cn-hangzhou.aliyuncs.com/imooc/k8s-dns-dnsmasq-nanny-amd64:1.14.5 livenessProbe: httpGet: path: /healthcheck/dnsmasq port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 args: - -v=2 - -logtostderr - -configDir=/etc/k8s/dns/dnsmasq-nanny - -restartDnsmasq=true - -- - -k - --cache-size=1000 - --log-facility=- - --server=/cluster.local./127.0.0.1#10053 - --server=/in-addr.arpa/127.0.0.1#10053 - --server=/ip6.arpa/127.0.0.1#10053 ports: - containerPort: 53 name: dns protocol: UDP - containerPort: 53 name: dns-tcp protocol: TCP # see: https://github.com/kubernetes/kubernetes/issues/29055 for details resources: requests: cpu: 150m memory: 20Mi volumeMounts: - name: kube-dns-config mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar image: registry.cn-hangzhou.aliyuncs.com/imooc/k8s-dns-sidecar-amd64:1.14.5 livenessProbe: httpGet: path: /metrics port: 10054 scheme: HTTP initialDelaySeconds: 60 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 5 args: - --v=2 - --logtostderr - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local.,5,A - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local.,5,A ports: - containerPort: 10054 name: metrics protocol: TCP resources: requests: memory: 20Mi cpu: 10m dnsPolicy: Default # Don't use cluster DNS. serviceAccountName: kube-dns kubectl create -f ~/kube-dns.yaml
