I. Initialization
1、准備三節點 192.168.67.130-132 配置計算機名 hosts 時區 DNS setenforce 0
hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02
[root@k8s-master01 ~]# cat /etc/hosts|grep k8
192.168.67.130 k8s-master01 m1
192.168.67.131 k8s-node01 n1
192.168.67.132 k8s-node02 n2
rm -rf /etc/localtime && ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
echo "nameserver 8.8.8.8" >> /etc/resolv.conf
2. Disable firewalld, swap, SELinux, postfix (and NetworkManager)
systemctl stop firewalld && systemctl disable firewalld
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
systemctl stop postfix && systemctl disable postfix
systemctl stop NetworkManager && systemctl disable NetworkManager
3. Install dependency packages
yum install -y conntrack ntpdate ntp ipvsadm ipset iptables curl sysstat libseccomp wget vim net-tools git iptables-services
systemctl start iptables && systemctl enable iptables
iptables -F && service iptables save
4. Tune kernel parameters for Kubernetes
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# do not use swap; only fall back to it when the system hits OOM
vm.swappiness=0
# do not check whether enough physical memory is available
vm.overcommit_memory=1
# do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
sysctl -p kubernetes.conf
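If sysctl -p complains that the net.bridge.* keys do not exist, the bridge netfilter module is probably not loaded yet; a quick check (assuming the standard br_netfilter module name):
modprobe br_netfilter
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables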
5. Configure rsyslogd and systemd journald
mkdir /var/log/journal # directory for persistent logs
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent
# Compress historical logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# Maximum disk usage 10G
SystemMaxUse=10G
# Maximum size of a single log file 200M
SystemMaxFileSize=200M
# Keep logs for 2 weeks
MaxRetentionSec=2week
# Do not forward logs to syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
6. Upgrade the system kernel
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
yum --enablerepo=elrepo-kernel install -y kernel-lt
#Installed:
#kernel-lt.x86_64 0:5.4.185-1.el7.elrepo
#[root@localhost ~]# cat /boot/grub2/grub.cfg |grep Core
#menuentry 'CentOS Linux (5.4.185-1.el7.elrepo.x86_64) 7 (Core)' --class centos --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-693.el7.x86_64-advanced-90620579-94b5-4d17-8c9e-69be7e7e8510'
# Set the kernel to boot by default, then reboot
grub2-set-default 'CentOS Linux (5.4.185-1.el7.elrepo.x86_64) 7 (Core)'
init 6
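After the reboot, confirm the new kernel is actually running:
uname -r
# should report the elrepo kernel installed above, e.g. 5.4.185-1.el7.elrepo.x86_64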
7. Prerequisites for enabling IPVS in kube-proxy
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
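Note: on kernels 4.19 and later (including the 5.4 kernel installed in step 6) nf_conntrack_ipv4 has been merged into nf_conntrack, so the last modprobe may fail. A hedged variant for the newer kernel:
modprobe -- nf_conntrack
lsmod | grep -e ip_vs -e nf_conntrack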
II. Docker and importing the images
1. Install Docker
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager \
  --add-repo \
  http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce
2. Configure Docker
mkdir -pv /etc/docker /etc/systemd/system/docker.service.d
# daemon.json must be valid JSON, so it cannot contain comment lines;
# add "insecure-registries": ["www.zzx.com"] as an extra key if a private registry is needed
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "registry-mirrors": ["https://f1bhsuge.mirror.aliyuncs.com"]
}
EOF
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
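Quick check that Docker picked up the systemd cgroup driver (kubelet and Docker should agree on it):
docker info | grep -i cgroup
# Cgroup Driver: systemd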
3. Install kubeadm
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
systemctl enable kubelet.service
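A quick sanity check that the pinned 1.15.1 packages landed (kubelet will not stay running until kubeadm init/join writes its config, which is expected at this point):
kubeadm version -o short
kubelet --version
kubectl version --client --short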
4. List the images that need to be installed
[root@k8s-master01 ~]# kubeadm config images list
I0319 21:45:39.690796 58635 version.go:248] remote version is much newer: v1.23.5; falling back to: stable-1.15
k8s.gcr.io/kube-apiserver:v1.15.12
k8s.gcr.io/kube-controller-manager:v1.15.12
k8s.gcr.io/kube-scheduler:v1.15.12
k8s.gcr.io/kube-proxy:v1.15.12
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.3.10
k8s.gcr.io/coredns:1.3.1
5. Install the images (on all nodes; loading the flannel image as well is recommended)
# Pull automatically
kubeadm config images pull
# Or load images downloaded in advance: apiserver.tar coredns.tar etcd.tar kubec-con-man.tar pause.tar proxy.tar scheduler.tar
docker load -i <image-name>.tar
docker pull quay.io/coreos/flannel:v0.12.0-amd64
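A small sketch for loading every tarball in one go and copying the flannel image to the other nodes (assumes the .tar files sit in the current directory, the n1/n2 aliases from /etc/hosts, and an example file name flannel-v0.12.0.tar):
for t in *.tar; do docker load -i "$t"; done
# distribute a locally pulled image to the other nodes as a tarball
docker save -o flannel-v0.12.0.tar quay.io/coreos/flannel:v0.12.0-amd64
scp flannel-v0.12.0.tar n1:/root/ && ssh n1 "docker load -i /root/flannel-v0.12.0.tar"
scp flannel-v0.12.0.tar n2:/root/ && ssh n2 "docker load -i /root/flannel-v0.12.0.tar"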
6. Check the installed images
[root@k8s-master01 ~]# docker images
REPOSITORY                                       TAG       IMAGE ID       CREATED       SIZE
rancher/mirrored-flannelcni-flannel              v0.17.0   9247abf08677   2 weeks ago   59.8MB
rancher/mirrored-flannelcni-flannel-cni-plugin   v1.0.1    ac40ce625740   8 weeks ago   8.1MB
k8s.gcr.io/kube-proxy                            v1.15.1   89a062da739d   2 years ago   82.4MB
k8s.gcr.io/kube-scheduler                        v1.15.1   b0b3c4c404da   2 years ago   81.1MB
k8s.gcr.io/kube-controller-manager               v1.15.1   d75082f1d121   2 years ago   159MB
k8s.gcr.io/kube-apiserver                        v1.15.1   68c3eb07bfc3   2 years ago   207MB
k8s.gcr.io/coredns                               1.3.1     eb516548c180   3 years ago   40.3MB
k8s.gcr.io/etcd                                  3.3.10    2c4adeb21b4f   3 years ago   258MB
k8s.gcr.io/pause                                 3.1       da86e6ba6ca1   4 years ago   742kB
7. Modify the kubeadm configuration file (controlPlaneEndpoint is required when adding more masters for high availability)
kubeadm config print init-defaults > kubeadm-config.yaml
Edit the file as follows:
1. Change advertiseAddress: 1.2.3.4 to the master IP: advertiseAddress: 192.168.67.130
2. Change the version: kubernetesVersion: v1.14.0  -->  kubernetesVersion: v1.15.1
3. After serviceSubnet: 10.96.0.0/12, add: podSubnet: "10.244.0.0/16"
4. On the line after kubernetesVersion:, add controlPlaneEndpoint: 192.168.67.130:6443
5. At the end of the file, right after scheduler: {}, append a new document starting with ---:
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
A sketch of the fully edited file is shown below.
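An abridged sketch of the resulting kubeadm-config.yaml under the edits above (only the fields touched here; the remaining defaults printed by kubeadm config print init-defaults stay as generated):
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.67.130
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
controlPlaneEndpoint: 192.168.67.130:6443
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: "10.244.0.0/16"
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs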
Initialize the master node (run on the master only; worker nodes do not need to be initialized)
kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
cat kubeadm-init.log
[root@k8s-master01 ~]# cat kubeadm-init.log [init] Using Kubernetes version: v1.15.1 [preflight] Running pre-flight checks [preflight] Pulling images required for setting up a Kubernetes cluster [preflight] This might take a minute or two, depending on the speed of your internet connection [preflight] You can also perform this action in beforehand using 'kubeadm config images pull' [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" [kubelet-start] Activating the kubelet service [certs] Using certificateDir folder "/etc/kubernetes/pki" [certs] Generating "ca" certificate and key [certs] Generating "apiserver" certificate and key [certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.67.130] [certs] Generating "apiserver-kubelet-client" certificate and key [certs] Generating "front-proxy-ca" certificate and key [certs] Generating "front-proxy-client" certificate and key [certs] Generating "etcd/ca" certificate and key [certs] Generating "etcd/healthcheck-client" certificate and key [certs] Generating "etcd/peer" certificate and key [certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.67.130 127.0.0.1 ::1] [certs] Generating "apiserver-etcd-client" certificate and key [certs] Generating "etcd/server" certificate and key [certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.67.130 127.0.0.1 ::1] [certs] Generating "sa" key and public key [kubeconfig] Using kubeconfig folder "/etc/kubernetes" [kubeconfig] Writing "admin.conf" kubeconfig file [kubeconfig] Writing "kubelet.conf" kubeconfig file [kubeconfig] Writing "controller-manager.conf" kubeconfig file [kubeconfig] Writing "scheduler.conf" kubeconfig file [control-plane] Using manifest folder "/etc/kubernetes/manifests" [control-plane] Creating static Pod manifest for "kube-apiserver" [control-plane] Creating static Pod manifest for "kube-controller-manager" [control-plane] Creating static Pod manifest for "kube-scheduler" [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s [kubelet-check] Initial timeout of 40s passed. 
[apiclient] All control plane components are healthy after 78.033441 seconds [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace [kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster [upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace [upload-certs] Using certificate key: 563876bbfc717e9dcc713e63e32cc17740e67eb7b1e9e59b58397a7179f70447 [mark-control-plane] Marking the node k8s-master01 as control-plane by adding the label "node-role.kubernetes.io/master=''" [mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] [bootstrap-token] Using token: abcdef.0123456789abcdef [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace [addons] Applied essential addon: CoreDNS [addons] Applied essential addon: kube-proxy Your Kubernetes control-plane has initialized successfully! To start using your cluster, you need to run the following as a regular user: mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config You should now deploy a pod network to the cluster. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/ Then you can join any number of worker nodes by running the following on each as root: kubeadm join 192.168.67.130:6443 --token abcdef.0123456789abcdef \ --discovery-token-ca-cert-hash sha256:54061c49d48775a80026cce95b531df9e52e08b965b5ede4f5dfc74e2d038c31
The init log when controlPlaneEndpoint is set (the first join command is for adding masters, the second for adding worker nodes)
[root@k8s-master11 ~]# kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log Flag --experimental-upload-certs has been deprecated, use --upload-certs instead W0325 14:38:13.577618 4660 strict.go:54] error unmarshaling configuration schema.GroupVersionKind{Group:"kubeadm.k8s.io", Version:"v1beta2", Kind:"ClusterConfiguration"}: error unmarshaling JSON: while decoding JSON: json: unknown field "scheduler" W0325 14:38:13.580013 4660 strict.go:54] error unmarshaling configuration schema.GroupVersionKind{Group:"kubeproxy.config.k8s.io", Version:"v1alpha1", Kind:"KubeProxyConfiguration"}: error unmarshaling JSON: while decoding JSON: json: unknown field "SupportIPVSProxyMode" [init] Using Kubernetes version: v1.15.1 [preflight] Running pre-flight checks [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.13. Latest validated version: 18.09 [preflight] Pulling images required for setting up a Kubernetes cluster [preflight] This might take a minute or two, depending on the speed of your internet connection [preflight] You can also perform this action in beforehand using 'kubeadm config images pull' [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" [kubelet-start] Activating the kubelet service [certs] Using certificateDir folder "/etc/kubernetes/pki" [certs] Generating "etcd/ca" certificate and key [certs] Generating "etcd/server" certificate and key [certs] etcd/server serving cert is signed for DNS names [k8s-master11 localhost] and IPs [192.168.1.222 127.0.0.1 ::1] [certs] Generating "etcd/healthcheck-client" certificate and key [certs] Generating "apiserver-etcd-client" certificate and key [certs] Generating "etcd/peer" certificate and key [certs] etcd/peer serving cert is signed for DNS names [k8s-master11 localhost] and IPs [192.168.1.222 127.0.0.1 ::1] [certs] Generating "ca" certificate and key [certs] Generating "apiserver" certificate and key [certs] apiserver serving cert is signed for DNS names [k8s-master11 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.222 192.168.1.222] [certs] Generating "apiserver-kubelet-client" certificate and key [certs] Generating "front-proxy-ca" certificate and key [certs] Generating "front-proxy-client" certificate and key [certs] Generating "sa" key and public key [kubeconfig] Using kubeconfig folder "/etc/kubernetes" [kubeconfig] Writing "admin.conf" kubeconfig file [kubeconfig] Writing "kubelet.conf" kubeconfig file [kubeconfig] Writing "controller-manager.conf" kubeconfig file [kubeconfig] Writing "scheduler.conf" kubeconfig file [control-plane] Using manifest folder "/etc/kubernetes/manifests" [control-plane] Creating static Pod manifest for "kube-apiserver" [control-plane] Creating static Pod manifest for "kube-controller-manager" [control-plane] Creating static Pod manifest for "kube-scheduler" [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s [kubelet-check] Initial timeout of 40s passed. 
[apiclient] All control plane components are healthy after 43.510140 seconds [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace [kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster [upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace [upload-certs] Using certificate key: 32c6b111d2e3b0c00536e46c035dea7cf48b227d8c0ec5f22204a22dcbffadaa [mark-control-plane] Marking the node k8s-master11 as control-plane by adding the label "node-role.kubernetes.io/master=''" [mark-control-plane] Marking the node k8s-master11 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule] [bootstrap-token] Using token: abcdef.0123456789abcdef [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles [bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials [bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token [bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace [addons] Applied essential addon: CoreDNS [addons] Applied essential addon: kube-proxy Your Kubernetes control-plane has initialized successfully! To start using your cluster, you need to run the following as a regular user: mkdir -p $HOME/.kube sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config sudo chown $(id -u):$(id -g) $HOME/.kube/config You should now deploy a pod network to the cluster. Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: https://kubernetes.io/docs/concepts/cluster-administration/addons/ You can now join any number of the control-plane node running the following command on each as root: kubeadm join 192.168.1.222:6443 --token abcdef.0123456789abcdef \ --discovery-token-ca-cert-hash sha256:b4d052516693a00a4f65b7f357ed5de7b9d6e6971c444674e023b6099d0321de \ --control-plane --certificate-key 32c6b111d2e3b0c00536e46c035dea7cf48b227d8c0ec5f22204a22dcbffadaa Please note that the certificate-key gives access to cluster sensitive data, keep it secret! As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use "kubeadm init phase upload-certs --upload-certs" to reload certs afterward. Then you can join any number of worker nodes by running the following on each as root: kubeadm join 192.168.1.222:6443 --token abcdef.0123456789abcdef \ --discovery-token-ca-cert-hash sha256:b4d052516693a00a4f65b7f357ed5de7b9d6e6971c444674e023b6099d0321de
Check the containers after initialization
[root@k8s-master01 ~]# docker ps -a CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 9a334ebeff7b 89a062da739d "/usr/local/bin/kube…" 4 minutes ago Up 4 minutes k8s_kube-proxy_kube-proxy-hpgj4_kube-system_752ff8cb-a6d2-4057-b66c-d806f2f94252_0 a63518132184 k8s.gcr.io/pause:3.1 "/pause" 4 minutes ago Up 4 minutes k8s_POD_kube-proxy-hpgj4_kube-system_752ff8cb-a6d2-4057-b66c-d806f2f94252_0 b858b2c351c6 2c4adeb21b4f "etcd --advertise-cl…" 6 minutes ago Up 6 minutes k8s_etcd_etcd-k8s-master01_kube-system_9091c3932085dc9fa7b1927b2dd6af54_0 f7d24c8e46f5 b0b3c4c404da "kube-scheduler --bi…" 6 minutes ago Up 6 minutes k8s_kube-scheduler_kube-scheduler-k8s-master01_kube-system_ecae9d12d3610192347be3d1aa5aa552_0 14c87b4328a6 68c3eb07bfc3 "kube-apiserver --ad…" 6 minutes ago Up 6 minutes k8s_kube-apiserver_kube-apiserver-k8s-master01_kube-system_21e2cd988cdb757666987c7460642659_0 11fe3dcd2159 d75082f1d121 "kube-controller-man…" 6 minutes ago Up 6 minutes k8s_kube-controller-manager_kube-controller-manager-k8s-master01_kube-system_5a1fa432561d9745fe013857ccb566c1_0 2cc9bd668f88 k8s.gcr.io/pause:3.1 "/pause" 6 minutes ago Up 6 minutes k8s_POD_kube-scheduler-k8s-master01_kube-system_ecae9d12d3610192347be3d1aa5aa552_0 fc775bda68d9 k8s.gcr.io/pause:3.1 "/pause" 6 minutes ago Up 6 minutes k8s_POD_kube-controller-manager-k8s-master01_kube-system_5a1fa432561d9745fe013857ccb566c1_0 7307ddd0a491 k8s.gcr.io/pause:3.1 "/pause" 6 minutes ago Up 6 minutes k8s_POD_kube-apiserver-k8s-master01_kube-system_21e2cd988cdb757666987c7460642659_0 3b61a057462b k8s.gcr.io/pause:3.1 "/pause" 6 minutes ago Up 6 minutes k8s_POD_etcd-k8s-master01_kube-system_9091c3932085dc9fa7b1927b2dd6af54_0
Adding a master
Use the commands from the init log. If the token has expired, first generate a new join command (this gives the worker join part):
[root@k8s-master12 ~]# kubeadm token create --print-join-command
kubeadm join 192.168.1.222:6443 --token 9gqlv3.ls8txw55cmv0b8cv --discovery-token-ca-cert-hash sha256:b4d052516693a00a4f65b7f357ed5de7b9d6e6971c444674e023b6099d0321de
Then re-upload the certificates to get a fresh certificate key:
[root@k8s-master12 ~]# kubeadm init phase upload-certs --experimental-upload-certs
Flag --experimental-upload-certs has been deprecated, use --upload-certs instead
I0325 15:47:01.601649 42170 version.go:248] remote version is much newer: v1.23.5; falling back to: stable-1.15
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key: b67f75146964cc75a21643fbce229a0a584101db2fb4a8411ef2a861dbabba48
Combine the two into the control-plane join command:
kubeadm join 192.168.1.222:6443 --token 9gqlv3.ls8txw55cmv0b8cv --discovery-token-ca-cert-hash sha256:b4d052516693a00a4f65b7f357ed5de7b9d6e6971c444674e023b6099d0321de --control-plane --certificate-key b67f75146964cc75a21643fbce229a0a584101db2fb4a8411ef2a861dbabba48
Configure admin.conf, otherwise kubectl reports errors:
mkdir -p $HOME/.kube # on all three nodes
[root@k8s-master01 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master01 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master01 ~]# scp -r /etc/kubernetes/admin.conf n1:$HOME/.kube/config
[root@k8s-master01 ~]# scp -r /etc/kubernetes/admin.conf n2:$HOME/.kube/config
If admin.conf is not configured, kubectl fails like this:
[root@k8s-master01 ~]# kubectl get pod The connection to the server localhost:8080 was refused - did you specify the right host or port?
[root@k8s-node01 ~]# kubectl apply -f kube-flannel.yml unable to recognize "kube-flannel.yml": Get http://localhost:8080/api?timeout=32s: dial tcp 127.0.0.1:8080: connect: connection refused
Join the worker nodes to the cluster (tokens expire; if a node cannot join, obtain a new token first. Handling a failed join when adding a new node is covered later in this document.)
[root@k8s-node01 ~]# kubeadm join 192.168.67.130:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:54061c49d48775a80026cce95b531df9e52e08b965b5ede4f5dfc74e2d038c31
Check the nodes
[root@k8s-master01 ~]# kubectl get node
NAME           STATUS     ROLES    AGE     VERSION
k8s-master01   NotReady   master   48m     v1.15.1
k8s-node01     NotReady   <none>   2m16s   v1.15.1
k8s-node02     NotReady   <none>   2m9s    v1.15.1
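The nodes stay NotReady until a CNI plugin is deployed; a quick way to confirm that is to look at CoreDNS (which stays Pending for the same reason) and at the node's Ready condition:
kubectl -n kube-system get pod -o wide | grep coredns
kubectl describe node k8s-node01 | grep -i ready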
Deploy flannel (running it on one master is enough; that brings up flannel on every node. Verify with kubectl get po -A -o wide|grep flannel)
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
If the manifest cannot be downloaded, use the copy below (image: quay.io/coreos/flannel v0.12.0-amd64, ID 4e9f801d2217;
docker pull quay.io/coreos/flannel:v0.12.0-amd64 )
--- apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: name: psp.flannel.unprivileged annotations: seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default spec: privileged: false volumes: - configMap - secret - emptyDir - hostPath allowedHostPaths: - pathPrefix: "/etc/cni/net.d" - pathPrefix: "/etc/kube-flannel" - pathPrefix: "/run/flannel" readOnlyRootFilesystem: false # Users and groups runAsUser: rule: RunAsAny supplementalGroups: rule: RunAsAny fsGroup: rule: RunAsAny # Privilege Escalation allowPrivilegeEscalation: false defaultAllowPrivilegeEscalation: false # Capabilities allowedCapabilities: ['NET_ADMIN'] defaultAddCapabilities: [] requiredDropCapabilities: [] # Host namespaces hostPID: false hostIPC: false hostNetwork: true hostPorts: - min: 0 max: 65535 # SELinux seLinux: # SELinux is unused in CaaSP rule: 'RunAsAny' --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: flannel rules: - apiGroups: ['extensions'] resources: ['podsecuritypolicies'] verbs: ['use'] resourceNames: ['psp.flannel.unprivileged'] - apiGroups: - "" resources: - pods verbs: - get - apiGroups: - "" resources: - nodes verbs: - list - watch - apiGroups: - "" resources: - nodes/status verbs: - patch --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: flannel roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: flannel subjects: - kind: ServiceAccount name: flannel namespace: kube-system --- apiVersion: v1 kind: ServiceAccount metadata: name: flannel namespace: kube-system --- kind: ConfigMap apiVersion: v1 metadata: name: kube-flannel-cfg namespace: kube-system labels: tier: node app: flannel data: cni-conf.json: | { "name": "cbr0", "cniVersion": "0.3.1", "plugins": [ { "type": "flannel", "delegate": { "hairpinMode": true, "isDefaultGateway": true } }, { "type": "portmap", "capabilities": { "portMappings": true } } ] } net-conf.json: | { "Network": "10.244.0.0/16", "Backend": { "Type": "vxlan" } } --- apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-flannel-ds-amd64 namespace: kube-system labels: tier: node app: flannel spec: selector: matchLabels: app: flannel template: metadata: labels: tier: node app: flannel spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: kubernetes.io/os operator: In values: - linux - key: kubernetes.io/arch operator: In values: - amd64 hostNetwork: true tolerations: - operator: Exists effect: NoSchedule serviceAccountName: flannel initContainers: - name: install-cni image: quay.io/coreos/flannel:v0.12.0-amd64 command: - cp args: - -f - /etc/kube-flannel/cni-conf.json - /etc/cni/net.d/10-flannel.conflist volumeMounts: - name: cni mountPath: /etc/cni/net.d - name: flannel-cfg mountPath: /etc/kube-flannel/ containers: - name: kube-flannel image: quay.io/coreos/flannel:v0.12.0-amd64 command: - /opt/bin/flanneld args: - --ip-masq - --kube-subnet-mgr resources: requests: cpu: "100m" memory: "50Mi" limits: cpu: "100m" memory: "50Mi" securityContext: privileged: false capabilities: add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace volumeMounts: - name: run mountPath: /run/flannel - 
name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d - name: flannel-cfg configMap: name: kube-flannel-cfg --- apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-flannel-ds-arm64 namespace: kube-system labels: tier: node app: flannel spec: selector: matchLabels: app: flannel template: metadata: labels: tier: node app: flannel spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: kubernetes.io/os operator: In values: - linux - key: kubernetes.io/arch operator: In values: - arm64 hostNetwork: true tolerations: - operator: Exists effect: NoSchedule serviceAccountName: flannel initContainers: - name: install-cni image: quay.io/coreos/flannel:v0.12.0-arm64 command: - cp args: - -f - /etc/kube-flannel/cni-conf.json - /etc/cni/net.d/10-flannel.conflist volumeMounts: - name: cni mountPath: /etc/cni/net.d - name: flannel-cfg mountPath: /etc/kube-flannel/ containers: - name: kube-flannel image: quay.io/coreos/flannel:v0.12.0-arm64 command: - /opt/bin/flanneld args: - --ip-masq - --kube-subnet-mgr resources: requests: cpu: "100m" memory: "50Mi" limits: cpu: "100m" memory: "50Mi" securityContext: privileged: false capabilities: add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace volumeMounts: - name: run mountPath: /run/flannel - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d - name: flannel-cfg configMap: name: kube-flannel-cfg --- apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-flannel-ds-arm namespace: kube-system labels: tier: node app: flannel spec: selector: matchLabels: app: flannel template: metadata: labels: tier: node app: flannel spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: kubernetes.io/os operator: In values: - linux - key: kubernetes.io/arch operator: In values: - arm hostNetwork: true tolerations: - operator: Exists effect: NoSchedule serviceAccountName: flannel initContainers: - name: install-cni image: quay.io/coreos/flannel:v0.12.0-arm command: - cp args: - -f - /etc/kube-flannel/cni-conf.json - /etc/cni/net.d/10-flannel.conflist volumeMounts: - name: cni mountPath: /etc/cni/net.d - name: flannel-cfg mountPath: /etc/kube-flannel/ containers: - name: kube-flannel image: quay.io/coreos/flannel:v0.12.0-arm command: - /opt/bin/flanneld args: - --ip-masq - --kube-subnet-mgr resources: requests: cpu: "100m" memory: "50Mi" limits: cpu: "100m" memory: "50Mi" securityContext: privileged: false capabilities: add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace volumeMounts: - name: run mountPath: /run/flannel - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d - name: flannel-cfg configMap: name: kube-flannel-cfg --- apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-flannel-ds-ppc64le namespace: kube-system labels: tier: node app: flannel spec: selector: matchLabels: app: flannel template: metadata: labels: tier: node app: flannel spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - 
key: kubernetes.io/os operator: In values: - linux - key: kubernetes.io/arch operator: In values: - ppc64le hostNetwork: true tolerations: - operator: Exists effect: NoSchedule serviceAccountName: flannel initContainers: - name: install-cni image: quay.io/coreos/flannel:v0.12.0-ppc64le command: - cp args: - -f - /etc/kube-flannel/cni-conf.json - /etc/cni/net.d/10-flannel.conflist volumeMounts: - name: cni mountPath: /etc/cni/net.d - name: flannel-cfg mountPath: /etc/kube-flannel/ containers: - name: kube-flannel image: quay.io/coreos/flannel:v0.12.0-ppc64le command: - /opt/bin/flanneld args: - --ip-masq - --kube-subnet-mgr resources: requests: cpu: "100m" memory: "50Mi" limits: cpu: "100m" memory: "50Mi" securityContext: privileged: false capabilities: add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace volumeMounts: - name: run mountPath: /run/flannel - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d - name: flannel-cfg configMap: name: kube-flannel-cfg --- apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-flannel-ds-s390x namespace: kube-system labels: tier: node app: flannel spec: selector: matchLabels: app: flannel template: metadata: labels: tier: node app: flannel spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: kubernetes.io/os operator: In values: - linux - key: kubernetes.io/arch operator: In values: - s390x hostNetwork: true tolerations: - operator: Exists effect: NoSchedule serviceAccountName: flannel initContainers: - name: install-cni image: quay.io/coreos/flannel:v0.12.0-s390x command: - cp args: - -f - /etc/kube-flannel/cni-conf.json - /etc/cni/net.d/10-flannel.conflist volumeMounts: - name: cni mountPath: /etc/cni/net.d - name: flannel-cfg mountPath: /etc/kube-flannel/ containers: - name: kube-flannel image: quay.io/coreos/flannel:v0.12.0-s390x command: - /opt/bin/flanneld args: - --ip-masq - --kube-subnet-mgr resources: requests: cpu: "100m" memory: "50Mi" limits: cpu: "100m" memory: "50Mi" securityContext: privileged: false capabilities: add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace volumeMounts: - name: run mountPath: /run/flannel - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d - name: flannel-cfg configMap: name: kube-flannel-cfg
Example
Applied on a worker node, and flannel comes up across the whole cluster:
[root@k8s-node01 ~]# kubectl apply -f fla.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
[root@k8s-node01 ~]# kubectl get node
NAME           STATUS     ROLES    AGE     VERSION
k8s-master11   NotReady   master   51m     v1.15.1
k8s-master12   NotReady   master   50m     v1.15.1
k8s-master13   NotReady   master   36m     v1.15.1
k8s-node01     NotReady   <none>   7m38s   v1.15.1
[root@k8s-node01 ~]# kubectl get node
NAME           STATUS   ROLES    AGE     VERSION
k8s-master11   Ready    master   53m     v1.15.1
k8s-master12   Ready    master   52m     v1.15.1
k8s-master13   Ready    master   38m     v1.15.1
k8s-node01     Ready    <none>   9m54s   v1.15.1
[root@k8s-node01 ~]# kubectl get po -A -o wide|grep flannel
kube-system   kube-flannel-ds-amd64-4brcm   1/1   Running   0   3m26s   192.168.1.223   k8s-master12   <none>   <none>
kube-system   kube-flannel-ds-amd64-6qx7w   1/1   Running   0   3m26s   192.168.1.222   k8s-master11   <none>   <none>
kube-system   kube-flannel-ds-amd64-fphtr   1/1   Running   0   3m26s   192.168.1.225   k8s-node01     <none>   <none>
kube-system   kube-flannel-ds-amd64-n492q   1/1   Running   0   3m26s   192.168.1.224   k8s-master13   <none>   <none>
Check the cluster status
[root@k8s-node01 ~]# kubectl cluster-info Kubernetes master is running at https://192.168.1.222:6443 KubeDNS is running at https://192.168.1.222:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. [root@k8s-node01 ~]# kubectl get node NAME STATUS ROLES AGE VERSION k8s-master11 Ready master 60m v1.15.1 k8s-master12 Ready master 59m v1.15.1 k8s-master13 Ready master 45m v1.15.1 k8s-node01 Ready <none> 16m v1.15.1 [root@k8s-node01 ~]# [root@k8s-node01 ~]# [root@k8s-node01 ~]# kubectl get endpoints kube-controller-manager -n kube-system -o yaml apiVersion: v1 kind: Endpoints metadata: annotations: control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master13_a009f513-fc78-4fe9-b145-3349ddf97e32","leaseDurationSeconds":15,"acquireTime":"2022-03-25T06:59:42Z","renewTime":"2022-03-25T07:39:30Z","leaderTransitions":3}' creationTimestamp: "2022-03-25T06:39:07Z" name: kube-controller-manager namespace: kube-system resourceVersion: "6475" selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager uid: f78caf1f-52c7-45e8-8d7e-f9af572cf2f8 [root@k8s-node01 ~]# [root@k8s-node01 ~]# [root@k8s-node01 ~]# kubectl get endpoints kube-scheduler -n kube-system -o yaml apiVersion: v1 kind: Endpoints metadata: annotations: control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"k8s-master12_db4f2952-76dc-47e2-87e5-98f7f43dcddc","leaseDurationSeconds":15,"acquireTime":"2022-03-25T06:59:30Z","renewTime":"2022-03-25T07:39:37Z","leaderTransitions":3}' creationTimestamp: "2022-03-25T06:39:07Z" name: kube-scheduler namespace: kube-system resourceVersion: "6484" selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler uid: b54f3201-de65-478d-880a-de7222156c3f
After flannel is deployed, the node status changes to Ready
[root@k8s-master01 ~]# kubectl get node NAME STATUS ROLES AGE VERSION k8s-master01 Ready master 76m v1.15.1 k8s-node01 Ready <none> 30m v1.15.1 k8s-node02 Ready <none> 30m v1.15.1 [root@k8s-master01 ~]# kubectl get pod No resources found. [root@k8s-master01 ~]# kubectl get pod -n kube-system NAME READY STATUS RESTARTS AGE coredns-5c98db65d4-rwdtr 1/1 Running 0 77m coredns-5c98db65d4-zhqwb 1/1 Running 0 77m etcd-k8s-master01 1/1 Running 0 77m kube-apiserver-k8s-master01 1/1 Running 0 77m kube-controller-manager-k8s-master01 1/1 Running 0 77m kube-flannel-ds-jrhz6 1/1 Running 0 12m kube-flannel-ds-kdmgx 1/1 Running 0 12m kube-flannel-ds-skqvq 1/1 Running 0 12m kube-proxy-hpgj4 1/1 Running 0 77m kube-proxy-q8rxb 1/1 Running 0 31m kube-proxy-ts8xr 1/1 Running 0 31m kube-scheduler-k8s-master01 1/1 Running 0 77m
[root@k8s-master01 ~]# kubectl get pod -n kube-system -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES coredns-5c98db65d4-rwdtr 1/1 Running 0 18h 10.244.0.2 k8s-master01 <none> <none> coredns-5c98db65d4-zhqwb 1/1 Running 0 18h 10.244.0.3 k8s-master01 <none> <none> etcd-k8s-master01 1/1 Running 0 18h 192.168.67.130 k8s-master01 <none> <none> kube-apiserver-k8s-master01 1/1 Running 0 18h 192.168.67.130 k8s-master01 <none> <none> kube-controller-manager-k8s-master01 1/1 Running 1 18h 192.168.67.130 k8s-master01 <none> <none> kube-flannel-ds-jrhz6 1/1 Running 0 17h 192.168.67.131 k8s-node01 <none> <none> kube-flannel-ds-kdmgx 1/1 Running 0 17h 192.168.67.130 k8s-master01 <none> <none> kube-flannel-ds-skqvq 1/1 Running 0 17h 192.168.67.132 k8s-node02 <none> <none> kube-proxy-hpgj4 1/1 Running 0 18h 192.168.67.130 k8s-master01 <none> <none> kube-proxy-q8rxb 1/1 Running 0 17h 192.168.67.131 k8s-node01 <none> <none> kube-proxy-ts8xr 1/1 Running 0 17h 192.168.67.132 k8s-node02 <none> <none> kube-scheduler-k8s-master01 1/1 Running 1 18h 192.168.67.130 k8s-master01 <none> <none> [root@k8s-master01 ~]# kubectl get pod --all-namespaces NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-5c98db65d4-rwdtr 1/1 Running 0 12h kube-system coredns-5c98db65d4-zhqwb 1/1 Running 0 12h kube-system etcd-k8s-master01 1/1 Running 0 12h kube-system kube-apiserver-k8s-master01 1/1 Running 0 12h kube-system kube-controller-manager-k8s-master01 1/1 Running 1 12h kube-system kube-flannel-ds-jrhz6 1/1 Running 0 11h kube-system kube-flannel-ds-kdmgx 1/1 Running 0 11h kube-system kube-flannel-ds-skqvq 1/1 Running 0 11h kube-system kube-proxy-hpgj4 1/1 Running 0 12h kube-system kube-proxy-q8rxb 1/1 Running 0 11h kube-system kube-proxy-ts8xr 1/1 Running 0 11h kube-system kube-scheduler-k8s-master01 1/1 Running 1 12h [root@k8s-master01 ~]# kubectl get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 12h
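Since KubeProxyConfiguration was set to mode: ipvs, it is worth confirming kube-proxy actually came up in IPVS mode (a hedged check; kube-proxy-hpgj4 is simply one of this cluster's kube-proxy pods, substitute your own):
kubectl -n kube-system get cm kube-proxy -o yaml | grep "mode:"
kubectl -n kube-system logs kube-proxy-hpgj4 | grep -i ipvs
ipvsadm -Ln | head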
Create a deployment (without specifying a namespace)
[root@k8s-master01 ~]# kubectl create deployment my-nginx-first --image=nginx
deployment.apps/my-nginx-first created
[root@k8s-master01 ~]# kubectl get pod -o wide
NAME                            READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-54ccf4ff-r5c7n   1/1     Running   0          8s    10.244.1.11   k8s-node01   <none>           <none>
[root@k8s-master01 ~]# kubectl get deployment -o wide
NAME             READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES   SELECTOR
my-nginx-first   1/1     1            1           24s   nginx        nginx    app=my-nginx-first
[root@k8s-master01 ~]# kubectl get svc -o wide
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE   SELECTOR
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   18h   <none>
[root@k8s-master01 ~]# kubectl create svc clusterip my-nginx-first --tcp=8081:80
service/my-nginx-first created
[root@k8s-master01 ~]# kubectl get svc -o wide
NAME             TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE   SELECTOR
kubernetes       ClusterIP   10.96.0.1       <none>        443/TCP    18h   <none>
my-nginx-first   ClusterIP   10.104.230.86   <none>        8081/TCP   15s   app=my-nginx-first
[root@k8s-master01 ~]# curl 10.104.230.86:8081
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto; font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and working. Further configuration is required.</p>
<p>For online documentation and support please refer to <a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at <a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
# Scale up: set replicas: 2
[root@k8s-master01 ~]# kubectl edit deployment my-nginx-first
deployment.extensions/my-nginx-first edited
[root@k8s-master01 ~]# kubectl get pod -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-54ccf4ff-fv7rz   1/1     Running   0          8s      10.244.2.11   k8s-node02   <none>           <none>
my-nginx-first-54ccf4ff-r5c7n   1/1     Running   0          3m20s   10.244.1.11   k8s-node01   <none>           <none>
[root@k8s-master01 ~]# kubectl delete pod my-nginx-first-54ccf4ff-fv7rz
pod "my-nginx-first-54ccf4ff-fv7rz" deleted
[root@k8s-master01 ~]# kubectl get pod -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-54ccf4ff-k2xhx   1/1     Running   0          11s     10.244.2.12   k8s-node02   <none>           <none>
my-nginx-first-54ccf4ff-r5c7n   1/1     Running   0          3m44s   10.244.1.11   k8s-node01   <none>           <none>
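For reference, a declarative sketch of the same ClusterIP Service that kubectl create svc clusterip produced above (field values taken from the command; apply with kubectl apply -f):
apiVersion: v1
kind: Service
metadata:
  name: my-nginx-first
  labels:
    app: my-nginx-first
spec:
  type: ClusterIP
  selector:
    app: my-nginx-first
  ports:
  - name: 8081-80
    port: 8081
    targetPort: 80
    protocol: TCP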
kubectl describe svc/<service-name>
[root@k8s-master01 ~]# kubectl describe svc/nginx-deployment
Name: nginx-deployment
Namespace: default
Labels: app=nginx-deployment
Annotations: <none>
Selector: app=nginx-deployment
Type: ClusterIP
IP: 10.101.173.31 #CLUSTER-IP
Port: 8089-80 8089/TCP
TargetPort: 80/TCP
Endpoints: 10.244.1.19:80,10.244.2.21:80 #pod IPs
Session Affinity: None
Events: <none>
[root@k8s-master01 ~]# kubectl delete svc/nginx-deployment
service "nginx-deployment" deleted
[root@k8s-master01 ~]# kubectl create svc nodeport nginx-deployment --tcp=8011:80
service/nginx-deployment created
[root@k8s-master01 ~]# kubectl get svc/nginx-deployment
NAME               TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
nginx-deployment   NodePort   10.107.24.168   <none>        8011:31521/TCP   4m24s
# the NodePort is reachable on any of the nodes (192.168.67.130-132)
[root@k8s-master01 ~]# curl 192.168.67.131:31521
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
[root@k8s-master01 ~]# kubectl describe svc/nginx-deployment
Name: nginx-deployment
Namespace: default
Labels: app=nginx-deployment
Annotations: <none>
Selector: app=nginx-deployment
Type: NodePort
IP: 10.107.24.168
Port: 8011-80 8011/TCP
TargetPort: 80/TCP
NodePort: 8011-80 31521/TCP
Endpoints: 10.244.1.19:6:80
Session Affinity: None
External Traffic Policy: Cluster
Events: <none>
Scaling out with kubectl scale
[root@k8s-master01 ~]# kubectl create deploy nginx-scale --image=nginx
deployment.apps/nginx-scale created
[root@k8s-master01 ~]# kubectl get deploy/nginx-scale
NAME          READY   UP-TO-DATE   AVAILABLE   AGE
nginx-scale   1/1     1            1           66s
[root@k8s-master01 ~]# kubectl scale --replicas=3 deployment nginx-scale
deployment.extensions/nginx-scale scaled
[root@k8s-master01 ~]# kubectl get deploy/nginx-scale
NAME          READY   UP-TO-DATE   AVAILABLE   AGE
nginx-scale   1/3     3            1           2m28s
[root@k8s-master01 ~]# kubectl get deploy/nginx-scale -o wide
NAME          READY   UP-TO-DATE   AVAILABLE   AGE     CONTAINERS   IMAGES   SELECTOR
nginx-scale   3/3     3            3           2m40s   nginx        nginx    app=nginx-scale
[root@k8s-master01 ~]# kubectl get pod
NAME                           READY   STATUS    RESTARTS   AGE
nginx-scale-5ff9f49f4d-8xz4m   1/1     Running   0          25s
nginx-scale-5ff9f49f4d-kvr44   1/1     Running   0          25s
nginx-scale-5ff9f49f4d-t4h4c   1/1     Running   0          2m49s
metrics-server setup
[root@k8s-master01 ~]# wget https://github.com/kubernetes-sigs/metrics-server/archive/v0.3.6.tar.gz
[root@k8s-master01 ~]# tar xvf v0.3.6.tar.gz
[root@k8s-master01 ~]# cd metrics-server-0.3.6/deploy/1.8+/
[root@k8s-master01 1.8+]# vi metrics-server-deployment.yaml
# Change image and imagePullPolicy
        image: mirrorgooglecontainers/metrics-server-amd64:v0.3.6
        imagePullPolicy: IfNotPresent
# Add a command section
        command:
        - /metrics-server
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalDNS,InternalIP,ExternalDNS,ExternalIP,Hostname
# Finally, add a resources section
        resources:
          limits:
            cpu: 300m
            memory: 200Mi
          requests:
            cpu: 200m
            memory: 100Mi
[root@k8s-master01 ~]# kubectl apply -f metrics-server-0.3.6/deploy/1.8+/   # apply the whole directory, not a single yml
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
serviceaccount/metrics-server created
deployment.apps/metrics-server created
service/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
[root@k8s-master01 ~]# kubectl top node   # if this error appears, wait a bit and run it again
Error from server (ServiceUnavailable): the server is currently unable to handle the request (get nodes.metrics.k8s.io)
[root@k8s-master01 ~]# kubectl top node
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
k8s-master01   554m         27%    1010Mi          35%
k8s-node01     242m         12%    838Mi           44%
k8s-node02     180m         4%     491Mi           26%
[root@k8s-master01 ~]# kubectl top po
NAME                                CPU(cores)   MEMORY(bytes)
nginx-deployment-7f58cf9455-bhnn8   0m           6Mi
nginx-scale-5ff9f49f4d-8xz4m        0m           1Mi
nginx-scale-5ff9f49f4d-kvr44        0m           1Mi
nginx-scale-5ff9f49f4d-t4h4c        0m           1Mi
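A hedged way to check that the metrics API actually registered before relying on kubectl top:
kubectl get apiservice v1beta1.metrics.k8s.io
kubectl -n kube-system get pod | grep metrics-server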
Configure HPA (autoscale)
[root@k8s-master01 ~]# kubectl get pod
NAME                           READY   STATUS    RESTARTS   AGE
nginx-scale-5ff9f49f4d-8xz4m   1/1     Running   0          23m
nginx-scale-5ff9f49f4d-kvr44   1/1     Running   0          23m
nginx-scale-5ff9f49f4d-t4h4c   1/1     Running   0          25m
[root@k8s-master01 ~]# kubectl set resources deployment nginx-scale --limits=cpu=50m,memory=512Mi --requests=cpu=10m,memory=256Mi
deployment.extensions/nginx-scale resource requirements updated
[root@k8s-master01 ~]# kubectl autoscale deployment nginx-scale --min=1 --max=10 --cpu-percent=50
horizontalpodautoscaler.autoscaling/nginx-scale autoscaled
[root@k8s-master01 ~]# kubectl get hpa
NAME          REFERENCE                TARGETS         MINPODS   MAXPODS   REPLICAS   AGE
nginx-scale   Deployment/nginx-scale   <unknown>/50%   1         10        0          5s
# <unknown> resolves after a short wait; without resources limits configured it stays <unknown> forever
[root@k8s-master01 ~]# kubectl get hpa
NAME          REFERENCE                TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
nginx-scale   Deployment/nginx-scale   0%/50%    1         10        3          24s
[root@k8s-master01 ~]# kubectl top pod
NAME                           CPU(cores)   MEMORY(bytes)
nginx-scale-649d95bcb4-dnrjr   0m           1Mi
nginx-scale-649d95bcb4-nkg8m   0m           3Mi
nginx-scale-649d95bcb4-nlrjk   0m           3Mi
[root@k8s-master01 ~]# kubectl create svc nodeport nginx-scale --tcp=8011:80
service/nginx-scale created
[root@k8s-master01 ~]# kubectl get svc
NAME          TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE
kubernetes    ClusterIP   10.96.0.1      <none>        443/TCP          3d3h
nginx-scale   NodePort    10.107.61.46   <none>        8011:30096/TCP   9s
[root@k8s-master01 ~]# kubectl get pod
NAME                                READY   STATUS    RESTARTS   AGE
nginx-deployment-7f58cf9455-bhnn8   1/1     Running   0          72m   10.244.1.78   k8s-node01   <none>   <none>
nginx-scale-649d95bcb4-nlrjk        1/1     Running   0          15m
[root@k8s-master01 ~]# kubectl top pod
NAME                           CPU(cores)   MEMORY(bytes)
nginx-scale-649d95bcb4-nlrjk   0m           3Mi
# Start load testing to verify autoscaling; install the ab benchmark tool: yum -y install httpd-tools
[root@k8s-node02 ~]# while :;do ab -n 1000 -c 100 http://192.168.67.130:30096/ ;done
[root@k8s-master01 ~]# kubectl top pod
NAME                           CPU(cores)   MEMORY(bytes)
nginx-scale-649d95bcb4-nlrjk   12m          3Mi
[root@k8s-master01 ~]# kubectl get pod
NAME                           READY   STATUS              RESTARTS   AGE
nginx-scale-649d95bcb4-nlrjk   1/1     Running             0          17m
nginx-scale-649d95bcb4-q5qcm   0/1     ContainerCreating   0          2s
nginx-scale-649d95bcb4-w9hlf   0/1     ContainerCreating   0          2s
[root@k8s-master01 ~]# kubectl get pod -o wide
NAME                           READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
nginx-scale-649d95bcb4-nlrjk   1/1     Running   0          17m   10.244.2.86   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-q5qcm   1/1     Running   0          21s   10.244.2.88   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-w9hlf   1/1     Running   0          21s   10.244.1.84   k8s-node01   <none>           <none>
[root@k8s-master01 ~]# kubectl get pod -o wide
NAME                           READY   STATUS              RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
nginx-scale-649d95bcb4-mgjjc   0/1     ContainerCreating   0          7s    <none>        k8s-node01   <none>           <none>
nginx-scale-649d95bcb4-nlrjk   1/1     Running             0          18m   10.244.2.86   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-q5qcm   1/1     Running             0          68s   10.244.2.88   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-qwrdx   0/1     ContainerCreating   0          8s    <none>        k8s-node01   <none>           <none>
nginx-scale-649d95bcb4-sh79d   0/1     ContainerCreating   0          7s    <none>        k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-w9hlf   1/1     Running             0          68s   10.244.1.84   k8s-node01   <none>           <none>
[root@k8s-master01 ~]# kubectl top pod
NAME                           CPU(cores)   MEMORY(bytes)
nginx-scale-649d95bcb4-nlrjk   44m          3Mi
nginx-scale-649d95bcb4-q5qcm   47m          3Mi
nginx-scale-649d95bcb4-w9hlf   43m          2Mi
[root@k8s-master01 ~]# kubectl top node
NAME           CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
k8s-master01   834m         41%    1030Mi          36%
k8s-node01     1302m        65%    809Mi           43%
k8s-node02     878m         21%    549Mi           29%
[root@k8s-master01 ~]# kubectl get pod -o wide
NAME                           READY   STATUS    RESTARTS   AGE     IP            NODE         NOMINATED NODE   READINESS GATES
nginx-scale-649d95bcb4-mgjjc   1/1     Running   0          89s     10.244.1.86   k8s-node01   <none>           <none>
nginx-scale-649d95bcb4-mtt7c   1/1     Running   0          74s     10.244.2.91   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-nlrjk   1/1     Running   0          19m     10.244.2.86   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-q5qcm   1/1     Running   0          2m30s   10.244.2.88   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-qwrdx   1/1     Running   0          90s     10.244.1.85   k8s-node01   <none>           <none>
nginx-scale-649d95bcb4-rj7dq   1/1     Running   0          74s     10.244.2.90   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-sh79d   1/1     Running   0          89s     10.244.2.89   k8s-node02   <none>           <none>
nginx-scale-649d95bcb4-spzsb   1/1     Running   0          74s     10.244.1.87   k8s-node01   <none>           <none>
nginx-scale-649d95bcb4-w9hlf   1/1     Running   0          2m30s   10.244.1.84   k8s-node01   <none>           <none>
nginx-scale-649d95bcb4-xdk79   1/1     Running   0          26s     10.244.1.88   k8s-node01   <none>           <none>
[root@k8s-master01 ~]# kubectl top pod
NAME                           CPU(cores)   MEMORY(bytes)
nginx-scale-649d95bcb4-mgjjc   31m          2Mi
nginx-scale-649d95bcb4-mtt7c   32m          3Mi
nginx-scale-649d95bcb4-nlrjk   42m          3Mi
nginx-scale-649d95bcb4-q5qcm   43m          3Mi
nginx-scale-649d95bcb4-qwrdx   27m          2Mi
nginx-scale-649d95bcb4-rj7dq   44m          3Mi
nginx-scale-649d95bcb4-sh79d   38m          3Mi
nginx-scale-649d95bcb4-spzsb   30m          2Mi
nginx-scale-649d95bcb4-w9hlf   24m          2Mi
nginx-scale-649d95bcb4-xdk79   36m          2Mi
# Stop ab and wait for the scale-down; it takes quite a while
[root@k8s-master01 ~]# kubectl top node
NAME           CPU(cores)   CPU%        MEMORY(bytes)   MEMORY%
k8s-master01   470m         23%         1029Mi          36%
k8s-node01     205m         10%         832Mi           44%
k8s-node02     215m         5%          526Mi           27%
k8s-node03     <unknown>    <unknown>   <unknown>       <unknown>
[root@k8s-master01 ~]# kubectl get pod -o wide
NAME                           READY   STATUS    RESTARTS   AGE    IP            NODE         NOMINATED NODE   READINESS GATES
nginx-scale-649d95bcb4-nlrjk   1/1     Running   0          126m   10.244.2.86   k8s-node02   <none>           <none>
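For reference, a declarative sketch equivalent to the kubectl autoscale command above (values taken from the command; apply with kubectl apply -f):
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: nginx-scale
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx-scale
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 50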
Fixing the container time differing from the node time
[root@k8s-master01 ~]# cat time.yml
apiVersion: v1
kind: Pod
metadata:
  name: time-nginx
spec:
  containers:
  - name: time-nginx
    image: nginx
    env:
    - name: TZ
      value: Asia/Shanghai
[root@k8s-master01 ~]# kubectl apply -f time.yml
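A quick check that the TZ environment variable took effect inside the container:
kubectl exec time-nginx -- date
# should print Asia/Shanghai (CST) time matching the node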
View labels
[root@k8s-master01 ~]# kubectl get po --show-labels
NAME                                READY   STATUS    RESTARTS   AGE   LABELS
nginx-deployment-75bd67c8c5-4ws67   1/1     Running   0          21h   app=nginx-deployment,pod-template-hash=75bd67c8c5
nginx-deployment-75bd67c8c5-zp9nz   1/1     Running   0          21h   app=nginx-deployment,pod-template-hash=75bd67c8c5
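Labels are what Services and Deployments select on, and the same selectors work from the CLI:
kubectl get pod -l app=nginx-deployment
kubectl get pod -l 'app in (nginx-deployment,nginx-scale)'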
Rolling restart (new pods: none --> ContainerCreating --> Running; old pods: Running --> Terminating --> gone)
[root@k8s-master01 ~]# kubectl rollout restart deploy my-nginx-first;while :;do kubectl get pod -o wide;sleep 1;done deployment.extensions/my-nginx-first restarted NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES my-nginx-first-68c47d7f6-8jzs5 1/1 Running 0 2m46s 10.244.1.12 k8s-node01 <none> <none> my-nginx-first-68c47d7f6-fpnr2 1/1 Running 0 2m31s 10.244.2.14 k8s-node02 <none> <none> my-nginx-first-78cbf67ff-dkmbt 0/1 ContainerCreating 0 1s <none> k8s-node01 <none> <none> NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES my-nginx-first-68c47d7f6-8jzs5 1/1 Running 0 3m5s 10.244.1.12 k8s-node01 <none> <none> my-nginx-first-68c47d7f6-fpnr2 1/1 Terminating 0 2m50s 10.244.2.14 k8s-node02 <none> <none> my-nginx-first-78cbf67ff-dkmbt 1/1 Running 0 20s 10.244.1.13 k8s-node01 <none> <none> my-nginx-first-78cbf67ff-dtrxc 0/1 ContainerCreating 0 1s <none> k8s-node02 <none> <none> NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES my-nginx-first-68c47d7f6-8jzs5 1/1 Running 0 3m12s 10.244.1.12 k8s-node01 <none> <none> my-nginx-first-78cbf67ff-dkmbt 1/1 Running 0 27s 10.244.1.13 k8s-node01 <none> <none> my-nginx-first-78cbf67ff-dtrxc 0/1 ContainerCreating 0 8s <none> k8s-node02 <none> <none> NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES my-nginx-first-68c47d7f6-8jzs5 1/1 Terminating 0 3m16s 10.244.1.12 k8s-node01 <none> <none> my-nginx-first-78cbf67ff-dkmbt 1/1 Running 0 31s 10.244.1.13 k8s-node01 <none> <none> my-nginx-first-78cbf67ff-dtrxc 1/1 Running 0 12s 10.244.2.15 k8s-node02 <none> <none> NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES my-nginx-first-78cbf67ff-dkmbt 1/1 Running 0 34s 10.244.1.13 k8s-node01 <none> <none> my-nginx-first-78cbf67ff-dtrxc 1/1 Running 0 15s 10.244.2.15 k8s-node02 <none> <none>
Rollback (from 78cbf67ff back to 68c47d7f6). Besides undo there are also rollout pause and rollout resume, which pause and resume a rollout respectively.
[root@k8s-master01 ~]# kubectl rollout undo deployment
deployment.extensions/my-nginx-first rolled back
[root@k8s-master01 ~]# kubectl get pod -o wide
NAME                             READY   STATUS              RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-68c47d7f6-n96cm   1/1     Running             0          12s   10.244.1.14   k8s-node01   <none>           <none>
my-nginx-first-68c47d7f6-trbbk   0/1     ContainerCreating   0          3s    <none>        k8s-node02   <none>           <none>
my-nginx-first-78cbf67ff-dkmbt   1/1     Running             0          17m   10.244.1.13   k8s-node01   <none>           <none>
my-nginx-first-78cbf67ff-dtrxc   0/1     Terminating         0          16m   10.244.2.15   k8s-node02   <none>           <none>
[root@k8s-master01 ~]# kubectl get pod -o wide
NAME                             READY   STATUS    RESTARTS   AGE   IP            NODE         NOMINATED NODE   READINESS GATES
my-nginx-first-68c47d7f6-n96cm   1/1     Running   0          58s   10.244.1.14   k8s-node01   <none>           <none>
my-nginx-first-68c47d7f6-trbbk   1/1     Running   0          49s   10.244.2.16   k8s-node02   <none>           <none>
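Two related commands worth knowing here: rollout history lists the revisions you can undo to, and rollout status waits for a rollout to finish:
kubectl rollout history deployment my-nginx-first
kubectl rollout undo deployment my-nginx-first --to-revision=1
kubectl rollout status deployment my-nginx-first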
Create with a YAML file
[root@k8s-master01 ~]# cat 1.yml
apiVersion: extensions/v1beta1  # API version
kind: Deployment                # resource type
metadata:                       # resource metadata
  name: nginx-deployment        # resource name
spec:                           # resource spec
  replicas: 2                   # 2 replicas, default is 1
  template:                     # pod template
    metadata:                   # pod metadata
      labels:                   # labels
        app: web_server         # label the selector matches on
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 22122
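extensions/v1beta1 Deployments are deprecated and removed in newer Kubernetes releases; a sketch of the same Deployment written for apps/v1, which additionally requires an explicit selector:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 2
  selector:
    matchLabels:
      app: web_server
  template:
    metadata:
      labels:
        app: web_server
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 22122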
Create a deployment with 3 replicas (kubectl run directly, without specifying a namespace)
[root@k8s-master01 ~]# kubectl run my-nginx --image=nginx --replicas 3 --port=80 #kubectl delete deployment my-nginx
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead. deployment.apps/my-nginx created
[root@k8s-master01 ~]# kubectl get pod -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES my-nginx-756fb87568-pfc22 0/1 ContainerCreating 0 22s <none> k8s-node02 <none> <none> my-nginx-756fb87568-q4fc2 1/1 Running 0 23s 10.244.2.3 k8s-node02 <none> <none> my-nginx-756fb87568-t2bvp 1/1 Running 0 22s 10.244.1.4 k8s-node01 <none> <none> my-nginx-first-54ccf4ff-5s2dd 1/1 Running 0 6m5s 10.244.2.2 k8s-node02 <none> <none> my-nginx-first-54ccf4ff-7x9rs 1/1 Running 0 27m 10.244.1.2 k8s-node01 <none> <none> my-nginx-first-54ccf4ff-j7f7w 1/1 Running 0 6m5s 10.244.1.3 k8s-node01 <none> <none>
[root@k8s-master01 ~]# kubectl get pod --all-namespaces NAMESPACE NAME READY STATUS RESTARTS AGE default my-nginx-756fb87568-pfc22 1/1 Running 0 57s default my-nginx-756fb87568-q4fc2 1/1 Running 0 58s default my-nginx-756fb87568-t2bvp 1/1 Running 0 57s default my-nginx-first-54ccf4ff-5s2dd 1/1 Running 0 6m40s default my-nginx-first-54ccf4ff-7x9rs 1/1 Running 0 28m default my-nginx-first-54ccf4ff-j7f7w 1/1 Running 0 6m40s kube-system coredns-5c98db65d4-rwdtr 1/1 Running 0 12h kube-system coredns-5c98db65d4-zhqwb 1/1 Running 0 12h kube-system etcd-k8s-master01 1/1 Running 0 12h kube-system kube-apiserver-k8s-master01 1/1 Running 0 12h kube-system kube-controller-manager-k8s-master01 1/1 Running 1 12h kube-system kube-flannel-ds-jrhz6 1/1 Running 0 11h kube-system kube-flannel-ds-kdmgx 1/1 Running 0 11h kube-system kube-flannel-ds-skqvq 1/1 Running 0 11h kube-system kube-proxy-hpgj4 1/1 Running 0 12h kube-system kube-proxy-q8rxb 1/1 Running 0 12h kube-system kube-proxy-ts8xr 1/1 Running 0 12h kube-system kube-scheduler-k8s-master01 1/1 Running 1 12h
Add node3
Clone a new VM for the node; kubeadm join fails:
[root@k8s-node03 ~]# kubeadm join 192.168.67.130:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:54061c49d48775a80026cce95b531df9e52e08b965b5ede4f5dfc74e2d038c31
[preflight] Running pre-flight checks
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.13. Latest validated version: 18.09
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR FileAvailable--etc-kubernetes-kubelet.conf]: /etc/kubernetes/kubelet.conf already exists
[ERROR FileAvailable--etc-kubernetes-bootstrap-kubelet.conf]: /etc/kubernetes/bootstrap-kubelet.conf already exists
[ERROR Port-10250]: Port 10250 is in use
[ERROR FileAvailable--etc-kubernetes-pki-ca.crt]: /etc/kubernetes/pki/ca.crt already exists
[root@k8s-node03 ~]# kubeadm reset
[reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] Are you sure you want to proceed? [y/N]: y
Even after the reset the node still cannot join, because the token has expired.
On the master, run kubeadm token create to get a new token (tokens are normally valid for 24 hours):
[root@k8s-master01 ~]# kubeadm token create
9sihb9.c4xrt1qai09401oq
Joining succeeds with the new token:
[root@k8s-node03 ~]# kubeadm join 192.168.67.130:6443 --token 9sihb9.c4xrt1qai09401oq --discovery-token-ca-cert-hash sha256:54061c49d48775a80026cce95b531df9e52e08b965b5ede4f5dfc74e2d038c31
[preflight] Running pre-flight checks
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.13. Latest validated version: 18.09
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@k8s-master01 ~]# kubectl get node
NAME           STATUS   ROLES    AGE    VERSION
k8s-master01   Ready    master   2d3h   v1.15.1
k8s-node01     Ready    <none>   2d3h   v1.15.1
k8s-node02     Ready    <none>   2d3h   v1.15.1
k8s-node03     Ready    <none>   69s    v1.15.1
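The reverse operation, removing a node cleanly, is also worth noting here (a hedged sketch: drain and delete on the master, then reset on the node itself):
kubectl drain k8s-node03 --ignore-daemonsets --delete-local-data   # on the master
kubectl delete node k8s-node03                                     # on the master
kubeadm reset                                                      # on k8s-node03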
Export the YAML
[root@k8s-master01 ~]# kubectl get deploy nginx-deployment -o=yaml --export
Flag --export has been deprecated, This flag is deprecated and will be removed in future.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "11"
    kubectl.kubernetes.io/last-applied-configuration: |
      {"apiVersion":"extensions/v1beta1","kind":"Deployment","metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"},"spec":{"replicas":2,"template":{"metadata":{"labels":{"app":"nginx-deployment"}},"spec":{"containers":[{"image":"nginx","name":"nginx","ports":[{"containerPort":22122}]}]}}}}
  creationTimestamp: null
  generation: 1
  labels:
    app: nginx-deployment
  name: nginx-deployment
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/nginx-deployment
spec:
  progressDeadlineSeconds: 2147483647
  replicas: 10
  revisionHistoryLimit: 2147483647
  selector:
    matchLabels:
      app: nginx-deployment
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations:
        kubectl.kubernetes.io/restartedAt: "2022-03-22T00:53:51+08:00"
      creationTimestamp: null
      labels:
        app: nginx-deployment
    spec:
      containers:
      - env:
        - name: TZ
          value: Asia/Shanghai
        image: nginx
        imagePullPolicy: Always
        name: nginx
        ports:
        - containerPort: 22122
          protocol: TCP
        resources:
          limits:
            cpu: "2"
            memory: 512Mi
          requests:
            cpu: 10m
            memory: 256Mi
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status: {}
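Since --export is deprecated, an alternative (just a sketch; the output file names are illustrative) is to dump the live object and strip the cluster-managed fields by hand, or to generate a fresh manifest with a dry run:
# Dump the live Deployment; fields such as status, selfLink and creationTimestamp
# still have to be removed manually before re-applying elsewhere.
kubectl get deploy nginx-deployment -o yaml > nginx-deployment.yaml
# Or generate a clean manifest without touching the cluster
# (kubectl 1.15 syntax; newer releases use --dry-run=client).
kubectl create deployment nginx-deployment --image=nginx --dry-run -o yaml > nginx-deployment-new.yaml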
dashboard
[root@k8s-master01 ~]# cat /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "insecure-registries": ["https://k1ktap5m.mirror.aliyuncs.com"]
}
[root@k8s-master01 ~]# docker pull kainonly/kubernetes-dashboard-amd64:v1.10.1
v1.10.1: Pulling from kainonly/kubernetes-dashboard-amd64
9518d8afb433: Pull complete
Digest: sha256:0ae6b69432e78069c5ce2bcde0fe409c5c4d6f0f4d9cd50a17974fea38898747
Status: Downloaded newer image for kainonly/kubernetes-dashboard-amd64:v1.10.1
docker.io/kainonly/kubernetes-dashboard-amd64:v1.10.1
[root@k8s-master01 ~]# kubectl apply -f kubernetes-dashboard.yaml
secret/kubernetes-dashboard-certs created
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
deployment.apps/kubernetes-dashboard created
service/kubernetes-dashboard created
[root@k8s-master01 ~]# kubectl get pod,svc -A|grep dashboard
kube-system   pod/kubernetes-dashboard-779b476744-z8c4c   1/1   Running   0   29m
kube-system   service/kubernetes-dashboard   NodePort   10.110.4.110   <none>   443:31080/TCP   29m
Open https://192.168.67.130:31080 in Firefox.
[root@k8s-master01 ~]# cat kubernetes-dashboard.yaml
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: docker.io/kainonly/kubernetes-dashboard-amd64:v1.10.1
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
        - --auto-generate-certificates
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - port: 443
    targetPort: 8443
    nodePort: 31080
  selector:
    k8s-app: kubernetes-dashboard
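A quick command-line check that the NodePort answers (a sketch; -k is needed because the dashboard certificate is self-generated):
# Verify the dashboard Service is reachable over HTTPS on the NodePort
curl -k https://192.168.67.130:31080/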
[root@k8s-master01 ~]# cat dashboard-svc-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-admin
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: dashboard-admin
subjects:
- kind: ServiceAccount
  name: dashboard-admin
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
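Roughly the same objects can also be created imperatively instead of applying the YAML above; a minimal sketch:
# Create the admin service account and bind it to the cluster-admin ClusterRole
kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin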
# Sign a certificate so the dashboard can be accessed from Chrome
[root@k8s-master01 ~]# mkdir kubernetes-dashboard-key && cd kubernetes-dashboard-key
[root@k8s-master01 kubernetes-dashboard-key]# openssl genrsa -out dashboard.key 2048
Generating RSA private key, 2048 bit long modulus
..........+++
...+++
e is 65537 (0x10001)
[root@k8s-master01 kubernetes-dashboard-key]# ll
total 4
-rw-r--r-- 1 root root 1675 Mar 23 01:50 dashboard.key
[root@k8s-master01 kubernetes-dashboard-key]# openssl req -days 3650 -new -out dashboard.csr -key dashboard.key -subj '/CN=192.168.100.100'
[root@k8s-master01 kubernetes-dashboard-key]# openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
Signature ok
subject=/CN=192.168.100.100
Getting Private key
[root@k8s-master01 kubernetes-dashboard-key]# ll
total 12
-rw-r--r-- 1 root root 993 Mar 23 01:50 dashboard.crt
-rw-r--r-- 1 root root 899 Mar 23 01:50 dashboard.csr
-rw-r--r-- 1 root root 1675 Mar 23 01:50 dashboard.key
[root@k8s-master01 kubernetes-dashboard-key]# kubectl delete secret kubernetes-dashboard-certs -n kube-system
secret "kubernetes-dashboard-certs" deleted
[root@k8s-master01 kubernetes-dashboard-key]# kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kube-system
secret/kubernetes-dashboard-certs created
[root@k8s-master01 ~]# kubectl get pod -A|grep dashboard
kubernetes-dashboard-779b476744-tdgvz 1/1 Running 0 3h16m
[root@k8s-master01 kubernetes-dashboard-key]# kubectl delete pod kubernetes-dashboard-779b476744-tdgvz -n kube-system
pod "kubernetes-dashboard-779b476744-tdgvz" deleted
[root@k8s-master01 ~]# kubectl get pod -A|grep dashboard
kube-system kubernetes-dashboard-779b476744-s4jch 1/1 Running 0 20s
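The certificate replacement above can be condensed into one pass; only a sketch, assuming the same file names and putting the master IP 192.168.67.130 in the CN:
# Generate a self-signed cert, replace the dashboard secret, and restart the pod
openssl genrsa -out dashboard.key 2048
openssl req -new -key dashboard.key -out dashboard.csr -subj '/CN=192.168.67.130'
openssl x509 -req -days 3650 -in dashboard.csr -signkey dashboard.key -out dashboard.crt
kubectl delete secret kubernetes-dashboard-certs -n kube-system
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kube-system
# Delete the running dashboard pod so the Deployment recreates it with the new cert
kubectl delete pod -n kube-system -l k8s-app=kubernetes-dashboard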
[root@k8s-master01 kubernetes-dashboard-key]# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 3d6h
kubernetes-dashboard NodePort 10.110.4.110 <none> 443:31080/TCP 9h
metrics-server ClusterIP 10.96.212.45 <none> 443/TCP 33h
Open https://192.168.67.130:31080/ in Chrome.
Enter the token when prompted.
# Get the token (after applying dashboard-svc-account.yaml above)
[root@k8s-master01 ~]# kubectl describe secret ` kubectl get secret -n kube-system |grep admin|awk '{print $1}' ` -n kube-system|grep '^token'|awk '{print $2}'
eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tdG9rZW4tdzl3djgiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGFzaGJvYXJkLWFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiMTliNWRkNTctZDdiMi00ZTk4LThmMTktMzc4YTY1YzAyOGIzIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmRhc2hib2FyZC1hZG1pbiJ9.PBye3-1rYtP0cnESxkMNrhP39e0uayBvqeULrWukUuKASjLDXRMx19cIVxzz74WwzDH1EANNyWcN4QkHPBphbIraGwaZgH8biqp0FzZfP5VdW6KV6bWt7twC8hq_0pxGohDOrjlr5u79Rtl1elx-p60fvTIY3HmWguXxM8K4u6QVDuQlsk7YZ9pCpQRNtmVmbln-5Rj6sqQAYrX29LV-lswGGgwR2JomfPFMZ-kdnm7KjrLgm_bh9qbYZwgok2VlJyKWAGZUebVHEPHZGehnEI2k6iVdPiCZ2WjILdwSE8lQqpUcpZYRMunUtO952eYsJlg7r49EQu0V1J04Txv-DQ
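An alternative way to pull just the token via jsonpath (a sketch; it assumes the dashboard-admin service account created above and the secret that k8s 1.15 auto-generates for it):
# Look up the secret bound to dashboard-admin and decode its token
SECRET=$(kubectl -n kube-system get serviceaccount dashboard-admin -o jsonpath='{.secrets[0].name}')
kubectl -n kube-system get secret "$SECRET" -o jsonpath='{.data.token}' | base64 --decode; echo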
Deployment vs ReplicationController in Kubernetes https://blog.csdn.net/u010884123/article/details/55213011
Kubernetes commands http://docs.kubernetes.org.cn/638.html
Namespaces, labels, and mounting a deployment https://blog.csdn.net/jiangbenchu/article/details/91517865
What actually happens after you run kubectl run? https://zhuanlan.zhihu.com/p/79774851
ReplicaSet, Deployment, DaemonSet https://www.cnblogs.com/bjlhx/p/13617166.html
K8s Master high-availability cluster setup
[root@k8s-master01 ~]# docker ps -a|grep Up
8f7c95f65e99   eb516548c180           "/coredns -conf /etc…"   2 hours ago   Up 2 hours   k8s_coredns_coredns-5c98db65d4-rwdtr_kube-system_f967ac2e-23fc-4b7f-8762-bead67dbaab3_6
f7f196f7b85e   eb516548c180           "/coredns -conf /etc…"   2 hours ago   Up 2 hours   k8s_coredns_coredns-5c98db65d4-zhqwb_kube-system_32bdf37c-06d9-46bb-93e3-f7cd03ffd750_5
e620d288b4e1   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours   k8s_POD_coredns-5c98db65d4-rwdtr_kube-system_f967ac2e-23fc-4b7f-8762-bead67dbaab3_64
fa48f0aac53f   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours   k8s_POD_coredns-5c98db65d4-zhqwb_kube-system_32bdf37c-06d9-46bb-93e3-f7cd03ffd750_62
c8df0377024e   d75082f1d121           "kube-controller-man…"   2 hours ago   Up 2 hours   k8s_kube-controller-manager_kube-controller-manager-k8s-master01_kube-system_5a1fa432561d9745fe013857ccb566c1_22
7ce3a8e22bb1   9247abf08677           "/opt/bin/flanneld -…"   2 hours ago   Up 2 hours   k8s_kube-flannel_kube-flannel-ds-kdmgx_kube-system_ded8ee32-7936-4c49-9af3-c6ed1e072de2_46
06958b247da2   68c3eb07bfc3           "kube-apiserver --ad…"   2 hours ago   Up 2 hours   k8s_kube-apiserver_kube-apiserver-k8s-master01_kube-system_21e2cd988cdb757666987c7460642659_49
e3f9f21510c8   89a062da739d           "/usr/local/bin/kube…"   2 hours ago   Up 2 hours   k8s_kube-proxy_kube-proxy-hpgj4_kube-system_752ff8cb-a6d2-4057-b66c-d806f2f94252_6
4fc01a7e344a   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours   k8s_POD_kube-flannel-ds-kdmgx_kube-system_ded8ee32-7936-4c49-9af3-c6ed1e072de2_6
10608c3fe272   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours   k8s_POD_kube-proxy-hpgj4_kube-system_752ff8cb-a6d2-4057-b66c-d806f2f94252_6
055a829e9094   2c4adeb21b4f           "etcd --advertise-cl…"   2 hours ago   Up 2 hours   k8s_etcd_etcd-k8s-master01_kube-system_9091c3932085dc9fa7b1927b2dd6af54_6
a02321513038   b0b3c4c404da           "kube-scheduler --bi…"   2 hours ago   Up 2 hours   k8s_kube-scheduler_kube-scheduler-k8s-master01_kube-system_ecae9d12d3610192347be3d1aa5aa552_21
c11e600e945f   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours   k8s_POD_kube-scheduler-k8s-master01_kube-system_ecae9d12d3610192347be3d1aa5aa552_6
09ac011a5ec8   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours   k8s_POD_kube-controller-manager-k8s-master01_kube-system_5a1fa432561d9745fe013857ccb566c1_6
2bef7e1b3f52   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours   k8s_POD_kube-apiserver-k8s-master01_kube-system_21e2cd988cdb757666987c7460642659_6
7d8415cadb4c   k8s.gcr.io/pause:3.1   "/pause"                 2 hours ago   Up 2 hours   k8s_POD_etcd-k8s-master01_kube-system_9091c3932085dc9fa7b1927b2dd6af54_6
[root@k8s-master01 ~]# kubectl get pod -A -o wide
NAMESPACE     NAME                                    READY   STATUS        RESTARTS   AGE    IP               NODE           NOMINATED NODE   READINESS GATES
default       nginx-deployment-7f58cf9455-bhnn8       1/1     Running       0          7m5s   10.244.1.78      k8s-node01     <none>           <none>
default       nginx-deployment-7f58cf9455-gc9vc       1/1     Terminating   0          102m   10.244.2.84      k8s-node02     <none>           <none>
kube-system   coredns-5c98db65d4-rwdtr                1/1     Running       6          3d2h   10.244.0.9       k8s-master01   <none>           <none>
kube-system   coredns-5c98db65d4-zhqwb                1/1     Running       5          3d2h   10.244.0.8       k8s-master01   <none>           <none>
kube-system   etcd-k8s-master01                       1/1     Running       6          3d2h   192.168.67.130   k8s-master01   <none>           <none>
kube-system   kube-apiserver-k8s-master01             1/1     Running       49         3d2h   192.168.67.130   k8s-master01   <none>           <none>
kube-system   kube-controller-manager-k8s-master01    1/1     Running       22         3d2h   192.168.67.130   k8s-master01   <none>           <none>
kube-system   kube-flannel-ds-jrhz6                   1/1     Running       45         3d1h   192.168.67.131   k8s-node01     <none>           <none>
kube-system   kube-flannel-ds-kdmgx                   1/1     Running       46         3d1h   192.168.67.130   k8s-master01   <none>           <none>
kube-system   kube-flannel-ds-qdj9k                   1/1     Running       1          22h    192.168.67.133   k8s-node03     <none>           <none>
kube-system   kube-flannel-ds-skqvq                   1/1     Running       37         3d1h   192.168.67.132   k8s-node02     <none>           <none>
kube-system   kube-proxy-hpgj4                        1/1     Running       6          3d2h   192.168.67.130   k8s-master01   <none>           <none>
kube-system   kube-proxy-q8rxb                        1/1     Running       2          3d2h   192.168.67.131   k8s-node01     <none>           <none>
kube-system   kube-proxy-sfn2g                        1/1     Running       1          22h    192.168.67.133   k8s-node03     <none>           <none>
kube-system   kube-proxy-ts8xr                        1/1     Running       5          3d2h   192.168.67.132   k8s-node02     <none>           <none>
kube-system   kube-scheduler-k8s-master01             1/1     Running       21         3d2h   192.168.67.130   k8s-master01   <none>           <none>
kube-system   kubernetes-dashboard-779b476744-tdgvz   1/1     Running       0          7m5s   10.244.1.77      k8s-node01     <none>           <none>
kube-system   kubernetes-dashboard-779b476744-z8c4c   1/1     Terminating   1          6h2m   10.244.2.83      k8s-node02     <none>           <none>
kube-system   metrics-server-6bfbbcff4d-prpb2         1/1     Terminating   0          102m   10.244.2.85      k8s-node02     <none>           <none>
kube-system   metrics-server-6bfbbcff4d-qcx5d         1/1     Running       0          7m5s   10.244.1.79      k8s-node01     <none>           <none>
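The pods stuck in Terminating on k8s-node02 will stay that way until the node reports back; if the node is known to be gone for good they can be force-removed (a sketch using the pod names shown above; only do this when the containers are definitely no longer running on that node):
# Force-delete pods left in Terminating after their node became unreachable
kubectl delete pod nginx-deployment-7f58cf9455-gc9vc --grace-period=0 --force
kubectl delete pod kubernetes-dashboard-779b476744-z8c4c -n kube-system --grace-period=0 --force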