一. Kubernetes node overview
A Kubernetes node runs two components: the kubelet, which registers the node with the apiserver and manages pods, and kube-proxy, which maintains the network rules that implement Services. This section deploys both onto the worker nodes.
二. Deploying the node services
1. Create the cluster role binding on the master
kubectl create clusterrolebinding kubelet-bootstrap \
  --clusterrole=system:node-bootstrapper \
  --user=kubelet-bootstrap
# Command to delete the cluster role binding
kubectl delete clusterrolebinding kubelet-bootstrap
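To confirm the kubelet-bootstrap binding exists after the create step, a quick check on the master (a minimal verification, not part of the original procedure) is:

# Verify the binding and the user it is bound to
kubectl get clusterrolebinding kubelet-bootstrap -o yaml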
2. Create the kubeconfig files on the master
Run kubeconfig.sh on the master to generate kube-proxy.kubeconfig and bootstrap.kubeconfig:
#!/bin/bash

# Create the TLS bootstrapping token
#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008

#cat > token.csv <<EOF
#${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
#EOF

#----------------------

APISERVER=$1
SSL_DIR=$2

# Create the kubelet bootstrapping kubeconfig
export KUBE_APISERVER="https://$APISERVER:6443"

# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig

# Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig

# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig

# Switch to the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

#----------------------

# Create the kube-proxy kubeconfig
kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate=$SSL_DIR/kube-proxy.pem \
  --client-key=$SSL_DIR/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
sh -x kubeconfig.sh 192.168.244.226 /usr/local/src/k8s_config/ssl    # directory where the master's SSL certificates were generated
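Before copying the files to the nodes, it is worth a quick sanity check that the generated kubeconfigs point at the right apiserver (a minimal check, run in the directory where the files were generated; not part of the original procedure):

# The server field should read https://192.168.244.226:6443 in both files
kubectl config view --kubeconfig=bootstrap.kubeconfig
kubectl config view --kubeconfig=kube-proxy.kubeconfig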
Copy kube-proxy.kubeconfig and bootstrap.kubeconfig to /opt/kubernetes/cfg on each node:
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.244.227:/opt/kubernetes/cfg/
scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.244.228:/opt/kubernetes/cfg/
3. Deploy the kubelet and kube-proxy components on the nodes
Copy the kubelet and kube-proxy binaries extracted from the kubernetes package to /opt/kubernetes/bin on each node (run on the master):
scp kubelet kube-proxy root@192.168.244.227:/opt/kubernetes/bin/
scp kubelet kube-proxy root@192.168.244.228:/opt/kubernetes/bin/
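A quick check on each node that the binaries arrived and are executable (a sketch, not part of the original steps; kubelet --version only prints the version and exits):

# If the execute bit was lost in the copy, run: chmod +x /opt/kubernetes/bin/kubelet /opt/kubernetes/bin/kube-proxy
ls -l /opt/kubernetes/bin/kubelet /opt/kubernetes/bin/kube-proxy
/opt/kubernetes/bin/kubelet --version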
On the node, run kubelet.sh to generate the kubelet configuration and systemd unit:
#!/bin/bash

NODE_ADDRESS=$1
DNS_SERVER_IP=${2:-"10.0.0.2"}

cat <<EOF >/opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--address=${NODE_ADDRESS} \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.config \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 \\
--feature-gates=AttachVolumeLimit=false"
EOF

cat <<EOF >/opt/kubernetes/cfg/kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_ADDRESS}
port: 10250
cgroupDriver: cgroupfs
clusterDNS:
- ${DNS_SERVER_IP}
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
EOF

cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
sh -x kubelet.sh 192.168.244.227
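If the kubelet fails to come up, the systemd status and journal are the first places to look (a minimal troubleshooting check; journal output varies by distribution):

# Confirm the kubelet service is active and follow its log output
systemctl status kubelet
journalctl -u kubelet -f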
On the node, run kube-proxy.sh to generate the kube-proxy configuration and systemd unit:
#!/bin/bash

NODE_ADDRESS=$1

cat <<EOF >/opt/kubernetes/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=false \\
--log-dir=/opt/kubernetes/logs \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--cluster-cidr=10.0.0.0/24 \\
--proxy-mode=ipvs \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
sh -x kube-proxy.sh 192.168.244.227
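Because --proxy-mode=ipvs is used, the node needs the IPVS kernel modules and the ipvsadm/ipset userspace tools. If kube-proxy does not start cleanly, a rough check on the node (a sketch assuming a yum-based system; package names may differ):

# Install the IPVS tools, confirm the kernel modules are loaded, and check the service
yum install -y ipvsadm ipset
lsmod | grep ip_vs
systemctl status kube-proxy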
4. The node requests a certificate from the master
After the kubelet and kube-proxy on the node start successfully, the kubelet sends a CSR (certificate signing request) to the cluster master; the master must approve the request and issue the node its certificate.
Check the certificate request status of the nodes (on the master):
kubectl get csr
kubectl certificate approve node-csr-ALySpQjD9y6MigTviznjWghSvqp4uMvitTTNj3d4bmQ
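Once the request is approved, its condition changes to Approved,Issued and the node registers itself; a quick way to confirm on the master (not part of the original steps) is:

# The node should appear and move to Ready after a short delay
kubectl get csr
kubectl get node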
三. Run a test workload to verify the cluster is working
# Create a deployment with two nginx pods
kubectl run nginx --image=nginx --replicas=2

# Check pod status
kubectl get pod -o wide

# Expose the deployment outside the cluster as a NodePort service
kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort

# Check the service
kubectl get svc nginx

# Show detailed pod status
kubectl describe pod nginx
Open http://192.168.244.228:35814/ in a browser (35814 is the NodePort reported by kubectl get svc nginx); the nginx welcome page should appear.
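If a browser is not handy, the same check can be done from the command line (a sketch; substitute whatever NodePort kubectl get svc nginx reports in your cluster):

# Look up the assigned NodePort, then fetch the welcome page through any node IP
kubectl get svc nginx
curl http://192.168.244.228:35814/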
四. Viewing pod logs
Grant permission to view logs (bind the anonymous user to cluster-admin):
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
kubectl logs nginx-dbddb74b8-7sdlx
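The pod name (nginx-dbddb74b8-7sdlx here) comes from kubectl get pod and will differ in your cluster; two commonly useful variants of the same command (added for illustration, not part of the original steps):

# Follow the log stream, or limit output to the most recent lines
kubectl logs -f nginx-dbddb74b8-7sdlx
kubectl logs --tail=20 nginx-dbddb74b8-7sdlx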
五. Deploy a simple web UI (the Kubernetes dashboard)
cd /usr/local/src/k8s_config/kubernetes
tar zxvf kubernetes-src.tar.gz
cd cluster/addons/dashboard

# Create the dashboard components
kubectl create -f dashboard-configmap.yaml
kubectl create -f dashboard-rbac.yaml
kubectl create -f dashboard-secret.yaml
kubectl create -f dashboard-controller.yaml
Edit dashboard-service.yaml:
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  type: NodePort    # added: expose the service for external access
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 443
    targetPort: 8443
kubectl create -f dashboard-service.yaml
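Before looking up the port, it is worth confirming the dashboard pod actually came up (a minimal check, run on the master):

# The kubernetes-dashboard pod should be Running
kubectl get pods -n kube-system -o wide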
kubectl create -f k8s-admin.yaml
kubectl get secret -n kube-system
kubectl describe secret dashboard-admin-token-kpwvf -n kube-system    # view the login token
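The secret name (dashboard-admin-token-kpwvf here) is generated with a random suffix, so a small helper to look it up and print the token in one go may be convenient (a sketch, assuming the secret name contains "dashboard-admin"):

# Find the dashboard-admin token secret and print its token
kubectl -n kube-system describe secret \
  $(kubectl -n kube-system get secret | grep dashboard-admin | awk '{print $1}')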
Check the service port (see the check below):
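The dashboard was exposed as a NodePort service, so the assigned port can be read from the service itself (a minimal check, run on the master; the NodePort mapped to 443 is the one to use in the browser):

kubectl get svc -n kube-system kubernetes-dashboard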
Open https://192.168.244.227:41887 in a browser and log in with the token obtained above.