1. Environment preparation
| name | ip address | system |
| --- | --- | --- |
| master | 192.168.211.130 | CentOS 7 64-bit |
| node1 | 192.168.211.131 | CentOS 7 64-bit |
| node2 | 192.168.211.132 | CentOS 7 64-bit |
2. Installation and deployment preparation
Synchronize the clocks on all three machines (master/node1/node2):
yum install -y ntp
ntpdate -u cn.pool.ntp.org
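Optionally, you can also enable the ntpd service (provided by the ntp package installed above) so the clocks stay in sync after a reboot:
systemctl enable ntpd
systemctl start ntpd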
Install the redhat-ca packages on the node machines (node1/node2):
yum install *rhsm* -y
Install the CA certificate the packages expect (node1/node2):
wget http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm
rpm2cpio python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm | cpio -iv --to-stdout ./etc/rhsm/ca/redhat-uep.pem | tee /etc/rhsm/ca/redhat-uep.pem
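To confirm the certificate landed where it is expected, you can inspect it with openssl (assuming openssl is installed):
openssl x509 -in /etc/rhsm/ca/redhat-uep.pem -noout -subject -dates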
3. Install the etcd cluster
- master node
yum -y install kubernetes-master etcd
vi /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd1"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.211.130:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.211.130:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.211.130:2380,etcd2=http://192.168.211.131:2380,etcd3=http://192.168.211.132:2380"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
#[Proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[Security]
#ETCD_CERT_FILE=""
#ETCD_KEY_FILE=""
#ETCD_CLIENT_CERT_AUTH="false"
#ETCD_TRUSTED_CA_FILE=""
#ETCD_AUTO_TLS="false"
#ETCD_PEER_CERT_FILE=""
#ETCD_PEER_KEY_FILE=""
#ETCD_PEER_CLIENT_CERT_AUTH="false"
#ETCD_PEER_TRUSTED_CA_FILE=""
#ETCD_PEER_AUTO_TLS="false"
#
#[Logging]
#ETCD_DEBUG="false"
#ETCD_LOG_PACKAGE_LEVELS=""
#ETCD_LOG_OUTPUT="default"
#
#[Unsafe]
#ETCD_FORCE_NEW_CLUSTER="false"
#
#[Version]
#ETCD_VERSION="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[Profiling]
#ETCD_ENABLE_PPROF="false"
#ETCD_METRICS="basic"
#
#[Auth]
#ETCD_AUTH_TOKEN="simple"
- node1 and node2 nodes
yum -y install kubernetes-node etcd flannel docker
vi /etc/etcd/etcd.conf
The file below is for node1; on node2 set ETCD_NAME="etcd3" and use 192.168.211.132 in ETCD_INITIAL_ADVERTISE_PEER_URLS and ETCD_ADVERTISE_CLIENT_URLS.
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd2"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.211.131:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.211.131:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.211.130:2380,etcd2=http://192.168.211.131:2380,etcd3=http://192.168.211.132:2380"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
#[Proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[Security]
#ETCD_CERT_FILE=""
#ETCD_KEY_FILE=""
#ETCD_CLIENT_CERT_AUTH="false"
#ETCD_TRUSTED_CA_FILE=""
#ETCD_AUTO_TLS="false"
#ETCD_PEER_CERT_FILE=""
#ETCD_PEER_KEY_FILE=""
#ETCD_PEER_CLIENT_CERT_AUTH="false"
#ETCD_PEER_TRUSTED_CA_FILE=""
#ETCD_PEER_AUTO_TLS="false"
#
#[Logging]
#ETCD_DEBUG="false"
#ETCD_LOG_PACKAGE_LEVELS=""
#ETCD_LOG_OUTPUT="default"
#
#[Unsafe]
#ETCD_FORCE_NEW_CLUSTER="false"
#
#[Version]
#ETCD_VERSION="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[Profiling]
#ETCD_ENABLE_PPROF="false"
#ETCD_METRICS="basic"
#
#[Auth]
#ETCD_AUTH_TOKEN="simple"
- Start the etcd cluster (master, node1, node2)
systemctl start etcd.service
systemctl status etcd.service
- Check the cluster health (master)
etcdctl cluster-health
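Beyond cluster-health, a quick write/read/delete round-trip confirms the cluster actually serves requests (etcdctl v2 commands, matching those used throughout this guide):
etcdctl set /test/key hello
etcdctl get /test/key
etcdctl rm /test/key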
4. Kubernetes cluster configuration
- master node
vim /etc/kubernetes/apiserver
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
#

# The address on the local server to listen to.
# KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1"
KUBE_API_ADDRESS="--address=0.0.0.0"

# The port on the local server to listen on.
KUBE_API_PORT="--port=8080"

# Port minions listen on
KUBELET_PORT="--kubelet-port=10250"

# Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.211.130:2379,http://192.168.211.131:2379,http://192.168.211.132:2379"

# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"

# default admission control policies
# KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"

# Add your own!
KUBE_API_ARGS=""
systemctl enable kube-apiserver   # enable at boot
systemctl enable kube-controller-manager
systemctl enable kube-scheduler
systemctl start kube-apiserver    # start now
systemctl start kube-controller-manager
systemctl start kube-scheduler
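At this point the control plane should be running. A quick sanity check on the master (kubectl talks to the local apiserver on port 8080):
kubectl get componentstatuses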
- node1 and node2 nodes
vim /etc/kubernetes/config
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service

# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"

# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://192.168.211.130:8080"
vim /etc/kubernetes/kubelet
The values below are for node2; on node1 use 192.168.211.131 in KUBELET_ADDRESS and KUBELET_HOSTNAME.
###
# kubernetes kubelet (minion) config

# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=192.168.211.132"

# The port for the info server to serve on
# KUBELET_PORT="--port=10250"

# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=192.168.211.132"

# location of the api-server
KUBELET_API_SERVER="--api-servers=http://192.168.211.130:8080"

# pod infrastructure container
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"

# Add your own!
KUBELET_ARGS=""
vim /etc/sysconfig/flanneld
# Flanneld configuration options

# etcd url location. Point this to the server where etcd runs
FLANNEL_ETCD_ENDPOINTS="http://192.168.211.130:2379"

# etcd config key. This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/atomic.io/network"
# Important: the network config under this prefix must be created manually on the master; see the etcdctl commands below.

# Any additional options that you want to pass
#FLANNEL_OPTIONS=""
## Because Linux also applies iptables rules underneath, run the following on each node: ##
## Note: the firewall must be disabled, since flannel uses several ports for node-to-node communication ##
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -F
iptables -L -n
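On CentOS 7, "disable the firewall" in the note above means stopping firewalld:
systemctl stop firewalld
systemctl disable firewalld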
# Run on the master: write the flannel network config under FLANNEL_ETCD_PREFIX.
# The two commands below are alternatives; the backend Type can be host-gw or vxlan.
etcdctl mk /atomic.io/network/config '{"Network":"10.2.0.0/16"}'
etcdctl set /atomic.io/network/config '{"Network":"172.17.0.0/16", "SubnetMin": "172.17.1.0", "SubnetMax": "172.17.254.0", "Backend":{"Type":"host-gw"}}'
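Before starting flanneld on the nodes, you can confirm the key is in place:
etcdctl get /atomic.io/network/config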
systemctl enable kubelet
systemctl enable kube-proxy
systemctl enable flanneld
systemctl enable docker
systemctl start kubelet
systemctl start kube-proxy
systemctl start flanneld
systemctl start docker
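Once flanneld is up, each node should have a subnet lease written to /run/flannel/subnet.env, which docker uses to configure its bridge. A quick check:
cat /run/flannel/subnet.env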
- Check the cluster status (master):
kubectl get nodes
etcdctl member list
etcdctl cluster-health
5. Kubernetes management
- Create pods
kubectl run my-nginx --image=nginx --replicas=2 --port=80
kubectl create -f deployment-nginx.yml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 4
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.12.2
        ports:
        - containerPort: 80
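After kubectl create -f deployment-nginx.yml, you can watch the rollout using the app=nginx label from the manifest above:
kubectl get deployment nginx-deployment
kubectl get pods -l app=nginx -o wide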
- View pod information
kubectl get pods -o wide
- Delete a pod
kubectl delete pods my-nginx-379829228-bkt21
- Exec into a pod and run bash
kubectl exec -it my-nginx-379829228-462dl bash
- Map a pod port to a host port
kubectl port-forward my-nginx-379829228-ng2jb 80:80 # forwards on the host where kubectl runs (the master); reachable only via http://127.0.0.1:80
kubectl expose deployment nginx-deployment --type=NodePort # exposes on the nodes (node1, node2), **recommended**; see the sketch below for finding the assigned port
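With --type=NodePort, Kubernetes picks a port in the default 30000-32767 range. A sketch for discovering it with jsonpath output and testing against a node IP from the table in section 1 (kubectl expose deployment nginx-deployment names the service nginx-deployment):
NODE_PORT=$(kubectl get svc nginx-deployment -o jsonpath='{.spec.ports[0].nodePort}')
curl http://192.168.211.131:${NODE_PORT}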
- Change the number of pods (controller types include: rc [Replication Controller], rs [ReplicaSet], deployment, job); a deployment example follows below
kubectl scale rc my-nginx --replicas=4
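The same scaling works for the deployment created earlier:
kubectl scale deployment nginx-deployment --replicas=2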
- Delete pods (by file or by name; only json and yaml files are supported)
kubectl delete deployment --all
kubectl delete -f rc_nginx.yaml
- Upgrade the image of pods created through a deployment
kubectl set image deployment nginx-deployment nginx=nginx:1.13
- View the deployment's image upgrade (rollout) history
kubectl rollout history deployment nginx-deployment
- Roll back an image upgrade
kubectl rollout undo deployment nginx-deployment
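rollout undo can also target a specific revision (the revision number 2 below is illustrative; pick one from kubectl rollout history), and rollout status watches the progress:
kubectl rollout undo deployment nginx-deployment --to-revision=2
kubectl rollout status deployment nginx-deployment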
- View nodes
kubectl get node -o wide
- Create a service
The kubectl expose command creates a service in front of our pods so they can be reached from outside. A service can also be created from a YAML file (a sketch follows at the end of this section), or resolved via DNS (which requires the DNS add-on).
Services come in three main types: ClusterIP, NodePort, LoadBalancer.
kubectl get svc # list services
kubectl expose pods pod-nginx # creates a ClusterIP service; the cluster IP never changes, whereas the pod IPs behind it can
# requests to the cluster ip + port are load-balanced across the pods automatically
kubectl expose pods pod-nginx --type=NodePort # creates a NodePort service; --type defaults to ClusterIP
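As noted above, a service can also be created from a YAML file. A minimal sketch fed to kubectl via a heredoc (the name nginx-service is illustrative; the selector must match your pods' labels, here app=nginx from the earlier deployment):
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - port: 80
    targetPort: 80
EOF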
