The k8s cluster setup follows someone else's guide (I have lost the link). Ideally you want 5 machines, 3 masters and 2 workers, since at least 3 masters are needed for a truly highly available control plane; the walkthrough below makes do with 2 masters (master1, master2) and 2 workers (node1, node2).
The pipeline: Jenkins builds the code >> the build artifact is baked into an image >> the old image in the registry is backed up >> the new image is pushed to the registry >> k8s pulls the image from the registry >> the yaml files are applied to roll out the updated service (the Jenkins script and the yaml files are at the end of this article).
#Disable firewalld
systemctl disable firewalld
systemctl stop firewalld
setenforce 0 #temporarily disable SELinux
#To disable SELinux permanently, set SELINUX=disabled in /etc/sysconfig/selinux
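#For example (on CentOS 7, /etc/sysconfig/selinux is a symlink to /etc/selinux/config, so edit the real file):
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config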
hostnamectl set-hostname master1 #set the hostname; each server gets its own name
#Configure the hosts file (the same entries on every machine)
cat /etc/hosts
192.168.51.213 master1
192.168.51.214 master2
192.168.51.211 node1
192.168.51.212 node2
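#A quick way to push the same entries onto each machine (a sketch; skip any entries that already exist):
cat >> /etc/hosts <<EOF
192.168.51.213 master1
192.168.51.214 master2
192.168.51.211 node1
192.168.51.212 node2
EOF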
#Set up passwordless SSH from master1 (only master1 needs this step)
ssh-keygen -t rsa
ssh-copy-id master2
ssh-copy-id node1
ssh-copy-id node2
#Enable forwarding and related kernel settings
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
sysctl --system
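#The two bridge-nf-call settings only exist once the br_netfilter kernel module is loaded; if sysctl --system reports "No such file or directory" for them, load the module and re-run:
modprobe br_netfilter
sysctl --system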
#Disable the swap partition
swapoff -a
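#swapoff -a only lasts until reboot; to make it permanent, comment out the swap line in /etc/fstab, for example (check the file afterwards):
sed -i '/swap/ s/^/#/' /etc/fstab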
#Sync the clock
ntpdate ntp1.aliyun.com
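#ntpdate is a one-shot sync; to keep the clocks in step you could schedule it via cron (a sketch):
(crontab -l 2>/dev/null; echo "0 */6 * * * /usr/sbin/ntpdate ntp1.aliyun.com") | crontab -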
#Enable ipvs as the forwarding mechanism by loading its kernel modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
#Switch the yum repos to the Aliyun mirrors
cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
wget http://mirrors.aliyun.com/repo/Centos-7.repo -O /etc/yum.repos.d/CentOS-Base.repo
wget http://mirrors.aliyun.com/repo/epel-7.repo -O /etc/yum.repos.d/epel.repo
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
#Install and configure keepalived and haproxy (only the 2 masters need this)
yum install -y keepalived haproxy
#Edit the keepalived config: priority is 100 on master1 and 90 on master2; everything else is identical on both
cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
        liumingtao@gxcards.com
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id LVS_1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens192
    lvs_sync_daemon_interface ens192
    virtual_router_id 88
    advert_int 1
    priority 100
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.51.210/24
    }
}
#Edit the haproxy config; it is identical on both masters
cat /etc/haproxy/haproxy.cfg
global
    chroot /var/lib/haproxy
    daemon
    group haproxy
    user haproxy
    log 127.0.0.1:514 local0 warning
    pidfile /var/lib/haproxy.pid
    maxconn 20000
    spread-checks 3
    nbproc 8
defaults
    log global
    mode tcp
    retries 3
    option redispatch
listen https-apiserver
    bind 192.168.51.210:8443
    mode tcp
    balance roundrobin
    timeout server 15s
    timeout connect 15s
    server apiserver01 192.168.51.213:6443 check port 6443 inter 5000 fall 5
    server apiserver02 192.168.51.214:6443 check port 6443 inter 5000 fall 5
#Enable and start keepalived and haproxy
systemctl enable keepalived && systemctl start keepalived
systemctl enable haproxy && systemctl start haproxy
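#Sanity check: the VIP should land on master1 (priority 100) and haproxy should be listening on 8443
ip addr show ens192 | grep 192.168.51.210
ss -tlnp | grep 8443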
#Install the k8s components (all 4 machines need this)
yum install -y kubelet kubeadm kubectl ipvsadm ipset docker-ce
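#The line above installs the latest packages, while kubeadm-init.yaml below pins kubernetesVersion: v1.14.0; to avoid a version mismatch you may want to pin the packages too, e.g.:
#yum install -y kubelet-1.14.0 kubeadm-1.14.0 kubectl-1.14.0 ipvsadm ipset docker-ce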
#Start docker
systemctl enable docker && systemctl start docker
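#The Jenkins script at the end pushes to a private registry at 192.168.51.211:5000; if that registry is served over plain HTTP (no TLS), each docker host needs it whitelisted (a sketch; merge with any existing daemon.json):
cat > /etc/docker/daemon.json <<EOF
{
  "insecure-registries": ["192.168.51.211:5000"]
}
EOF
systemctl restart docker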
#Enable kubelet at boot
systemctl enable kubelet
#Dump the default cluster config to a file and edit it (only master1 needs this)
kubeadm config print init-defaults > kubeadm-init.yaml
cat kubeadm-init.yaml
#Changes made: advertiseAddress is set to master1's IP; controlPlaneEndpoint is set to the VIP, 192.168.51.210:8443; imageRepository is changed to registry.cn-hangzhou.aliyuncs.com/google_containers so the images can be pulled from Aliyun; podSubnet is set to 10.244.0.0/16 for the flannel network (if it is missing or wrong, flannel will not install successfully); the last 3 lines are appended to switch kube-proxy to ipvs mode
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.51.213
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.51.210:8443"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
#Pull the images the cluster needs (on master1)
kubeadm config images pull --config kubeadm-init.yaml
#Initialize the cluster
kubeadm init --config kubeadm-init.yaml
#Set up the kubectl admin config (on master1)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
#Run this script (on master1) to copy master1's certificates over to master2
USER=root
CONTROL_PLANE_IPS="master2"
for host in ${CONTROL_PLANE_IPS}; do
    ssh "${USER}"@$host "mkdir -p /etc/kubernetes/pki/etcd"
    scp /etc/kubernetes/pki/ca.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/etcd/ca.* "${USER}"@$host:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/
done
#Initializing master1 prints 2 join commands similar to the ones below
kubeadm join 192.168.51.210:8443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:c8195afaff02deb8f263d59e81a6abf0efaf52d799df07e555d221e9b0feb301 --experimental-control-plane --ignore-preflight-errors=all #run this one on master2; it takes a while, so be patient
#After the join finishes on master2, set up the kubectl admin config there as well
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubeadm join 192.168.51.210:8443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:c8195afaff02deb8f263d59e81a6abf0efaf52d799df07e555d221e9b0feb301 #run this one on both worker nodes; it finishes quickly
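#The token in kubeadm-init.yaml expires after 24h (its ttl); if a node joins later, print a fresh join command on master1:
kubeadm token create --print-join-command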
#Install the network plugin
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml #(run on master1)
#Check that every node joined the cluster
kubectl get nodes
#Check that the k8s components came up (every STATUS should be Running)
kubectl get pod -n kube-system
#Check that the cluster network is working
ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.96.0.1:443 rr
-> 192.168.51.213:6443 Masq 1 0 0
-> 192.168.51.214:6443 Masq 1 1 0
TCP 10.96.0.10:53 rr
-> 10.244.1.2:53 Masq 1 0 0
-> 10.244.1.3:53 Masq 1 0 0
TCP 10.96.0.10:9153 rr
-> 10.244.1.2:9153 Masq 1 0 0
-> 10.244.1.3:9153 Masq 1 0 0
UDP 10.96.0.10:53 rr
-> 10.244.1.2:53 Masq 1 0 0
-> 10.244.1.3:53 Masq 1 0 0
# --- End of the cluster setup ---
The script Jenkins runs after it has pulled and built the code:
#!/bin/bash
##Write the Dockerfile that puts the jar into the image (overwrite on every build rather than append)
cat <<EOF > /var/lib/jenkins/workspace/app-web-user/Dockerfile
FROM centos7-java-webuser
RUN mkdir -p /app/web-user/
COPY default/web-user/target/app-web-user.jar /app/web-user/
EXPOSE 12220
EXPOSE 13220
WORKDIR /app/web-user/
# each JVM flag must be its own array element in exec form, or java sees them all as one argument
ENTRYPOINT ["/usr/local/java/jdk1.8.0_101/bin/java","-Dsun.misc.URLClassPath.disableJarChecking=true","-Xmx128M","-Xms128M","-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=12220","-jar","app-web-user.jar"]
EOF
##Timestamp suffix for the image name
DATE=`date +%y%m%d%H%M`
cd /var/lib/jenkins/workspace/app-web-user
##Build the docker image from the Dockerfile
docker build -t 192.168.51.211:5000/webuser_$DATE .
##Push the image to the local private registry
docker push 192.168.51.211:5000/webuser_$DATE
###Point the image in the manifest at the new build (line 14 of web-user-pod.yaml is the image: line)
sed -i "14s/.*/        image: 192.168.51.211:5000\/webuser_$DATE/g" /k8s/web-user/web-user-pod.yaml
###Have k8s roll the containers onto the new image
/usr/bin/kubectl apply -f /k8s/web-user/
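##Optionally wait for the rollout to finish so a failed deploy fails the Jenkins job (the deployment name comes from web-user-pod.yaml below):
/usr/bin/kubectl rollout status deployment/web-user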
The k8s yaml files for the app:
cd /k8s/web-user/
cat web-user-pod.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: web-user
spec:
  replicas: 2
  template:
    metadata:
      labels:
        app: web-user
    spec:
      containers:
      - name: web-user
        image: 192.168.51.211:5000/webuser_1908211449
        ports:
        - containerPort: 13220
cat web-user-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: web-user
spec:
  ports:
  - name: web-user-svc
    port: 13220
    targetPort: 13220
    nodePort: 32221
  selector:
    app: web-user
  type: NodePort
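#Quick check that the Service is up; any node IP works for a NodePort (this assumes the app answers HTTP on 13220):
kubectl get svc web-user
curl http://192.168.51.211:32221/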