Kubernetes Cluster Deployment, Part 6: Flannel Network Deployment


1. Create the certificate signing request for Flannel:

[root@k8s-master ~]# cd /usr/local/src/ssl/
[root@k8s-master ssl]# cat > flanneld-csr.json <<EOF
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

EOF

2. Generate the certificate

[root@k8s-master ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
   -ca-key=/opt/kubernetes/ssl/ca-key.pem \
   -config=/opt/kubernetes/ssl/ca-config.json \
   -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
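
Before distributing the certificate, it can be worth a quick check of its subject and validity dates (an optional step, assuming openssl is available on the master):

[root@k8s-master ssl]# openssl x509 -noout -subject -dates -in flanneld.pem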
   

3. Distribute the certificate files

[root@k8s-master ssl]# cp flanneld*.pem /opt/kubernetes/ssl/
[root@k8s-master ssl]# scp flanneld*.pem 10.200.3.106:/opt/kubernetes/ssl/
[root@k8s-master ssl]# scp flanneld*.pem 10.200.3.107:/opt/kubernetes/ssl/

4. Download the Flannel package

[root@k8s-master ssl]# cd /usr/local/src
[root@k8s-master src]# wget \
 https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
[root@k8s-master src]# tar zxf flannel-v0.10.0-linux-amd64.tar.gz
[root@k8s-master src]# cp flanneld mk-docker-opts.sh /opt/kubernetes/bin/

Distribute the binaries to the node servers

[root@k8s-master src]# scp flanneld mk-docker-opts.sh 10.200.3.106:/opt/kubernetes/bin/
[root@k8s-master src]# scp flanneld mk-docker-opts.sh 10.200.3.107:/opt/kubernetes/bin/
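
As an optional check that the copies landed correctly, the binary can be asked for its version on each node; it should report v0.10.0 (the exact flag spelling is an assumption for this build, check the binary's help output if it differs):

[root@k8s-master src]# /opt/kubernetes/bin/flanneld --version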

Distribute the remove-docker0.sh helper script to the /opt/kubernetes/bin directory on all nodes

[root@k8s-master src]# cd /usr/local/src/kubernetes/cluster/centos/node/bin/
[root@k8s-master bin]# cp remove-docker0.sh /opt/kubernetes/bin/
[root@k8s-master bin]# scp remove-docker0.sh 10.200.3.106:/opt/kubernetes/bin/
remove-docker0.sh                                                                                  100%  850     0.8KB/s   00:00    
[root@k8s-master bin]# scp remove-docker0.sh 10.200.3.107:/opt/kubernetes/bin/
remove-docker0.sh                                                                                  100%  850     0.8KB/s   00:00    
[root@k8s-master bin]# 

5. Configure Flannel

[root@k8s-master ~]# cat > /opt/kubernetes/cfg/flannel  <<EOF
FLANNEL_ETCD="-etcd-endpoints=https://10.200.3.105:2379,https://10.200.3.106:2379,https://10.200.3.107:2379"
FLANNEL_ETCD_KEY="-etcd-prefix=/kubernetes/network"
FLANNEL_ETCD_CAFILE="--etcd-cafile=/opt/kubernetes/ssl/ca.pem"
FLANNEL_ETCD_CERTFILE="--etcd-certfile=/opt/kubernetes/ssl/flanneld.pem"
FLANNEL_ETCD_KEYFILE="--etcd-keyfile=/opt/kubernetes/ssl/flanneld-key.pem"

EOF

Distribute the configuration to the other nodes

[root@k8s-master ~]# scp /opt/kubernetes/cfg/flannel 10.200.3.106:/opt/kubernetes/cfg/
[root@k8s-master ~]# scp /opt/kubernetes/cfg/flannel 10.200.3.107:/opt/kubernetes/cfg/
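
Because the file is just shell variable assignments, a quick way to confirm it parses as intended on each node is to source it in a throwaway shell (a sanity check only):

[root@k8s-master ~]# bash -c 'source /opt/kubernetes/cfg/flannel; echo ${FLANNEL_ETCD}'
-etcd-endpoints=https://10.200.3.105:2379,https://10.200.3.106:2379,https://10.200.3.107:2379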

6. Set up the Flannel systemd service

[root@k8s-master ~]# vim /usr/lib/systemd/system/flannel.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=-/opt/kubernetes/cfg/flannel
ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh
ExecStart=/opt/kubernetes/bin/flanneld ${FLANNEL_ETCD} ${FLANNEL_ETCD_KEY} ${FLANNEL_ETCD_CAFILE} ${FLANNEL_ETCD_CERTFILE} ${FLANNEL_ETCD_KEYFILE}
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service

Copy the systemd unit file to the other nodes

[root@k8s-master ~]# scp /usr/lib/systemd/system/flannel.service 10.200.3.106:/usr/lib/systemd/system/
[root@k8s-master ~]# scp /usr/lib/systemd/system/flannel.service 10.200.3.107:/usr/lib/systemd/system/

7. Flannel CNI integration
Download the CNI plugins

GitHub releases: https://github.com/containernetworking/plugins/releases

[root@k8s-master ~]# cd /usr/local/src/

[root@k8s-master src]# wget \
 https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz

Create this directory on all nodes to hold the CNI plugin binaries.

[root@k8s-master src]# mkdir /opt/kubernetes/bin/cni
[root@k8s-master src]# tar zxf cni-plugins-amd64-v0.7.1.tgz -C /opt/kubernetes/bin/cni
[root@k8s-master src]# scp -r /opt/kubernetes/bin/cni/* 10.200.3.106:/opt/kubernetes/bin/cni/
[root@k8s-master src]# scp -r /opt/kubernetes/bin/cni/* 10.200.3.107:/opt/kubernetes/bin/cni/
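
Listing the directory should now show the individual plugin binaries; the v0.7.1 bundle includes bridge, flannel, host-local, loopback and portmap, among others:

[root@k8s-master src]# ls /opt/kubernetes/bin/cni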

8. Create the Flannel network key in etcd on the master node

/opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem --cert-file /opt/kubernetes/ssl/flanneld.pem --key-file /opt/kubernetes/ssl/flanneld-key.pem \
      --no-sync -C https://10.200.3.105:2379,https://10.200.3.106:2379,https://10.200.3.107:2379 \
mk /kubernetes/network/config '{ "Network": "10.2.0.0/16", "Backend": { "Type": "vxlan", "VNI": 1 }}'
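
The key can be read back with the same TLS flags to confirm it was written correctly (this uses the etcd v2 API, which the mk subcommand above already implies):

/opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem --cert-file /opt/kubernetes/ssl/flanneld.pem --key-file /opt/kubernetes/ssl/flanneld-key.pem \
      --no-sync -C https://10.200.3.105:2379,https://10.200.3.106:2379,https://10.200.3.107:2379 \
      get /kubernetes/network/config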

9. Start Flannel

# systemctl daemon-reload
# systemctl enable flannel
# chmod +x /opt/kubernetes/bin/*
# systemctl start flannel
Check the service status
# systemctl status flannel
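
If the service is active, flanneld will have leased a per-node /24 out of the 10.2.0.0/16 network configured in etcd and recorded it in /run/flannel/subnet.env, which mk-docker-opts.sh reads in the next step. The contents look roughly like the following (illustrative values; the subnet differs per node):

# cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.2.0.0/16
FLANNEL_SUBNET=10.2.1.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=false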

10. Configure Docker to use Flannel

1. In the [Unit] section, append flannel.service to the After= line and add Requires=flannel.service below the Wants= line.
2. In the [Service] section, add EnvironmentFile=-/run/flannel/docker after Type= and append $DOCKER_OPTS to the ExecStart= line (see the note after the unit file for what this environment file contains).

The configuration then looks like this:

[root@k8s-master ~]# vim /usr/lib/systemd/system/docker.service 
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service flannel.service
Wants=network-online.target
Requires=flannel.service

[Service]
Type=notify
EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd $DOCKER_OPTS
...
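
For reference, /run/flannel/docker is the file generated by mk-docker-opts.sh in flannel.service's ExecStartPost. It converts the subnet leased by flanneld into Docker daemon options, so on a node holding 10.2.1.0/24 it would contain something along these lines (an illustrative sketch, not output captured from this cluster):

# cat /run/flannel/docker
DOCKER_OPT_BIP="--bip=10.2.1.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=true"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_OPTS=" --bip=10.2.1.1/24 --ip-masq=true --mtu=1450"

With EnvironmentFile=-/run/flannel/docker in place, the $DOCKER_OPTS reference in ExecStart picks these options up, which is what puts docker0 inside the node's Flannel subnet.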

Distribute the configuration to the other two nodes

[root@k8s-master ~]# rsync -av  /usr/lib/systemd/system/docker.service  10.200.3.106:/usr/lib/systemd/system/docker.service 
[root@k8s-master ~]# rsync -av  /usr/lib/systemd/system/docker.service  10.200.3.107:/usr/lib/systemd/system/docker.service

Restart the Docker service

# systemctl daemon-reload
# systemctl restart docker

If docker0 and flannel.1 end up in the same subnet, the network is working as expected:

docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 10.2.1.1  netmask 255.255.255.0  broadcast 10.2.1.255
        ether 02:42:47:f1:94:9e  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 10.2.1.0  netmask 255.255.255.255  broadcast 0.0.0.0
        ether a2:5d:b6:0a:89:88  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 1 overruns 0  carrier 0  collisions 0

At this point the Flannel network is in place and the Kubernetes cluster deployment is complete. Next, we create pods to test network connectivity across the cluster.

 

Create the first Kubernetes application to verify that the cluster nodes can communicate

1. Create a test nginx deployment with three replicas.

[root@k8s-master ~]# kubectl run my-nginx --image=nginx --replicas=3 --port=80
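
A side note: on newer kubectl releases, kubectl run creates a bare pod instead of a Deployment, so the rough equivalent there would be the following (shown only as a hint; it is not used in this walkthrough):

kubectl create deployment my-nginx --image=nginx
kubectl scale deployment my-nginx --replicas=3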

2. Check the IP addresses assigned to the pods

[root@k8s-master ~]# kubectl get pod -o wide
NAME                             READY     STATUS    RESTARTS   AGE       IP            NODE
nginx-service-75ccfbd5c9-f7ghk   1/1       Running   0          1d        10.2.58.179   10.20.9.222
nginx-service-75ccfbd5c9-mhw5m   1/1       Running   0          1d        10.2.49.2     10.20.9.221
nginx-service-75ccfbd5c9-sdl6q   1/1       Running   0          1d        10.2.49.3     10.20.9.221

3. Test connectivity

[root@k8s-master ~]# ping 10.2.58.179
PING 10.2.58.179 (10.2.58.179) 56(84) bytes of data.
64 bytes from 10.2.58.179: icmp_seq=1 ttl=63 time=0.206 ms
64 bytes from 10.2.58.179: icmp_seq=2 ttl=63 time=0.162 ms
64 bytes from 10.2.58.179: icmp_seq=3 ttl=63 time=0.131 ms
^C
--- 10.2.58.179 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.131/0.166/0.206/0.032 ms
[root@k8s-master ~]# ping 10.2.49.2
PING 10.2.49.2 (10.2.49.2) 56(84) bytes of data.
64 bytes from 10.2.49.2: icmp_seq=1 ttl=63 time=0.200 ms
64 bytes from 10.2.49.2: icmp_seq=2 ttl=63 time=0.138 ms
64 bytes from 10.2.49.2: icmp_seq=3 ttl=63 time=0.145 ms
^C
--- 10.2.49.2 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.138/0.161/0.200/0.027 ms
[root@k8s-master ~]# ping 10.2.49.3
PING 10.2.49.3 (10.2.49.3) 56(84) bytes of data.
64 bytes from 10.2.49.3: icmp_seq=1 ttl=63 time=0.176 ms
64 bytes from 10.2.49.3: icmp_seq=2 ttl=63 time=0.125 ms
64 bytes from 10.2.49.3: icmp_seq=3 ttl=63 time=0.123 ms
^C
--- 10.2.49.3 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.123/0.141/0.176/0.026 ms
[root@k8s-master ~]# 

4. Expose the deployment by creating a service.

kubectl expose deployment my-nginx --port=8080 --target-port=80 --external-ip=10.20.9.221  
service "my-nginx" exposed

5. View the service details.

[root@v0-k8s-master ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
my-nginx     ClusterIP   10.1.159.6   10.20.9.221   8080/TCP   9s
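
It is also worth confirming that the service has pod endpoints behind it (an optional check):

[root@v0-k8s-master ~]# kubectl get endpoints my-nginx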

6. Test access to the service

[root@v0-k8s-master ~]# curl -I 10.20.9.221:8080
HTTP/1.1 200 OK
Server: nginx/1.15.2
Date: Tue, 31 Jul 2018 16:29:53 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Tue, 24 Jul 2018 13:02:29 GMT
Connection: keep-alive
ETag: "5b572365-264"
Accept-Ranges: bytes
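
The ClusterIP reported by kubectl get svc should be reachable from any node as well, since kube-proxy programs it on every node; for example:

[root@v0-k8s-master ~]# curl -I 10.1.159.6:8080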

With that, the Kubernetes cluster deployment and its network connectivity are both verified.

 

