k8s Add-on Services and Advanced Features

I. k8s add-on components

1. DNS service

1.1 Why do we need a DNS service?

Previously, when one workload needed to reach another, the RC config file had to hard-code the target service's ClusterIP. That IP changes whenever the service is recreated, so the hard-coded address eventually breaks. With a cluster DNS service, the config file references the service by name instead of by IP: the name is resolved to the current ClusterIP at runtime, so it can be written once and connections keep working no matter how the IP changes.

This name resolution depends on the DNS add-on.
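As a minimal sketch (assuming a Service named mysql already exists in the same namespace, and that the application happens to read a MYSQL_SERVICE_HOST variable -- both are illustrative, not from the course files), a container can point at the service by name instead of a hard-coded ClusterIP:

containers:
  - name: myweb
    image: 10.0.0.5:5000/tomcat-app:v2   # hypothetical app image
    env:
      - name: MYSQL_SERVICE_HOST
        value: 'mysql'        # service name, resolved by kube-dns to the current ClusterIP
      - name: MYSQL_SERVICE_PORT
        value: '3306'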

1.2 Installing the DNS service

# 1. Download the image tarball and import it into the private registry
[root@k8s-node-1 ~]# ll
docker_k8s_dns.tar.gz

# 2. Import into the registry -- this tarball bundles the four images I packaged

# 3. Upload the manifest files
[root@k8s-master ~/k8s_yaml/dns]# ll
-rw-r--r-- 1 root root 5683 Aug 15 09:32 skydns-deploy.yaml
-rw-r--r-- 1 root root 1135 Aug 15 09:31 skydns-svc.yaml

# 4. Edit the deploy config file
[root@k8s-master ~/k8s_yaml/dns]# vim skydns-deploy.yaml
spec:
  nodeSelector:
    kubernetes.io/hostname: 10.0.0.6
  containers:   
Double-check the IPs (image registry address, nodeSelector hostname, and so on)

# 5. Create the resource
[root@k8s-master ~/k8s_yaml/dns]# kubectl create -f skydns-deploy.yaml
# 6. Edit the svc config file
[root@k8s-master ~/k8s_yaml/dns]# vim skydns-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.230.254  [this is the key line]
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

# 7. Create the resource
[root@k8s-master ~/k8s_yaml/dns]# kubectl create -f skydns-svc.yaml
# 8. Check
[root@k8s-master ~/k8s_yaml/dns]# kubectl get all --namespace=kube-system [-n works as shorthand]
# 9. Edit the kubelet config file on every node
Delete the default value on the last line and add the following:
vim /etc/kubernetes/kubelet
KUBELET_ARGS="--cluster_dns=10.254.230.254 --cluster_domain=cluster.local"

# 10. Restart kubelet
systemctl restart kubelet
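To verify that cluster DNS works, resolve a service name from inside a test pod. A sketch only: it assumes a busybox image has been pushed to the private registry under the name shown.

# Run a throwaway busybox pod and resolve the kubernetes service by name
[root@k8s-master ~]# kubectl run -it dns-test --image=10.0.0.5:5000/busybox:latest --restart=Never -- nslookup kubernetes.default
# A successful answer should come from the kube-dns address 10.254.230.254 and return the kubernetes ClusterIP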

2. Namespaces

2.1 What namespaces are for

Namespaces provide resource isolation.

A common pattern is one namespace per application (business line).

Keeping resources grouped this way keeps things organized and makes lookups faster.

A namespace can simply be named after the application it hosts.

2.2 Creating a namespace

# 1. Create the namespace
[root@k8s-master ~]# kubectl create namespace wordpress
namespace "wordpress" created

# 2. List the namespaces
[root@k8s-master ~]# kubectl get namespace
NAME          STATUS    AGE
default       Active    11h
kube-system   Active    11h
wordpress     Active    10s
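Equivalently, the namespace can be declared in a manifest and created with kubectl create -f. A minimal sketch (the file name is arbitrary):

# wordpress-ns.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: wordpress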

2.3 Running resources in a namespace

# 1. Run nginx in the wordpress namespace
[root@k8s-master ~]# kubectl run -n wordpress nginx --image=10.0.0.5:5000/nginx:1.13


# 2. View the resources
[root@k8s-master ~]# kubectl get all -n wordpress 
NAME           DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx   1         1         1            1           1m

NAME                  DESIRED   CURRENT   READY     AGE
rs/nginx-1777049147   1         1         1         1m

NAME                        READY     STATUS    RESTARTS   AGE
po/nginx-1777049147-4sgbv   1/1       Running   0          1m


# 3. Edit the live object
[root@k8s-master ~]# kubectl edit -n wordpress deployment nginx
namespace: wordpress [every object carries this field; if you do not specify a namespace it is "default"]

2.4 Exposing a port

# 1. Copy an existing service manifest
[root@k8s-master ~/k8s_yaml/svc]# cp k8s_svc.yaml wordpress.svc.yaml

# 2. Check the rs labels -- they turn out to be run: nginx
[root@k8s-master ~/k8s_yaml/svc]# kubectl get all --all-namespaces -o wide

# 3. Edit the config file
[root@k8s-master ~/k8s_yaml/svc]# vim wordpress.svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: myweb
  namespace: wordpress [this is the namespace name]
spec:
  type: NodePort  # default would be ClusterIP
  ports:
    - port: 80          # service (ClusterIP) port
      nodePort: 30005   # port opened on every node
      targetPort: 80    # container (pod) port
  selector:
    run: nginx [this is the pod label]

# 4. Create the resource
[root@k8s-master ~/k8s_yaml/svc]# kubectl create -f wordpress.svc.yaml 

# 5. Check
[root@k8s-master ~/k8s_yaml/svc]# kubectl get all -n wordpress 
NAME           DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx   1         1         1            1           12m

NAME        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
svc/myweb   10.254.139.67   <nodes>       80:30005/TCP   2m

NAME                  DESIRED   CURRENT   READY     AGE
rs/nginx-1777049147   1         1         1         12m

NAME                        READY     STATUS    RESTARTS   AGE
po/nginx-1777049147-4sgbv   1/1       Running   0          12m

3. Health checks

3.1 Types of probes

livenessProbe: liveness check -- periodically checks whether the service is still alive; if the check fails, the container is restarted.

readinessProbe: readiness check -- periodically checks whether the service is ready to serve requests; if it is not, the pod is removed from the service's endpoints.

3.2 Probe handlers

  • exec: run a command inside the container
  • httpGet: check the status code returned by an HTTP request
  • tcpSocket: test whether a TCP port accepts connections

3.3 Using the exec liveness probe

# 1. Create the config file
[root@k8s-master ~/k8s_yaml]# mkdir health
[root@k8s-master ~/k8s_yaml]# cd health/
[root@k8s-master ~/k8s_yaml/health]# vim exec_pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: exec
spec:
  containers:
    - name: nginx
      image: 10.0.0.5:5000/nginx:1.13
      ports:
        - containerPort: 80
      args: # startup command for the container
        - /bin/sh
        - -c
        - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
      livenessProbe:
        exec:
          command:
            - cat
            - /tmp/healthy
        initialDelaySeconds: 5
        periodSeconds: 5

# 2. Create the resource
[root@k8s-master ~/k8s_yaml/health]# kubectl create -f exec_pod.yaml

# 3. View the resource
[root@k8s-master ~/k8s_yaml/health]# kubectl get pod
NAME          READY     STATUS    RESTARTS   AGE
exec          1/1       Running   0          32s

# 4. Check again after 30 s -- the probe starts failing once /tmp/healthy has been removed
[root@k8s-master ~/k8s_yaml/health]# kubectl describe pod exec
...
Unhealthy

# 5. A moment later the container is restarted automatically
[root@k8s-master ~/k8s_yaml/health]# kubectl describe pod exec
...
Started

# 6. View the resource -- the RESTARTS count is now 1
[root@k8s-master ~/k8s_yaml/health]# kubectl get pod
NAME          READY     STATUS    RESTARTS   AGE
exec          1/1       Running   1          2m

3.4 Using the httpGet liveness probe

# 1. Create the config file
[root@k8s-master ~/k8s_yaml/health]# vim pod_httpGet.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: httpget
spec:
  containers:
    - name: nginx
      image: 10.0.0.5:5000/nginx:1.13
      ports:
        - containerPort: 80
      livenessProbe:
        httpGet:
          path: /index.html
          port: 80
        initialDelaySeconds: 3
        periodSeconds: 3

# 2. Create the resource
[root@k8s-master ~/k8s_yaml/health]# kubectl create -f pod_httpGet.yaml 

# 3. View the resource
[root@k8s-master ~/k8s_yaml/health]# kubectl get pod httpget 
NAME      READY     STATUS    RESTARTS   AGE
httpget   1/1       Running   0          1m

# 4. Enter the container and remove the index page
[root@k8s-master ~/k8s_yaml/health]# kubectl exec -it httpget /bin/bash
root@httpget:/# 
root@httpget:/# cd /usr/share/nginx/html/
root@httpget:/usr/share/nginx/html# rm -rf index.html 

# 5. Check the status -- the container is restarted within seconds
[root@k8s-master ~/k8s_yaml/health]# kubectl describe pod httpget
Unhealthy
Started

# 6. View the resource -- 1 restart
[root@k8s-master ~/k8s_yaml/health]# kubectl get pod httpget 
NAME      READY     STATUS    RESTARTS   AGE
httpget   1/1       Running   1          4m

3.5 Using the tcpSocket liveness probe

This works much the same as the previous probe, so no further testing here.

[root@k8s-master ~/k8s_yaml/health]# vim pod_tcpSocket.yaml
apiVersion: v1
kind: Pod
metadata:
  name: tcpsocket   # pod names must be lowercase DNS-1123 labels
spec:
  containers:
    - name: nginx
      image: 10.0.0.5:5000/nginx:1.13
      ports:
        - containerPort: 80
      livenessProbe:
        tcpSocket:
          port: 80
        initialDelaySeconds: 3
        periodSeconds: 3

3.6 Using the httpGet readiness probe

# 1. Create the config file
[root@k8s-master ~/k8s_yaml/health]# vim nginx-rc-httpGet.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: readiness
spec:
  replicas: 2
  selector:
    app: readiness
  template:
    metadata:
      labels:
        app: readiness
    spec:
      containers:
      - name: readiness
        image: 10.0.0.5:5000/nginx:1.13
        ports:
        - containerPort: 80
        readinessProbe:
          httpGet:
            path: /qiangge.html
            port: 80
          initialDelaySeconds: 3
          periodSeconds: 3


# 2. Create the resource
[root@k8s-master ~/k8s_yaml/health]# kubectl create -f nginx-rc-httpGet.yaml 


# 3. Expose the port
[root@k8s-master ~/k8s_yaml/health]# kubectl expose replicationcontroller readiness --type=NodePort --port=80
service "readiness" exposed


# 4. View the resources -- neither pod is ready yet
[root@k8s-master ~/k8s_yaml/health]# kubectl get pod
NAME              READY     STATUS             RESTARTS   AGE
readiness-0q9mz   0/1       Running            0          2m
readiness-3mc3f   0/1       Running            0          2m


# 5. Enter one of the containers and make it ready
[root@k8s-master ~/k8s_yaml/health]# kubectl exec -it readiness-0q9mz /bin/bash
root@readiness-0q9mz:/# cd /usr/share/nginx/html/
root@readiness-0q9mz:/usr/share/nginx/html# echo hahaha >qiangge.html


# 6. View the resources -- one pod is ready now and its IP has been added to the endpoints
[root@k8s-master ~/k8s_yaml/health]# kubectl get pod
NAME              READY     STATUS    RESTARTS   AGE
readiness-0q9mz   1/1       Running   0          6m
readiness-3mc3f   0/1       Running   0          6m

[root@k8s-master ~/k8s_yaml/health]# kubectl describe svc readiness
Endpoints:		172.16.66.5:80

So the readiness check boils down to this: if you are usable I put you into the endpoints; if you are not, then even though you are running I will not send any traffic your way.
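You can watch this from the service side by comparing the endpoints before and after making the second pod ready as well. A sketch for this setup (the pod name is taken from the output above; yours will differ):

# Only the ready pod's IP is listed so far
[root@k8s-master ~/k8s_yaml/health]# kubectl get endpoints readiness

# Make the second pod ready too, then re-check -- both pod IPs should now appear
[root@k8s-master ~/k8s_yaml/health]# kubectl exec -it readiness-3mc3f -- /bin/sh -c 'echo hahaha > /usr/share/nginx/html/qiangge.html'
[root@k8s-master ~/k8s_yaml/health]# kubectl get endpoints readiness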

4. The dashboard service

The dashboard lets you manage the whole k8s cluster from a web page.

# 1. Upload the image, load it and tag it
[root@k8s-node-1 ~]# ll
-rw-r--r--  1 root root 86984704 Aug 15 09:36 kubernetes-dashboard-amd64_v1.4.1.tar.gz

[root@k8s-node-1 ~]# docker load -i kubernetes-dashboard-amd64_v1.4.1.tar.gz 
Loaded image: index.tenxcloud.com/google_containers/kubernetes-dashboard-amd64:v1.4.1
[root@k8s-node-1 ~]# docker tag index.tenxcloud.com/google_containers/kubernetes-dashboard-amd64:v1.4.1 10.0.0.5:5000/kubernetes-dashboard-amd64:v1.4.1


# 2. Create the config files
[root@k8s-master ~/k8s_yaml]# mkdir dashboard
[root@k8s-master ~/k8s_yaml]# cd dashboard/
[root@k8s-master ~/k8s_yaml/dashboard]# ll
-rw-r--r-- 1 root root  274 Aug 15 09:31 dashboard-svc.yaml
-rw-r--r-- 1 root root 1004 Aug 15 09:31 dashboard.yaml


# 3. Edit the config file
[root@k8s-master ~/k8s_yaml/dashboard]# vim dashboard.yaml
spec:
      nodeSelector:
        kubernetes.io/hostname: 10.0.0.6
      containers:
Double-check the IPs (image registry address, nodeSelector hostname, and so on)


# 4. Create the resource
[root@k8s-master ~/k8s_yaml/dashboard]# kubectl create -f dashboard.yaml 


# 5. Edit the svc config file
[root@k8s-master ~/k8s_yaml/dashboard]# vim dashboard-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system [must match the namespace used in the deployment manifest above]
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090


# 6. Create the resource
[root@k8s-master ~/k8s_yaml/dashboard]# kubectl create -f dashboard-svc.yaml


# 7. Browse to http://10.0.0.5:8080/ui

5. Accessing a service through the apiserver reverse proxy

Option 1: NodePort type
  type: NodePort
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30008

Option 2: ClusterIP type [if no type is specified, ClusterIP is the default]
  type: ClusterIP
  ports:
    - port: 80
      targetPort: 80
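A ClusterIP service has no node port, but it can still be reached from outside through the apiserver's reverse proxy. A sketch of the URL pattern for this setup (insecure apiserver on 10.0.0.5:8080; the dashboard service from section 4 is used as the example, and this is also roughly what the /ui shortcut resolves to):

http://10.0.0.5:8080/api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard/

The general pattern is /api/v1/proxy/namespaces/<namespace>/services/<service-name>/.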

II. k8s autoscaling

Autoscaling in k8s requires the heapster monitoring add-on.

1. Installing heapster monitoring

# 1. Upload the images, load them and tag them
[root@k8s-node-1 ~]# ll
-rw-r--r--  1 root root 275096576 Jan  5 16:59 docker_heapster_grafana.tar.gz
-rw-r--r--  1 root root 260942336 Jan  5 16:59 docker_heapster_influxdb.tar.gz
-rw-r--r--  1 root root 991839232 Jan  5 16:59 docker_heapster.tar.gz
[root@k8s-node-1 ~]# 
for n in `ls *.tar.gz`;do docker load -i $n ;done 
docker tag docker.io/kubernetes/heapster_grafana:v2.6.0 10.0.0.5:5000/heapster_grafana:v2.6.0 
docker tag docker.io/kubernetes/heapster_influxdb:v0.5 10.0.0.5:5000/heapster_influxdb:v0.5 
docker tag docker.io/kubernetes/heapster:canary 10.0.0.5:5000/heapster:canary

    
# 2. Create the config files
[root@k8s-master ~/k8s_yaml]# mkdir heapster
[root@k8s-master ~/k8s_yaml]# cd heapster/
[root@k8s-master ~/k8s_yaml/heapster]# ll
-rw-r--r-- 1 root root  414 Aug 15 09:31 grafana-service.yaml
-rw-r--r-- 1 root root  616 Aug 15 09:31 heapster-controller.yaml
-rw-r--r-- 1 root root  249 Aug 15 09:31 heapster-service.yaml
-rw-r--r-- 1 root root 1473 Aug 15 09:31 influxdb-grafana-controller.yaml
-rw-r--r-- 1 root root  259 Aug 15 09:31 influxdb-service.yaml
These manifests all use the same label selector:
    selector:
      name: influxGrafana

        
# 3. Edit the config files -- double-check the IP addresses and similar details
[root@k8s-master ~/k8s_yaml/heapster]# vim heapster-controller.yaml
spec:
  nodeName: 10.0.0.6 # pin to a fixed node, since the images were just loaded on .6
  containers:
  - name: heapster
    image: 10.0.0.5:5000/heapster:canary
    imagePullPolicy: IfNotPresent # changed from Always to IfNotPresent
    command:

[root@k8s-master ~/k8s_yaml/heapster]# vim influxdb-grafana-controller.yaml
spec:
  nodeName: 10.0.0.6 # pin to a fixed node here as well
  containers:
  - name: influxdb  

    
# 4. Create resources from every file in the current directory
[root@k8s-master ~/k8s_yaml/heapster]# kubectl create -f .

    
# 5. View the resources
[root@k8s-master ~/k8s_yaml/heapster]# kubectl get all -n kube-system
NAME                       CLUSTER-IP       EXTERNAL-IP   PORT(S)             AGE
svc/heapster               10.254.36.31     <none>        80/TCP              1m
svc/kubernetes-dashboard   10.254.177.199   <none>        80/TCP              51m
svc/monitoring-grafana     10.254.197.174   <none>        80/TCP              1m
svc/monitoring-influxdb    10.254.187.46    <none>        8083/TCP,8086/TCP   1m

NAME                                              READY     STATUS    RESTARTS   AGE
po/heapster-jsvnq                                 1/1       Running   0          1m
po/influxdb-grafana-4jr7x                         2/2       Running   0          1m

heapster scrapes the metrics into influxdb, and grafana renders the graphs.

Open the web UI to confirm the data is coming in.
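Assuming the same insecure apiserver address as before, the grafana dashboards should be reachable through the apiserver proxy (a sketch for this setup):

http://10.0.0.5:8080/api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/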

2. Autoscaling

# 1. Review the config files -- the existing ones are reused directly
[root@k8s-master ~/k8s_yaml/deploy]# vim k8s_deploy.yaml 
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: 10.0.0.5:5000/nginx:1.13
        ports:
        - containerPort: 80
        resources:
          limits:
            cpu: 100m   # a CPU limit is added -- required for CPU-based autoscaling
[root@k8s-master ~/k8s_yaml/deploy]# cat k8s_svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: nginx-deployment
spec:
  type: NodePort  # default would be ClusterIP
  ports:
    - port: 80          # service (ClusterIP) port
      nodePort: 30001   # port opened on every node
      targetPort: 80    # container (pod) port
  selector:
    app: nginx


# 2. Create the resources
[root@k8s-master ~/k8s_yaml/deploy]# kubectl create -f .


# 3. View the resources
[root@k8s-master ~/k8s_yaml/deploy]# kubectl get all
NAME                      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx-deployment   3         3         3            3           26s

NAME                   CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
svc/nginx-deployment   10.254.206.85    <nodes>       80:30001/TCP     26s

NAME                                   READY     STATUS    RESTARTS   AGE
po/nginx-deployment-2455254582-lv02t   1/1       Running   0          26s
po/nginx-deployment-2455254582-srkjl   1/1       Running   0          26s
po/nginx-deployment-2455254582-vddw7   1/1       Running   0          26s

# 1. Create the autoscaling rule
kubectl autoscale deploy nginx-deployment --max=8 --min=1 --cpu-percent=8 [tune this rule for your production environment; the 8% CPU threshold here is deliberately low so the demo triggers easily]
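The same rule can also be written as a HorizontalPodAutoscaler manifest. A sketch using the autoscaling/v1 API (equivalent to the kubectl autoscale command above, not an extra step; the target name matches the deployment just created):

apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: nginx-deployment
spec:
  scaleTargetRef:
    apiVersion: extensions/v1beta1
    kind: Deployment
    name: nginx-deployment
  minReplicas: 1
  maxReplicas: 8
  targetCPUUtilizationPercentage: 8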

# 2. Load test
[root@k8s-node-2 ~]# yum install httpd-tools -y
[root@k8s-node-2 ~]# ab -n 1000000 -c 40 http://10.0.0.6:30001/index.html

# 3. Watch the graphs

Three extra pods are scaled out.

Stop the load test.

The deployment scales back down to a single pod.
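The scaling can also be followed from the command line (a sketch; output omitted):

# Current vs. target CPU utilization and the replica count chosen by the autoscaler
[root@k8s-master ~]# kubectl get hpa
# Pods being added and removed as the load changes
[root@k8s-master ~]# kubectl get pod -w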

III. Persistent storage

With high availability, when a pod dies a new pod is started to replace it -- but the new pod starts out empty, so the data held by the old pod is lost.

1. Types of data persistence

1) emptyDir: a temporary directory whose lifetime is tied to the pod (the data is gone once the pod is deleted)

2) HostPath: a directory on the node that happens to run the pod

3) nfs: a shared NFS directory mounted into the pod

4) pv: persistent volume [cluster-wide / global]

5) pvc: persistent volume claim [scoped to a single namespace]

2. HostPath type

How it works: declare a hostPath volume in the pod spec and mount it into the container, so the data is written to a directory on the node (see the sketch below).

Drawback:

If the pod is rescheduled to another node, the persisted data stays behind on the old (possibly dead) node and does not follow the pod, so the data is still effectively lost.
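A minimal sketch of a hostPath volume (the pod name, paths and image here are only illustrative for this environment, not taken from the course files):

apiVersion: v1
kind: Pod
metadata:
  name: hostpath-demo
spec:
  containers:
    - name: nginx
      image: 10.0.0.5:5000/nginx:1.13
      volumeMounts:
        - name: web-data
          mountPath: /usr/share/nginx/html   # path inside the container
  volumes:
    - name: web-data
      hostPath:
        path: /data/hostpath-demo            # directory on the node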

3. NFS type

3.1 Installing the NFS packages (on the server and all nodes)

[root@k8s-master ~]# yum install nfs-utils -y
[root@k8s-node-1 ~]# yum install nfs-utils -y
[root@k8s-node-2 ~]# yum install nfs-utils -y

3.2 Editing the config files

# 1. Configure the exported (shared) directory
[root@k8s-master ~]# vim /etc/exports
/data  10.0.0.0/24(rw,async,no_root_squash,no_all_squash)

# 2. Create the shared directory
[root@k8s-master ~]# mkdir /data

# 3. Start the services
[root@k8s-master ~]# systemctl start rpcbind
[root@k8s-master ~]# systemctl start nfs

# 4. Check from the client nodes
[root@k8s-node-1 ~]# showmount -e 10.0.0.5
Export list for 10.0.0.5:
/data 10.0.0.0/24
[root@k8s-node-2 ~]# showmount -e 10.0.0.5
Export list for 10.0.0.5:
/data 10.0.0.0/24

# 5. Edit the config file
[root@k8s-master ~/k8s_yaml/tomcat_demo]# vim mysql-rc.yml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql
spec:
  replicas: 1 # one replica is enough -- the data will not be lost
  selector:
    app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      nodeName: 10.0.0.6
      volumes:
      - name: mysql
        nfs:
          path: /data/mysql
          server: 10.0.0.5
      containers:
        - name: mysql
          image: 10.0.0.5:5000/mysql:5.7
          imagePullPolicy: IfNotPresent
          ports:
          - containerPort: 3306
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: mysql
          env:
          - name: MYSQL_ROOT_PASSWORD   # the mysql:5.7 image will not start without a root password (same value as used later in this doc)
            value: '123456'

# 6. Create the required directory
[root@k8s-master ~]# mkdir /data/mysql

# 7. Create the resources
[root@k8s-master ~/k8s_yaml/tomcat_demo]# kubectl create -f .
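Once the pod is running, the NFS export on the master should contain MySQL's data files, which confirms the volume really lives on NFS (a sketch; the exact file list varies):

[root@k8s-master ~]# ls /data/mysql
# expect the usual MySQL datadir contents: auto.cnf, ibdata1, ib_logfile*, the mysql/ and performance_schema/ directories, ...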

4. PV and PVC

A PV is only responsible for providing the storage; a PVC is what consumes it.

The consumer does not need to care how the backing storage is defined -- it just tells the PVC "I need a volume", and the PVC binds to a matching PV.

# 1. Upload the required files
[root@k8s-master ~/k8s_yaml]# mkdir volume
[root@k8s-master ~/k8s_yaml]# cd volume/
[root@k8s-master ~/k8s_yaml/volume]# ll
-rw-r--r-- 1 root root 161 Aug 15 09:31 mysql_pvc.yaml
-rw-r--r-- 1 root root 291 Aug 15 09:31 mysql_pv.yaml


# 2. Edit the config files
[root@k8s-master ~/k8s_yaml/volume]# vim mysql_pv.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs01
  labels:
    type: nfs01
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    path: "/data/tomcat-mysql"
    server: 10.0.0.5

[root@k8s-master ~/k8s_yaml/volume]# mkdir /data/tomcat-mysql

[root@k8s-master ~/k8s_yaml/volume]# vim mysql_pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs02
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi


# 3. Create the PV and PVC
[root@k8s-master ~/k8s_yaml/volume]# kubectl create -f .


# 4. View the PV and PVC -- the claim binds to nfs01 because the PV's capacity (10Gi >= the 1Gi requested) and access modes (RWX) satisfy the claim
[root@k8s-master ~/k8s_yaml/volume]# kubectl get pv
NAME      CAPACITY   ACCESSMODES   RECLAIMPOLICY   STATUS    CLAIM   REASON    AGE
nfs01     10Gi       RWX           Recycle         Bound     default/nfs02     50s
[root@k8s-master ~/k8s_yaml/volume]# kubectl get pvc
NAME      STATUS    VOLUME    CAPACITY   ACCESSMODES   AGE
nfs02     Bound     nfs01     10Gi       RWX           53s


# 5. Modify the earlier NFS-backed config file to use the PVC instead
[root@k8s-master ~/k8s_yaml/tomcat_demo]# cat mysql-rc.yml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql
spec:
  replicas: 1 # one replica is enough -- the data will not be lost
  selector:
    app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      nodeName: 10.0.0.6
      volumes:
      - name: mysql
        persistentVolumeClaim: # changed: was nfs
          claimName: nfs02     # changed: name of the PVC created above
      containers:
        - name: mysql
          image: 10.0.0.5:5000/mysql:5.7
          imagePullPolicy: IfNotPresent
          ports:
          - containerPort: 3306
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: mysql
          env:
          - name: MYSQL_ROOT_PASSWORD
            value: '123456'

IV. Distributed storage with glusterfs

Drawbacks of NFS: it is a single point of failure and it cannot be scaled out.

glusterfs: multiple nodes and can be scaled out (common in production, usually starting from 3 nodes).

1. What is glusterfs

GlusterFS is an open-source distributed file system with strong horizontal scaling, able to support petabytes of capacity and thousands of clients; its nodes are interconnected over the network into a single parallel network file system.

It is scalable, high-performance and highly available.

2. Installing glusterfs

# On every node
yum install centos-release-gluster -y
yum install glusterfs-server -y
systemctl start glusterd.service
systemctl enable glusterd.service
mkdir -p /gfs/test1 [in production these would be two new, dedicated disks]
mkdir -p /gfs/test2 [formatted and mounted at these paths]
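In production each brick directory would sit on its own disk; a sketch of preparing and mounting one such disk (the device name /dev/sdb is an assumption for illustration):

# Format a dedicated disk and mount it where the brick directory lives
mkfs.xfs /dev/sdb
mkdir -p /gfs/test1
echo '/dev/sdb /gfs/test1 xfs defaults 0 0' >> /etc/fstab
mount -a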

3. Adding nodes to the storage pool

# View the pool -- only the local node so far
[root@k8s-master ~]# gluster pool list
UUID					Hostname 	State
ee1a782b-a50d-4e3e-9ab7-e8107216cd37	localhost	Connected 

# Add peers to the pool -- this is synchronized, the other nodes see the result too
[root@k8s-master ~]# gluster peer probe 10.0.0.6
peer probe: success. 
[root@k8s-master ~]# gluster peer probe 10.0.0.7
peer probe: success. 
[root@k8s-master ~]# gluster pool list
UUID					Hostname 	State
4e061dac-ea41-472e-ba96-e9f0d709717e	10.0.0.6 	Connected 
1512b9ce-93fb-433e-a0ae-219929cfce4c	10.0.0.7 	Connected 
ee1a782b-a50d-4e3e-9ab7-e8107216cd37	localhost	

4. glusterfs volume management

Why create a replicated volume?

Because when gluster writes data it spreads the files across different nodes -- which is very fast -- but if a node dies, its share of the data is gone. Replication protects against that.

In production a replica count of 3 is the norm; for this test we use 2.

# Create a distributed-replicated volume (consecutive bricks form a replica pair, so with this order both copies land on the same host -- hence force; production layouts alternate hosts)
[root@k8s-master ~]# gluster volume create msy replica 2 k8s-master:/gfs/test1 k8s-master:/gfs/test2 k8s-node-1:/gfs/test1 k8s-node-1:/gfs/test2 force

# Start the volume
[root@k8s-master ~]# gluster volume start msy

# Inspect the volume
[root@k8s-master ~]# gluster volume info msy

# Mount the volume
[root@msy ~]# mount -t glusterfs 10.0.0.5:/msy /mnt [a separate, fresh server is used to test the mount]

# Test
[root@msy ~]# cp /etc/* /mnt/
Mind the permissions, and make sure the client's hosts file resolves every gluster node's hostname to its IP.

# Check the data under /mnt
[root@msy ~]# ls -1 /mnt/|wc -l
150

# Inspect the brick directories on the storage nodes
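As a quick sketch of how the files are spread out, count the files on each brick (exact counts depend on gluster's hash distribution, but the replica pairs together hold all 150 files):

[root@k8s-master ~]# ls /gfs/test1 | wc -l
[root@k8s-master ~]# ls /gfs/test2 | wc -l
[root@k8s-node-1 ~]# ls /gfs/test1 | wc -l
[root@k8s-node-1 ~]# ls /gfs/test2 | wc -l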

5. Expanding the distributed-replicated volume

# Check the capacity before expansion
[root@msy ~]# df  -h
48G

# Expand the volume by adding another brick pair
[root@k8s-master ~]# gluster volume add-brick msy k8s-node-2:/gfs/test1 k8s-node-2:/gfs/test2 force

# Check the capacity after expansion
[root@msy ~]# df  -h
72G
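Newly added bricks only receive new data by default; to spread the existing files onto them you can run a rebalance (optional, a sketch):

[root@k8s-master ~]# gluster volume rebalance msy start
[root@k8s-master ~]# gluster volume rebalance msy status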

6. Using glusterfs as backend storage for k8s

6.1 Creating the endpoints

# 1. Check which ports the glusterfs bricks listen on
[root@k8s-node-1 ~]# netstat -lntp   
tcp        0      0 0.0.0.0:49152           glusterfsd     
tcp        0      0 0.0.0.0:49153           glusterfsd     


# 2. Create the config file
[root@k8s-master ~/k8s_yaml/volume]# vim glusterfs-ep.yaml
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs
  namespace: default # be explicit about which namespace is going to use this
subsets:
- addresses: # glusterfs server IPs
  - ip: 10.0.0.5 
  - ip: 10.0.0.6
  - ip: 10.0.0.7
  ports:
  - port: 49152 # brick service port
    protocol: TCP


# 3. Create the endpoints resource
[root@k8s-master ~/k8s_yaml/volume]# kubectl create -f glusterfs-ep.yaml


# 4. View the endpoints resources
Why are there so many? Because every svc you create automatically gets a matching endpoints object.
[root@k8s-master ~/k8s_yaml/volume]# kubectl get endpoints 
NAME               ENDPOINTS                                      AGE
glusterfs          10.0.0.5:49152,10.0.0.6:49152,10.0.0.7:49152   14s
kubernetes         10.0.0.5:6443                                  17h
mysql              <none>                                         1h
myweb              172.16.66.5:8080                               1h
nginx-deployment   172.16.65.5:80,172.16.66.2:80,172.16.66.4:80   2h
readiness          <none>                                         5h




6.2 Creating the service

# 1. Create the service config file
[root@k8s-master ~/k8s_yaml/volume]# vim glusterfs-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: glusterfs
  namespace: default
spec:
  ports:
  - port: 49152
    protocol: TCP
    targetPort: 49152
  sessionAffinity: None
  type: ClusterIP


# 2. Create the svc resource
[root@k8s-master ~/k8s_yaml/volume]# kubectl create -f glusterfs-svc.yaml


# 3. View the svc resource
[root@k8s-master ~/k8s_yaml/volume]# kubectl get svc 
NAME               CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
glusterfs          10.254.241.22    <none>        49152/TCP        49s
kubernetes         10.254.0.1       <none>        443/TCP          17h
mysql              10.254.210.163   <none>        3306/TCP         1h
myweb              10.254.244.167   <nodes>       8080:30008/TCP   1h
nginx-deployment   10.254.206.85    <nodes>       80:30001/TCP     2h
readiness          10.254.157.186   <nodes>       80:30689/TCP     5h

6.3 Automatic association

A svc and an endpoints object are associated automatically by name: as long as the names match and both live in the same namespace (note that this service defines no selector, so k8s will not overwrite the manually created endpoints), they are linked.

[root@k8s-master ~/k8s_yaml/volume]# kubectl describe svc glusterfs 
Name:			glusterfs
Namespace:		default
Labels:			<none>
Selector:		<none>
Type:			ClusterIP
IP:			10.254.241.22
Port:			<unset>	49152/TCP
Endpoints:		10.0.0.5:49152,10.0.0.6:49152,10.0.0.7:49152 [linked up]
Session Affinity:	None
No events.

6.4 Creating a glusterfs-type PV

# List the gluster volumes
[root@k8s-node-1 ~]# gluster volume list
msy

# Create the glusterfs-type PV config file
[root@k8s-master ~/k8s_yaml/volume]# vim gluster-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: gluster
  labels:
    type: glusterfs
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: "glusterfs"
    path: "msy"
    readOnly: false

# Create the resource
[root@k8s-master ~/k8s_yaml/volume]# kubectl create -f gluster-pv.yaml

6.5 Creating a glusterfs-type PVC

# Create the config file
[root@k8s-master ~/k8s_yaml/volume]# vim gluster-pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: glusterfs02
  namespace: default
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi


# Create the PVC resource
[root@k8s-master ~/k8s_yaml/volume]# kubectl create -f gluster-pvc.yaml
persistentvolumeclaim "glusterfs02" created


# View the PVC resource
[root@k8s-master ~/k8s_yaml/volume]# kubectl get pvc
NAME          STATUS    VOLUME    CAPACITY   ACCESSMODES   AGE
glusterfs02   Bound     gluster   50Gi       RWX           8s


# View the PV resource -- it is now bound
[root@k8s-master ~/k8s_yaml/volume]# kubectl get pv
NAME    CAPACITY  ACCESSMODES  RECLAIMPOLICY  STATUS   CLAIM               REASON    AGE
gluster 50Gi      RWX          Retain         Bound     default/glusterfs02          5m

# Edit the MySQL config file to point at the glusterfs PVC
[root@k8s-master ~/k8s_yaml/tomcat_demo]# cat mysql-rc.yml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql
spec:
  replicas: 1 # one replica is enough -- the data will not be lost
  selector:
    app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      nodeName: 10.0.0.6
      volumes:
      - name: mysql
        persistentVolumeClaim: # changed: use the glusterfs-backed PVC
          claimName: glusterfs02     # changed: must match the PVC name created above (glusterfs02)
      containers:
        - name: mysql
          image: 10.0.0.5:5000/mysql:5.7
          imagePullPolicy: IfNotPresent
          ports:
          - containerPort: 3306
          volumeMounts:
            - mountPath: /var/lib/mysql
              name: mysql
          env:
          - name: MYSQL_ROOT_PASSWORD
            value: '123456'

# Create the resource
[root@k8s-master ~/k8s_yaml/tomcat_demo]# kubectl create -f mysql-rc.yml
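To confirm the data really lands on gluster, a quick check once the pod is running (a sketch; output omitted):

# The pod description should list a Glusterfs volume bound through claim glusterfs02
[root@k8s-master ~/k8s_yaml/tomcat_demo]# kubectl describe pod -l app=mysql
# MySQL's data files should show up under the gluster bricks on the storage nodes
[root@k8s-master ~]# ls /gfs/test1/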

