k8s Extension Services and Advanced Features

I. k8s Add-on Components

1. DNS Service

1.1. Why do we need a DNS service?

Previously, when resources talked to each other, the RC configuration file had to carry a service's ClusterIP, and that IP can change as the cluster changes. With a DNS service, the configuration file references the service by name instead of by a changing IP; the name is resolved to the current IP, so the connection keeps working whether the IP changes or not.

This name resolution is what the cluster DNS service provides.
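
For example, in the tomcat demo used elsewhere in this series, the web pod can point at the database by service name instead of by ClusterIP. A minimal sketch, assuming the database service is called mysql and that the env variable names are the ones the demo image expects:

      env:
      - name: MYSQL_SERVICE_HOST
        value: 'mysql'          # service name, resolved by kube-dns
      - name: MYSQL_SERVICE_PORT
        value: '3306'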

1.2. Installing the DNS service

# 1. Download the image bundle and import it into the private registry
[root@k8s-node-1 ~]# ll
docker_k8s_dns.tar.gz

# 2. Import into the registry; this bundle contains the four images I packaged

# 3. Upload the manifest files
[root@k8s-master ~/k8s_yaml/dns]# ll
-rw-r--r-- 1 root root 5683 Aug 15 09:32 skydns-deploy.yaml
-rw-r--r-- 1 root root 1135 Aug 15 09:31 skydns-svc.yaml

# 4. Edit the deployment manifest
[root@k8s-master ~/k8s_yaml/dns]# vim skydns-deploy.yaml
spec:
  nodeSelector:
    kubernetes.io/hostname: 10.0.0.6
  containers:   
# double-check the registry IP and the node hostname for your environment

# 5. Create the resource
[root@k8s-master ~/k8s_yaml/dns]# kubectl create -f skydns-deploy.yaml
# 6. Edit the service manifest
[root@k8s-master ~/k8s_yaml/dns]# vim skydns-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.230.254  # the key line: a fixed ClusterIP reserved for kube-dns
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

# 7. Create the resource
[root@k8s-master ~/k8s_yaml/dns]# kubectl create -f skydns-svc.yaml
# 8. Check
[root@k8s-master ~/k8s_yaml/dns]# kubectl get all --namespace=kube-system   # -n kube-system also works
# 9. Edit the kubelet configuration on every node
# Delete the default value on the last line and set KUBELET_ARGS as follows
vim /etc/kubernetes/kubelet
KUBELET_ARGS="--cluster_dns=10.254.230.254 --cluster_domain=cluster.local"

# 10. Restart kubelet
systemctl restart kubelet
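
A quick way to verify that resolution works is to run a throwaway pod and look up a well-known service. This assumes a busybox image has been pushed to the private registry; adjust the image path to whatever you actually have:

kubectl run busybox --rm -it --image=10.0.0.5:5000/busybox:latest -- nslookup kubernetes.default
# a working setup resolves the name to the kubernetes service ClusterIP (10.254.0.1 in this cluster)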

2. Namespaces

2.1. What namespaces are for

Namespaces provide resource isolation.

A common practice is one namespace per application (business line).

With resources grouped this way they are easier to find and faster to work with.

A namespace can simply be named after the application it hosts.

2.2. Creating a namespace

# 1. Create a namespace
[root@k8s-master ~]# kubectl create namespace wordpress
namespace "wordpress" created

# 2. List namespaces
[root@k8s-master ~]# kubectl get namespace
NAME          STATUS    AGE
default       Active    11h
kube-system   Active    11h
wordpress     Active    10s
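
The same namespace can also be declared in YAML, which fits the kubectl create -f workflow used throughout this article (equivalent to the command above):

apiVersion: v1
kind: Namespace
metadata:
  name: wordpress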

2.3. Running resources in a namespace

# 1. Run nginx in the wordpress namespace
[root@k8s-master ~]# kubectl run -n wordpress nginx --image=10.0.0.5:5000/nginx:1.13


# 2. Check the resources
[root@k8s-master ~]# kubectl get all -n wordpress 
NAME           DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx   1         1         1            1           1m

NAME                  DESIRED   CURRENT   READY     AGE
rs/nginx-1777049147   1         1         1         1m

NAME                        READY     STATUS    RESTARTS   AGE
po/nginx-1777049147-4sgbv   1/1       Running   0          1m


# 3. Edit the generated object
[root@k8s-master ~]# kubectl edit -n wordpress deployment nginx
namespace: wordpress   # every object carries this field; if you do not specify one it defaults to default

2.4. Exposing a port

# 1. Create a service manifest (copy an existing one)
[root@k8s-master ~/k8s_yaml/svc]# cp k8s_svc.yaml wordpress.svc.yaml

# 2. Check the replica set's labels; the label is run: nginx
[root@k8s-master ~/k8s_yaml/svc]# kubectl get all --all-namespaces -o wide

# 3. Edit the manifest
[root@k8s-master ~/k8s_yaml/svc]# vim wordpress.svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: myweb
  namespace: wordpress   # the namespace name
spec:
  type: NodePort  #ClusterIP
  ports:
    - port: 80          #clusterIP
      nodePort: 30005   #node port
      targetPort: 80    #pod port
  selector:
    run: nginx   # the pod label to select

# 4. Create the resource
[root@k8s-master ~/k8s_yaml/svc]# kubectl create -f wordpress.svc.yaml 

# 5. Check
[root@k8s-master ~/k8s_yaml/svc]# kubectl get all -n wordpress 
NAME           DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx   1         1         1            1           12m

NAME        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
svc/myweb   10.254.139.67   <nodes>       80:30005/TCP   2m

NAME                  DESIRED   CURRENT   READY     AGE
rs/nginx-1777049147   1         1         1         12m

NAME                        READY     STATUS    RESTARTS   AGE
po/nginx-1777049147-4sgbv   1/1       Running   0          12m

3. Health Checks

3.1. Probe types

livenessProbe: liveness check; periodically checks whether the service is still alive, and restarts the container when the check fails.

readinessProbe: readiness check; periodically checks whether the service can serve traffic, and removes the pod from the service's endpoints when it cannot.

3.2. Probe methods

  • exec: run a command inside the container
  • httpGet: check the status code returned by an HTTP request
  • tcpSocket: test whether a TCP port accepts connections

3.3. liveness probe with exec

# 1. Create the manifest
[root@k8s-master ~/k8s_yaml]# mkdir health
[root@k8s-master ~/k8s_yaml]# cd health/
[root@k8s-master ~/k8s_yaml/health]# vim exec_pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: exec
spec:
  containers:
    - name: nginx
      image: 10.0.0.5:5000/nginx:1.13
      ports:
        - containerPort: 80
      args: # command the container runs at startup
        - /bin/sh
        - -c
        - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
      livenessProbe:
        exec:
          command:
            - cat
            - /tmp/healthy
        initialDelaySeconds: 5
        periodSeconds: 5

# 2. Create the resource
[root@k8s-master ~/k8s_yaml/health]# kubectl create -f exec_pod.yaml

# 3. Check the resource
[root@k8s-master ~/k8s_yaml/health]# kubectl get pod
NAME          READY     STATUS    RESTARTS   AGE
exec          1/1       Running   0          32s

# 4. Check again after the 30 seconds are up
[root@k8s-master ~/k8s_yaml/health]# kubectl describe pod exec
...
Unhealthy

# 5. A little later the container has been restarted automatically
[root@k8s-master ~/k8s_yaml/health]# kubectl describe pod exec
...
Started

# 6. Check the resource again: the restart count is now 1
[root@k8s-master ~/k8s_yaml/health]# kubectl get pod
NAME          READY     STATUS    RESTARTS   AGE
exec          1/1       Running   1          2m

3.4. liveness probe with httpGet

# 1. Create the manifest
[root@k8s-master ~/k8s_yaml/health]# vim pod_httpGet.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: httpget
spec:
  containers:
    - name: nginx
      image: 10.0.0.5:5000/nginx:1.13
      ports:
        - containerPort: 80
      livenessProbe:
        httpGet:
          path: /index.html
          port: 80
        initialDelaySeconds: 3
        periodSeconds: 3

# 2. Create the resource
[root@k8s-master ~/k8s_yaml/health]# kubectl create -f pod_httpGet.yaml 

# 3. Check the resource
[root@k8s-master ~/k8s_yaml/health]# kubectl get pod httpget 
NAME      READY     STATUS    RESTARTS   AGE
httpget   1/1       Running   0          1m

# 4. Exec into the container and remove the index page
[root@k8s-master ~/k8s_yaml/health]# kubectl exec -it httpget /bin/bash
root@httpget:/# 
root@httpget:/# cd /usr/share/nginx/html/
root@httpget:/usr/share/nginx/html# rm -rf index.html 

# 5. Check the status: the container is restarted within seconds
[root@k8s-master ~/k8s_yaml/health]# kubectl describe pod httpget
Unhealthy
Started

# 6. Check the resource: 1 restart
[root@k8s-master ~/k8s_yaml/health]# kubectl get pod httpget 
NAME      READY     STATUS    RESTARTS   AGE
httpget   1/1       Running   1          4m

3.5. liveness probe with tcpSocket

This works much the same as the previous probe, so it is not tested in detail here.

[root@k8s-master ~/k8s_yaml/health]# vim pod_tcpSocket.yaml
apiVersion: v1
kind: Pod
metadata:
  name: tcpsocket   # pod names must be lowercase
spec:
  containers:
    - name: nginx
      image: 10.0.0.5:5000/nginx:1.13
      ports:
        - containerPort: 80
      livenessProbe:
        tcpSocket:
          port: 80
        initialDelaySeconds: 3
        periodSeconds: 3

3.6. readiness probe with httpGet

# 1. Create the manifest
[root@k8s-master ~/k8s_yaml/health]# vim nginx-rc-httpGet.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: readiness
spec:
  replicas: 2
  selector:
    app: readiness
  template:
    metadata:
      labels:
        app: readiness
    spec:
      containers:
      - name: readiness
        image: 10.0.0.5:5000/nginx:1.13
        ports:
        - containerPort: 80
        readinessProbe:
          httpGet:
            path: /qiangge.html
            port: 80
          initialDelaySeconds: 3
          periodSeconds: 3


# 2. Create the resource
[root@k8s-master ~/k8s_yaml/health]# kubectl create -f nginx-rc-httpGet.yaml 


# 3. Expose a port
[root@k8s-master ~/k8s_yaml/health]# kubectl expose replicationcontroller readiness --type=NodePort --port=80
service "readiness" exposed


# 4. Check the pods: neither of them is ready
[root@k8s-master ~/k8s_yaml/health]# kubectl get pod
NAME              READY     STATUS             RESTARTS   AGE
readiness-0q9mz   0/1       Running            0          2m
readiness-3mc3f   0/1       Running            0          2m


# 5. Exec into one container and make it pass the check
[root@k8s-master ~/k8s_yaml/health]# kubectl exec -it readiness-0q9mz /bin/bash
root@readiness-0q9mz:/# cd /usr/share/nginx/html/
root@readiness-0q9mz:/usr/share/nginx/html# echo hahaha >qiangge.html


# 6. Check again: one pod is ready and its IP has been added to the service endpoints
[root@k8s-master ~/k8s_yaml/health]# kubectl get pod
NAME              READY     STATUS    RESTARTS   AGE
readiness-0q9mz   1/1       Running   0          6m
readiness-3mc3f   0/1       Running   0          6m

[root@k8s-master ~/k8s_yaml/health]# kubectl describe svc readiness
Endpoints:		172.16.66.5:80

That is the point of a readiness check: if a pod can serve, it is put into the endpoints; if it cannot, it gets no traffic even though it is running.

4. Dashboard Service

The dashboard lets you manage the entire k8s cluster from a web page.

# 1. Upload the image, load it, and tag it for the private registry
[root@k8s-node-1 ~]# ll
-rw-r--r--  1 root root 86984704 Aug 15 09:36 kubernetes-dashboard-amd64_v1.4.1.tar.gz

[root@k8s-node-1 ~]# docker load -i kubernetes-dashboard-amd64_v1.4.1.tar.gz 
Loaded image: index.tenxcloud.com/google_containers/kubernetes-dashboard-amd64:v1.4.1
[root@k8s-node-1 ~]# docker tag index.tenxcloud.com/google_containers/kubernetes-dashboard-amd64:v1.4.1 10.0.0.5:5000/kubernetes-dashboard-amd64:v1.4.1


# 2. Create the manifests
[root@k8s-master ~/k8s_yaml]# mkdir dashboard
[root@k8s-master ~/k8s_yaml]# cd dashboard/
[root@k8s-master ~/k8s_yaml/dashboard]# ll
-rw-r--r-- 1 root root  274 Aug 15 09:31 dashboard-svc.yaml
-rw-r--r-- 1 root root 1004 Aug 15 09:31 dashboard.yaml


# 3. Edit the manifest
[root@k8s-master ~/k8s_yaml/dashboard]# vim dashboard.yaml
spec:
      nodeSelector:
        kubernetes.io/hostname: 10.0.0.6
      containers:
# double-check the registry IP and the node hostname for your environment


# 4. Create the resource
[root@k8s-master ~/k8s_yaml/dashboard]# kubectl create -f dashboard.yaml 


# 5. Edit the service manifest
[root@k8s-master ~/k8s_yaml/dashboard]# vim dashboard-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system   # must match the namespace used in the manifest above
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090


# 6. Create the resource
[root@k8s-master ~/k8s_yaml/dashboard]# kubectl create -f dashboard-svc.yaml


# 7. Open http://10.0.0.5:8080/ui in a browser

5. Accessing a Service through the apiserver reverse proxy

Option 1: NodePort
type: NodePort
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30008

Option 2: ClusterIP (the default whenever no type is specified)
type: ClusterIP
  ports:
    - port: 80
      targetPort: 80
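
Besides NodePort, a ClusterIP service can also be reached from outside through the apiserver's reverse proxy, which is how the dashboard URL above works. A sketch, assuming the apiserver listens on the insecure port 8080 as in this cluster (URL format for this k8s generation):

curl http://10.0.0.5:8080/api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard/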

II. k8s Elastic Scaling

Elastic scaling in this k8s version depends on the heapster monitoring add-on.

1. Installing heapster monitoring

# 1. Upload the images, load them, and tag them for the private registry
[root@k8s-node-1 ~]# ll
-rw-r--r--  1 root root 275096576 Jan  5 16:59 docker_heapster_grafana.tar.gz
-rw-r--r--  1 root root 260942336 Jan  5 16:59 docker_heapster_influxdb.tar.gz
-rw-r--r--  1 root root 991839232 Jan  5 16:59 docker_heapster.tar.gz
[root@k8s-node-1 ~]# 
for n in `ls *.tar.gz`;do docker load -i $n ;done 
docker tag docker.io/kubernetes/heapster_grafana:v2.6.0 10.0.0.5:5000/heapster_grafana:v2.6.0 
docker tag docker.io/kubernetes/heapster_influxdb:v0.5 10.0.0.5:5000/heapster_influxdb:v0.5 
docker tag docker.io/kubernetes/heapster:canary 10.0.0.5:5000/heapster:canary

    
# 2. Create the manifests
[root@k8s-master ~/k8s_yaml]# mkdir heapster
[root@k8s-master ~/k8s_yaml]# cd heapster/
[root@k8s-master ~/k8s_yaml/heapster]# ll
-rw-r--r-- 1 root root  414 Aug 15 09:31 grafana-service.yaml
-rw-r--r-- 1 root root  616 Aug 15 09:31 heapster-controller.yaml
-rw-r--r-- 1 root root  249 Aug 15 09:31 heapster-service.yaml
-rw-r--r-- 1 root root 1473 Aug 15 09:31 influxdb-grafana-controller.yaml
-rw-r--r-- 1 root root  259 Aug 15 09:31 influxdb-service.yaml
These manifests all share the same selector labels, e.g.:
    selector:
      name: influxGrafana

        
# 3. Edit the manifests; double-check the IP addresses
[root@k8s-master ~/k8s_yaml/heapster]# vim heapster-controller.yaml
spec:
  nodeName: 10.0.0.6 # pin to this node, since the images were just loaded on .6
  containers:
  - name: heapster
    image: 10.0.0.5:5000/heapster:canary
    imagePullPolicy: IfNotPresent # changed from Always to IfNotPresent
    command:

[root@k8s-master ~/k8s_yaml/heapster]# vim influxdb-grafana-controller.yaml    
spec:
  nodeName: 10.0.0.6 # pin to this node
  containers:
  - name: influxdb  

    
# 4. Create resources from every file in the current directory
[root@k8s-master ~/k8s_yaml/heapster]# kubectl create -f .

    
# 5. Check the resources
[root@k8s-master ~/k8s_yaml/heapster]# kubectl get all -n kube-system
NAME                       CLUSTER-IP       EXTERNAL-IP   PORT(S)             AGE
svc/heapster               10.254.36.31     <none>        80/TCP              1m
svc/kubernetes-dashboard   10.254.177.199   <none>        80/TCP              51m
svc/monitoring-grafana     10.254.197.174   <none>        80/TCP              1m
svc/monitoring-influxdb    10.254.187.46    <none>        8083/TCP,8086/TCP   1m

NAME                                              READY     STATUS    RESTARTS   AGE
po/heapster-jsvnq                                 1/1       Running   0          1m
po/influxdb-grafana-4jr7x                         2/2       Running   0          1m

heapster scrapes the metrics and stores them in influxdb; grafana draws the graphs.

View the graphs in the browser.
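
Grafana has no NodePort in this setup, so one way to open it is again through the apiserver reverse proxy; the service name monitoring-grafana comes from grafana-service.yaml above (same URL pattern as the dashboard, assuming the insecure port 8080):

http://10.0.0.5:8080/api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/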

2. Autoscaling

# 1. Review the manifests; reuse the existing deployment and service files
[root@k8s-master ~/k8s_yaml/deploy]# vim k8s_deploy.yaml 
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: 10.0.0.5:5000/nginx:1.13
        ports:
        - containerPort: 80
        resources:
          limits:
            cpu: 100m   # added a CPU limit
[root@k8s-master ~/k8s_yaml/deploy]# cat k8s_svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: nginx-deployment
spec:
  type: NodePort  #ClusterIP
  ports:
    - port: 80          #clusterIP
      nodePort: 30001   #node port
      targetPort: 80    #pod port
  selector:
    app: nginx


# 2. Create the resources
[root@k8s-master ~/k8s_yaml/deploy]# kubectl create -f .


# 3. Check the resources
[root@k8s-master ~/k8s_yaml/deploy]# kubectl get all
NAME                      DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/nginx-deployment   3         3         3            3           26s

NAME                   CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
svc/nginx-deployment   10.254.206.85    <nodes>       80:30001/TCP     26s

NAME                                   READY     STATUS    RESTARTS   AGE
po/nginx-deployment-2455254582-lv02t   1/1       Running   0          26s
po/nginx-deployment-2455254582-srkjl   1/1       Running   0          26s
po/nginx-deployment-2455254582-vddw7   1/1       Running   0          26s

# 4. Create the autoscaling rule
kubectl autoscale deploy nginx-deployment --max=8 --min=1 --cpu-percent=8   # tune these thresholds to your production environment

# 5. Generate load
[root@k8s-node-2 ~]# yum install httpd-tools -y
[root@k8s-node-2 ~]# ab -n 1000000 -c 40 http://10.0.0.6:30001/index.html

# 6. Watch the graphs in grafana

Under load, the deployment scales out by three extra pods; once the test ends, it scales back down to a single pod.
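
To watch the autoscaler react while ab is running, you can poll the HPA object that kubectl autoscale created (it defaults to the deployment's name) together with the pod list:

kubectl get hpa nginx-deployment
kubectl get pod -w     # replicas grow towards --max under load, then shrink back towards --min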

III. Persistent Storage

With high availability a new pod is started whenever one dies, but the new pod loses the data that lived in the old one; persistent storage solves this.

1. Types of persistent storage

1) emptyDir: a temporary directory created together with the pod and deleted with it (see the sketch after this list)

2) hostPath: a directory on whichever node the pod is scheduled to

3) nfs: a directory exported by an NFS server and mounted into the pod

4) pv: persistent volume [cluster-wide / global]

5) pvc: persistent volume claim [scoped to a namespace]
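
emptyDir gets no dedicated section below, so here is a minimal sketch (names are illustrative): a scratch directory that exists only as long as the pod does.

apiVersion: v1
kind: Pod
metadata:
  name: emptydir-demo
spec:
  containers:
  - name: nginx
    image: 10.0.0.5:5000/nginx:1.13
    volumeMounts:
    - name: cache
      mountPath: /cache      # temporary data shared by the containers of this pod
  volumes:
  - name: cache
    emptyDir: {}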

2. hostPath volumes

Implementation: mount a directory that lives on the node directly into the pod (a minimal sketch follows below).

Drawback:

If the pod gets rescheduled onto another node, the persisted data is still sitting on the old (possibly dead) node; it does not follow the pod, so the data is effectively lost anyway.
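
A minimal hostPath sketch (path and pod name are illustrative). Pinning the pod to one node with nodeName is what keeps the pod next to its data, and also exactly what causes the drawback described above:

apiVersion: v1
kind: Pod
metadata:
  name: hostpath-demo
spec:
  nodeName: 10.0.0.6                 # pin to the node that holds the data
  containers:
  - name: nginx
    image: 10.0.0.5:5000/nginx:1.13
    volumeMounts:
    - name: html
      mountPath: /usr/share/nginx/html
  volumes:
  - name: html
    hostPath:
      path: /data/html               # directory on that node's filesystem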

3. NFS volumes

3.1. Install the NFS packages

[root@k8s-master ~]# yum install nfs-utils -y
[root@k8s-node-1 ~]# yum install nfs-utils -y
[root@k8s-node-2 ~]# yum install nfs-utils -y

3.2. Configure the export and use it

# 1. Define the shared directory in /etc/exports
[root@k8s-master ~]# vim /etc/exports
/data  10.0.0.0/24(rw,async,no_root_squash,no_all_squash)

# 2. Create the shared directory
[root@k8s-master ~]# mkdir /data

# 3. Start the services
[root@k8s-master ~]# systemctl start rpcbind
[root@k8s-master ~]# systemctl start nfs

# 4. Check from the client nodes
[root@k8s-node-1 ~]# showmount -e 10.0.0.5
Export list for 10.0.0.5:
/data 10.0.0.0/24
[root@k8s-node-2 ~]# showmount -e 10.0.0.5
Export list for 10.0.0.5:
/data 10.0.0.0/24
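
Optionally, a manual mount from one of the nodes confirms the export really works before kubelet has to use it:

mount -t nfs 10.0.0.5:/data /mnt
df -h /mnt
umount /mnt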

# 5. Edit the MySQL RC manifest to mount the NFS volume
[root@k8s-master ~/k8s_yaml/tomcat_demo]# vim mysql-rc.yml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql
spec:
  replicas: 1 # one replica is enough; the data is no longer lost
  selector:
    app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      nodeName: 10.0.0.6
      volumes:
      - name: mysql
        nfs:
          path: /data/mysql
          server: 10.0.0.5
      containers:
        - name: mysql
          image: 10.0.0.5:5000/mysql:5.7
          imagePullPolicy: IfNotPresent
          ports:
          - containerPort: 3306
          volumeMounts:
          - mountPath: /var/lib/mysql
            name: mysql

# 6. Create the directory the volume needs
[root@k8s-master ~]# mkdir /data/mysql

# 7. Create the resources
[root@k8s-master ~/k8s_yaml/tomcat_demo]# kubectl create -f .

4. PV and PVC

A PV only provides the storage; a PVC is what consumes it.

The pod does not need to know how the backing storage is configured at all; it only tells the PVC "I need a PV", and the claim is bound to a matching volume.

# 1. Upload the manifests
[root@k8s-master ~/k8s_yaml]# mkdir volume
[root@k8s-master ~/k8s_yaml]# cd volume/
[root@k8s-master ~/k8s_yaml/volume]# ll
-rw-r--r-- 1 root root 161 Aug 15 09:31 mysql_pvc.yaml
-rw-r--r-- 1 root root 291 Aug 15 09:31 mysql_pv.yaml


# 2. Edit the manifests
[root@k8s-master ~/k8s_yaml/volume]# vim mysql_pv.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs01
  labels:
    type: nfs01
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    path: "/data/tomcat-mysql"
    server: 10.0.0.5

[root@k8s-master ~/k8s_yaml/volume]# mkdir /data/tomcat-mysql

[root@k8s-master ~/k8s_yaml/volume]# vim mysql_pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs02
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi


# 3. Create the PV and the PVC
[root@k8s-master ~/k8s_yaml/volume]# kubectl create -f .


# 4. Check the PV and the PVC
[root@k8s-master ~/k8s_yaml/volume]# kubectl get pv
NAME      CAPACITY   ACCESSMODES   RECLAIMPOLICY   STATUS    CLAIM   REASON    AGE
nfs01     10Gi       RWX           Recycle         Bound     default/nfs02     50s
[root@k8s-master ~/k8s_yaml/volume]# kubectl get pvc
NAME      STATUS    VOLUME    CAPACITY   ACCESSMODES   AGE
nfs02     Bound     nfs01     10Gi       RWX           53s


# 5. Change the RC that mounted NFS directly so that it uses the PVC instead
[root@k8s-master ~/k8s_yaml/tomcat_demo]# cat mysql-rc.yml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql
spec:
  replicas: 1 # one replica is enough; the data is no longer lost
  selector:
    app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      nodeName: 10.0.0.6
      volumes:
      - name: mysql
        persistentVolumeClaim: # changed: use the PVC instead of an nfs volume
          claimName: nfs02      # name of the PVC created above
      containers:
        - name: mysql
          image: 10.0.0.5:5000/mysql:5.7
          imagePullPolicy: IfNotPresent
          ports:
          - containerPort: 3306
          volumeMounts:
          - mountPath: /var/lib/mysql
            name: mysql
          env:
          - name: MYSQL_ROOT_PASSWORD
            value: '123456'

IV. Distributed Storage: GlusterFS

NFS drawbacks: it is a single point of failure and cannot be scaled out.

GlusterFS: multiple nodes and can be scaled out (common in production, three nodes as a minimum).

1. What is GlusterFS

GlusterFS is an open-source distributed file system with strong horizontal scaling: it can support petabytes of storage and thousands of clients, aggregating nodes over the network into a single parallel network file system.

It is scalable, high-performance and highly available.

2. Installing GlusterFS

# On all nodes
yum install centos-release-gluster -y
yum install glusterfs-server -y
systemctl start glusterd.service
systemctl enable glusterd.service
mkdir -p /gfs/test1   # in production these would be two freshly added disks,
mkdir -p /gfs/test2   # mounted at these two paths

3. Adding nodes to the storage pool

# List the pool: only the local node so far
[root@k8s-master ~]# gluster pool list
UUID					Hostname 	State
ee1a782b-a50d-4e3e-9ab7-e8107216cd37	localhost	Connected 

# Add peers; the pool is synchronized, so the other nodes see the same list
[root@k8s-master ~]# gluster peer probe 10.0.0.6
peer probe: success. 
[root@k8s-master ~]# gluster peer probe 10.0.0.7
peer probe: success. 
[root@k8s-master ~]# gluster pool list
UUID					Hostname 	State
4e061dac-ea41-472e-ba96-e9f0d709717e	10.0.0.6 	Connected 
1512b9ce-93fb-433e-a0ae-219929cfce4c	10.0.0.7 	Connected 
ee1a782b-a50d-4e3e-9ab7-e8107216cd37	localhost	

4. GlusterFS volume management

Why create a replicated volume?

By default gluster distributes the data it writes across different nodes, which is fast, but if a node dies its share of the data is gone; replication protects against that.

In production a replica count of 3 is typical; here we use 2 for testing.

# Create a distributed-replicated volume
[root@k8s-master ~]# gluster volume create msy replica 2 k8s-master:/gfs/test1 k8s-master:/gfs/test2 k8s-node-1:/gfs/test1 k8s-node-1:/gfs/test2 force

# Start the volume
[root@k8s-master ~]# gluster volume start msy

# Inspect the volume
[root@k8s-master ~]# gluster volume info msy

# Mount the volume (from a separate test server)
[root@msy ~]# mount -t glusterfs 10.0.0.5:/msy /mnt

# Test
[root@msy ~]# cp /etc/* /mnt/
# Watch permissions, and make sure /etc/hosts resolves every gluster node's hostname to its IP

# Check the data in /mnt
[root@msy ~]# ls -1 /mnt/|wc -l
150

# Check the brick directories on the gluster nodes

5. Expanding the distributed-replicated volume

# Capacity before expansion
[root@msy ~]# df  -h
48G

# Add bricks
[root@k8s-master ~]# gluster volume add-brick msy k8s-node-2:/gfs/test1 k8s-node-2:/gfs/test2 force

# Capacity after expansion
[root@msy ~]# df  -h
72G
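
Note that add-brick only registers the new bricks; existing files are not moved onto them automatically. After expanding, a rebalance is normally started so the data spreads over all bricks:

gluster volume rebalance msy start     # run on any node in the pool
gluster volume rebalance msy status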

6. Using GlusterFS as backend storage for k8s

6.1. Create the Endpoints

# 1. Check the glusterfs brick ports
[root@k8s-node-1 ~]# netstat -lntp   
tcp        0      0 0.0.0.0:49152           glusterfsd     
tcp        0      0 0.0.0.0:49153           glusterfsd     


# 2. Create the manifest
[root@k8s-master ~/k8s_yaml/volume]# vim glusterfs-ep.yaml
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs
  namespace: default # be explicit about which namespace will use this
subsets:
- addresses: # IPs of the glusterfs servers
  - ip: 10.0.0.5 
  - ip: 10.0.0.6
  - ip: 10.0.0.7
  ports:
  - port: 49152 # brick service port
    protocol: TCP


# 3. Create the Endpoints resource
[root@k8s-master ~/k8s_yaml/volume]# kubectl create -f glusterfs-ep.yaml


# 4. Check the Endpoints
# Why are there so many? Every Service you create automatically gets a matching Endpoints object
[root@k8s-master ~/k8s_yaml/volume]# kubectl get endpoints 
NAME               ENDPOINTS                                      AGE
glusterfs          10.0.0.5:49152,10.0.0.6:49152,10.0.0.7:49152   14s
kubernetes         10.0.0.5:6443                                  17h
mysql              <none>                                         1h
myweb              172.16.66.5:8080                               1h
nginx-deployment   172.16.65.5:80,172.16.66.2:80,172.16.66.4:80   2h
readiness          <none>                                         5h




6.2. Create the Service

# 1. Create the service manifest
[root@k8s-master ~/k8s_yaml/volume]# vim glusterfs-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: glusterfs
  namespace: default
spec:
  ports:
  - port: 49152
    protocol: TCP
    targetPort: 49152
  sessionAffinity: None
  type: ClusterIP


# 2. Create the Service resource
[root@k8s-master ~/k8s_yaml/volume]# kubectl create -f glusterfs-svc.yaml


# 3. Check the Service
[root@k8s-master ~/k8s_yaml/volume]# kubectl get svc 
NAME               CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
glusterfs          10.254.241.22    <none>        49152/TCP        49s
kubernetes         10.254.0.1       <none>        443/TCP          17h
mysql              10.254.210.163   <none>        3306/TCP         1h
myweb              10.254.244.167   <nodes>       8080:30008/TCP   1h
nginx-deployment   10.254.206.85    <nodes>       80:30001/TCP     2h
readiness          10.254.157.186   <nodes>       80:30689/TCP     5h

6.3. Automatic association

A Service and an Endpoints object are linked by name: as long as they have the same name and live in the same namespace, they are associated automatically.

[root@k8s-master ~/k8s_yaml/volume]# kubectl describe svc glusterfs 
Name:			glusterfs
Namespace:		default
Labels:			<none>
Selector:		<none>
Type:			ClusterIP
IP:			10.254.241.22
Port:			<unset>	49152/TCP
Endpoints:		10.0.0.5:49152,10.0.0.6:49152,10.0.0.7:49152   # associated
Session Affinity:	None
No events.

6.4. Create a gluster-backed PV

# List the gluster volumes
[root@k8s-node-1 ~]# gluster volume list
msy

# Create the gluster PV manifest
[root@k8s-master ~/k8s_yaml/volume]# vim gluster-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: gluster
  labels:
    type: glusterfs
spec:
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: "glusterfs"
    path: "msy"
    readOnly: false

# Create the resource
[root@k8s-master ~/k8s_yaml/volume]# kubectl create -f gluster-pv.yaml

6.5. Create a gluster-backed PVC

# Create the manifest
[root@k8s-master ~/k8s_yaml/volume]# vim gluster-pvc.yaml 
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: glusterfs02
  namespace: default
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi


# Create the PVC resource
[root@k8s-master ~/k8s_yaml/volume]# kubectl create -f gluster-pvc.yaml
persistentvolumeclaim "glusterfs02" created


# Check the PVC
[root@k8s-master ~/k8s_yaml/volume]# kubectl get pvc
NAME          STATUS    VOLUME    CAPACITY   ACCESSMODES   AGE
glusterfs02   Bound     gluster   50Gi       RWX           8s


# Check the PV: it is now Bound
[root@k8s-master ~/k8s_yaml/volume]# kubectl get pv
NAME    CAPACITY  ACCESSMODES  RECLAIMPOLICY  STATUS   CLAIM               REASON    AGE
gluster 50Gi      RWX          Retain         Bound     default/glusterfs02          5m

# Point the MySQL RC at the new PVC
[root@k8s-master ~/k8s_yaml/tomcat_demo]# cat mysql-rc.yml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql
spec:
  replicas: 1 # one replica is enough; the data is no longer lost
  selector:
    app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      nodeName: 10.0.0.6
      volumes:
      - name: mysql
        persistentVolumeClaim: # changed: use the PVC instead of an nfs volume
          claimName: glusterfs02  # must match the PVC created above (glusterfs02, not glusterfs)
      containers:
        - name: mysql
          image: 10.0.0.5:5000/mysql:5.7
          imagePullPolicy: IfNotPresent
          ports:
          - containerPort: 3306
          volumeMounts:
          - mountPath: /var/lib/mysql
            name: mysql
          env:
          - name: MYSQL_ROOT_PASSWORD
            value: '123456'

# Create the resource
[root@k8s-master ~/k8s_yaml/tomcat_demo]# kubectl create -f mysql-rc.yml
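
Once the pod is running, the MySQL data files should appear on the GlusterFS bricks rather than on the node's local disk. A quick sanity check on the gluster nodes (brick paths as created earlier):

ls /gfs/test1 /gfs/test2     # expect to see ibdata*, ib_logfile*, mysql/ ... spread across the bricks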

