mkdir -p /root/redis-cluster
cd /root/redis-cluster
a. Install NFS shared storage
On CentOS, install it with yum:
yum -y install nfs-utils rpcbind
mkdir -p /usr/local/kubernetes/redis/{pv1,pv2,pv3,pv4,pv5,pv6}
vim /etc/exports
/usr/local/kubernetes/redis/pv1 *(rw,no_root_squash,no_all_squash,sync)
/usr/local/kubernetes/redis/pv2 *(rw,no_root_squash,no_all_squash,sync)
/usr/local/kubernetes/redis/pv3 *(rw,no_root_squash,no_all_squash,sync)
/usr/local/kubernetes/redis/pv4 *(rw,no_root_squash,no_all_squash,sync)
/usr/local/kubernetes/redis/pv5 *(rw,no_root_squash,no_all_squash,sync)
/usr/local/kubernetes/redis/pv6 *(rw,no_root_squash,no_all_squash,sync)
Enable and start the nfs and rpcbind services:
systemctl enable nfs
systemctl enable rpcbind
systemctl start nfs
systemctl start rpcbind
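Once the services are running, you can reload and verify the exports (showmount ships with nfs-utils):
exportfs -rav
showmount -e localhost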
Install the NFS client on every node
Install the rpcbind and nfs-utils packages and start the services, as sketched below.
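A minimal sketch, assuming the worker nodes also run CentOS:
yum -y install nfs-utils rpcbind
systemctl enable rpcbind
systemctl start rpcbind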
b. Create the PVs: six of them, for the PVCs to bind to
vim pvxin.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv1
spec:
  capacity:
    storage: 2000M   # 2000M of disk per volume
  accessModes:
    - ReadWriteMany  # readable and writable by multiple clients
  nfs:
    server: 192.168.1.32
    path: "/usr/local/kubernetes/redis/pv1"
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv2
spec:
  capacity:
    storage: 2000M
  accessModes:
    - ReadWriteMany
  nfs:
    server: 192.168.1.32
    path: "/usr/local/kubernetes/redis/pv2"
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv3
spec:
  capacity:
    storage: 2000M
  accessModes:
    - ReadWriteMany
  nfs:
    server: 192.168.1.32
    path: "/usr/local/kubernetes/redis/pv3"
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv4
spec:
  capacity:
    storage: 2000M
  accessModes:
    - ReadWriteMany
  nfs:
    server: 192.168.1.32
    path: "/usr/local/kubernetes/redis/pv4"
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv5
spec:
  capacity:
    storage: 2000M
  accessModes:
    - ReadWriteMany
  nfs:
    server: 192.168.1.32
    path: "/usr/local/kubernetes/redis/pv5"
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv6
spec:
  capacity:
    storage: 2000M
  accessModes:
    - ReadWriteMany
  nfs:
    server: 192.168.1.32
    path: "/usr/local/kubernetes/redis/pv6"
kubectl apply -f pvxin.yaml  # If apply reports a line error, copy the manifest from the reference link below; oddly, pasting it from one Linux environment into another can also trigger the error
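All six PVs should now be registered and show STATUS Available:
kubectl get pv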
c. Create a ConfigMap to hold the Redis configuration file
vim redis.conf
appendonly yes
cluster-enabled yes
cluster-config-file /var/lib/redis/nodes.conf
cluster-node-timeout 5000
dir /var/lib/redis
port 6379
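Note that cluster-config-file is written by Redis itself; since dir points at /var/lib/redis, which the StatefulSet below mounts from a PVC, each node keeps its cluster identity across Pod restarts.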
Create a ConfigMap named redis-conf:
kubectl create configmap redis-conf --from-file=redis.conf
List the ConfigMaps:
kubectl get cm
Inspect the created ConfigMap:
kubectl describe cm redis-conf
d. Create the headless Service
This is the basis for the StatefulSet's stable network identities. The manifest is as follows:
vim redis-headless-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: redis-service
  labels:
    app: redis
spec:
  ports:
  - name: redis-port
    port: 6379
  clusterIP: None
  selector:
    app: redis
    appCluster: redis-cluster
kubectl apply -f redis-headless-service.yaml
kubectl get svc redis-service
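Because clusterIP is None, the Service gets no virtual IP; instead each StatefulSet Pod receives a stable DNS name of the form <pod-name>.redis-service.default.svc.cluster.local, e.g. redis-app-0.redis-service.default.svc.cluster.local for the first replica.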
e. Create the Redis cluster nodes with the following configuration:
vim redis-cluster-node.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-app
spec:
  serviceName: "redis-service"
  replicas: 6
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
        appCluster: redis-cluster
    spec:
      terminationGracePeriodSeconds: 20
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - redis
              topologyKey: kubernetes.io/hostname
      containers:
      - name: redis
        image: redis
        command:
        - "redis-server"
        args:
        - "/etc/redis/redis.conf"
        - "--protected-mode"
        - "no"
        resources:
          requests:
            cpu: "100m"
            memory: "100Mi"
        ports:
        - name: redis
          containerPort: 6379
          protocol: "TCP"
        - name: cluster
          containerPort: 16379
          protocol: "TCP"
        volumeMounts:
        - name: "redis-conf"
          mountPath: "/etc/redis"
        - name: "redis-data"
          mountPath: "/var/lib/redis"
      volumes:
      - name: "redis-conf"
        configMap:
          name: "redis-conf"
          items:
          - key: "redis.conf"
            path: "redis.conf"
  volumeClaimTemplates:
  - metadata:
      name: redis-data
    spec:
      accessModes: [ "ReadWriteMany" ]
      resources:
        requests:
          storage: 2000M
kubectl apply -f redis-cluster-node.yaml
kubectl get pods -o wide
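The Pods come up in order, redis-app-0 through redis-app-5; you can watch their progress with:
kubectl get pods -l app=redis -w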
Inside the K8s cluster, these Pods can reach one another through those domain names. We can verify the names with nslookup from a busybox image.
kubectl run -i --tty --image busybox dns-test --restart=Never --rm -- /bin/sh
Pick any of them to test resolution:
/ # nslookup redis-app-0.redis-service.default.svc.cluster.local
Use kubectl get pv -o wide to check how the PVs were bound:
kubectl get pv -o wide
f. Initialize the Redis cluster. Use the redis-trib tool for the initialization; first create a CentOS container:
kubectl run -i --tty centos --image=centos --restart=Never -- /bin/bash
Inside the container, add the EPEL repo:
vi /etc/yum.repos.d/epel.repo
[epel]
name=Extra Packages for Enterprise Linux 7 - $basearch
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/$basearch
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
Note: the image pulled by default is CentOS 8, so part of the base repo configuration has to be replaced:
sed -i -e "s|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g" /etc/yum.repos.d/CentOS-*
yum install glibc-langpack-zh -y
Install redis-trib:
yum -y install redis-trib.noarch bind-utils
Create a new cluster. --replicas 1 assigns each master one replica, yielding 3 masters and 3 replicas:
[root@centos ]# redis-trib create --replicas 1 \
  `dig +short redis-app-0.redis-service.default.svc.cluster.local`:6379 \
  `dig +short redis-app-1.redis-service.default.svc.cluster.local`:6379 \
  `dig +short redis-app-2.redis-service.default.svc.cluster.local`:6379 \
  `dig +short redis-app-3.redis-service.default.svc.cluster.local`:6379 \
  `dig +short redis-app-4.redis-service.default.svc.cluster.local`:6379 \
  `dig +short redis-app-5.redis-service.default.svc.cluster.local`:6379
Type yes when prompted.
The command dig +short redis-app-0.redis-service.default.svc.cluster.local resolves the Pod's domain name to an IP; this is needed because redis-trib cannot create a cluster from domain names.
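To avoid typing the six lookups by hand, they can also be collected in a small shell loop; a sketch, assuming the default namespace used above:
IPS=""
for i in $(seq 0 5); do
  IPS="$IPS $(dig +short redis-app-$i.redis-service.default.svc.cluster.local):6379"
done
redis-trib create --replicas 1 $IPS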
Enter any of the Redis Pods to verify:
kubectl exec -it redis-app-2 -- /bin/bash
root@redis-app-2:/data# /usr/local/bin/redis-cli -c
127.0.0.1:6379> cluster info
127.0.0.1:6379> cluster nodes
exit
exit
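In a healthy deployment, cluster info should report cluster_state:ok and cluster_known_nodes:6, and cluster nodes should list three masters and three slaves.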
g. Create an externally reachable Service that provides access and load balancing for the Redis cluster; here we configure a NodePort. Note that nodePort 6379 lies outside the default NodePort range (30000-32767), so the kube-apiserver's --service-node-port-range must be widened for this manifest to apply, or pick a port within the range.
vim redis-access-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: redis-access-service
  labels:
    app: redis
spec:
  ports:
  - name: redis-port
    protocol: "TCP"
    port: 6379
    targetPort: 6379
    nodePort: 6379
  selector:
    app: redis
    appCluster: redis-cluster
  type: NodePort
kubectl apply -f redis-access-service.yaml
kubectl get svc redis-access-service -o wide
The Service is named redis-access-service and listens on port 6379. Inside the cluster it can now be reached at its ClusterIP, 10.0.0.129:6379 in this example; from outside the cluster, use a node address such as 192.168.1.16:6379, as shown in the figure.
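As a quick end-to-end check, a client outside the cluster can connect through the NodePort; a sketch, assuming redis-cli is installed on the client machine and 192.168.1.16 is one of the node IPs:
redis-cli -c -h 192.168.1.16 -p 6379
192.168.1.16:6379> cluster info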
Reference link