http://slack.minio.org.cn/people/1
https://github.com/minio/operator
hostPath mode
1. Label the nodes
kubectl label nodes test-01 minio=true
kubectl label nodes test-02 minio=true
kubectl label nodes test-03 minio=true
kubectl label nodes test-04 minio=true
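A quick sanity check that all four nodes carry the label (assumes kubectl is pointed at this cluster):
kubectl get nodes -l minio=true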
2. Create the StorageClass
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
reclaimPolicy: Retain
volumeBindingMode: WaitForFirstConsumer
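Apply it and confirm the class exists (the file name local-storage-sc.yaml is illustrative):
kubectl apply -f local-storage-sc.yaml
kubectl get sc local-storage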
3. minio.yaml
apiVersion: v1
kind: Service
metadata:
  name: cluster-minio
  namespace: velero
  labels:
    app: cluster-minio
spec:
  clusterIP: None
  ports:
  - port: 9000
    name: cluster-minio
  selector:
    app: cluster-minio
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: cluster-minio
  namespace: velero
spec:
  selector:
    matchLabels:
      app: cluster-minio
  serviceName: cluster-minio
  replicas: 4
  template:
    metadata:
      labels:
        app: cluster-minio
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - "cluster-minio"
            topologyKey: kubernetes.io/hostname
      tolerations:
      - operator: Exists
      containers:
      - name: cluster-minio
        env:
        - name: MINIO_ACCESS_KEY
          value: "cluster-minio"
        - name: MINIO_SECRET_KEY
          value: "cluster-minio123"
        image: 10.1.11.46/k8s-deploy/minio:latest
        args:
        - server
        - http://cluster-minio-{0...3}.cluster-minio.velero.svc.cluster.local/data
        ports:
        - containerPort: 9000
        # These volume mounts are persistent. Each pod in the StatefulSet
        # gets a volume mounted based on this field.
        volumeMounts:
        - name: data
          mountPath: /data
  # These are converted to volume claims by the controller
  # and mounted at the paths mentioned above.
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 100Mi
      # storageClassName must match the class created in step 2.
      # Read more: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
      storageClassName: local-storage
      volumeMode: Filesystem
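Apply minio.yaml and watch the pods come up. With volumeBindingMode: WaitForFirstConsumer and the no-provisioner class, the pods stay Pending until the PVs from the next step exist:
kubectl apply -f minio.yaml
kubectl -n velero get pods -l app=cluster-minio -w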
4. Create the PVs
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-0
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 1Gi
  hostPath:
    path: /minio-data-0
    type: ""
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: minio
          operator: In
          values:
          - "true"
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-1
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 1Gi
  hostPath:
    path: /minio-data-1
    type: ""
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: minio
          operator: In
          values:
          - "true"
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-2
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 1Gi
  hostPath:
    path: /minio-data-2
    type: ""
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: minio
          operator: In
          values:
          - "true"
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-3
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 1Gi
  hostPath:
    path: /minio-data-3
    type: ""
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: minio
          operator: In
          values:
          - "true"
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  volumeMode: Filesystem
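hostPath with type: "" performs no checks on the host directory, so it is safest to pre-create the data directories on every labeled node before applying (minio-pv.yaml is an illustrative file name):
mkdir -p /minio-data-{0,1,2,3}    # run on each node labeled minio=true
kubectl apply -f minio-pv.yaml
kubectl get pv                    # all four should reach Bound once the pods schedule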
5. Create the Service
apiVersion: v1
kind: Service
metadata:
  name: minio-nodeport
  namespace: velero
  labels:
    app: cluster-minio
spec:
  type: NodePort
  ports:
  - port: 9000
    name: minio
    targetPort: 9000
  selector:
    app: cluster-minio
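Apply it and note the NodePort Kubernetes assigns (file name again illustrative):
kubectl apply -f minio-svc.yaml
kubectl -n velero get svc minio-nodeport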
NFS mode
Prerequisite: the NFS server and its exports are already set up.
nfs-client.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    NFSADDR: 192.168.92.147
    NFSPATH: /nfs/
    type: NFS
  name: nfs-client
parameters:
  archiveOnDelete: "false"
provisioner: nfs-client-provisioner-nfs
reclaimPolicy: Delete
volumeBindingMode: Immediate
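Apply and verify (this walkthrough creates the PVs statically below, so the nfs-client provisioner only matters if you also want dynamic provisioning):
kubectl apply -f nfs-client.yaml
kubectl get sc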
1. StatefulSet
apiVersion: v1
kind: Service
metadata:
  name: minio
  labels:
    app: minio
spec:
  clusterIP: None
  ports:
  - port: 9000
    name: minio
  selector:
    app: minio
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: minio
spec:
  selector:
    matchLabels:
      app: minio
  serviceName: minio
  replicas: 4
  template:
    metadata:
      labels:
        app: minio
    spec:
      tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: "Exists"
        effect: "NoSchedule"
      containers:
      - name: minio
        env:
        - name: MINIO_ACCESS_KEY
          value: "minio"
        - name: MINIO_SECRET_KEY
          value: "minio123"
        image: minio/minio:RELEASE.2020-06-01T17-28-03Z
        args:
        - server
        - http://minio-{0...3}.minio.default.svc.cluster.local/data
        ports:
        - containerPort: 9000
        # These volume mounts are persistent. Each pod in the StatefulSet
        # gets a volume mounted based on this field.
        volumeMounts:
        - name: data
          mountPath: /data
  # These are converted to volume claims by the controller
  # and mounted at the paths mentioned above.
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
      # storageClassName must match the nfs-client class above.
      storageClassName: nfs-client
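After applying, the PVCs generated from the volumeClaimTemplates (data-minio-0 through data-minio-3) stay Pending until the matching PVs from the next step are created (minio-sts.yaml is an illustrative file name):
kubectl apply -f minio-sts.yaml
kubectl get pvc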
2. Create the PVs
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-0
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 1Gi
  nfs:
    path: /nfs/top/minio/0
    server: 192.168.*.*
  persistentVolumeReclaimPolicy: Delete
  storageClassName: nfs-client
  volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-1
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 1Gi
  nfs:
    path: /nfs/top/minio/1
    server: 192.168.*.*
  persistentVolumeReclaimPolicy: Delete
  storageClassName: nfs-client
  volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-2
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 1Gi
  nfs:
    path: /nfs/top/minio/2
    server: 192.168.*.*
  persistentVolumeReclaimPolicy: Delete
  storageClassName: nfs-client
  volumeMode: Filesystem
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: minio-pv-3
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 1Gi
  nfs:
    path: /nfs/top/minio/3
    server: 192.168.*.*
  persistentVolumeReclaimPolicy: Delete
  storageClassName: nfs-client
  volumeMode: Filesystem
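Apply the four PVs and confirm they bind to the waiting claims; the pods will only mount successfully once the directories from the next step exist on the NFS server (minio-pv.yaml is again an illustrative file name):
kubectl apply -f minio-pv.yaml
kubectl get pv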
3. Log in to the NFS server and create the directories
mkdir -p /nfs/top/minio/{0,1,2,3}
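Optionally confirm the export is visible from a cluster node (showmount ships with nfs-utils; the address is the NFSADDR annotated on the StorageClass above):
showmount -e 192.168.92.147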
4. Expose the service externally
apiVersion: v1
kind: Service
metadata:
  labels:
    app: minio
  name: minio-svc-nodeport
  namespace: default
spec:
  ports:
  - name: minio
    port: 9000
    protocol: TCP
    targetPort: 9000
    nodePort: 33007
  selector:
    app: minio
  sessionAffinity: None
  type: NodePort
5. Access
[root@host-239 minio]# kubectl get svc
NAME                 TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)          AGE
minio-svc-nodeport   NodePort   10.*.*.*     <none>        9000:33007/TCP   59s
Access MinIO via any node IP on port 33007.
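A quick smoke test with the MinIO client; <node-ip> is a placeholder, and recent mc releases use alias set (older ones use mc config host add):
mc alias set myminio http://<node-ip>:33007 minio minio123
mc mb myminio/test-bucket
mc ls myminio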
Load testing
References: https://www.cnblogs.com/yuhaohao/p/13099507.html
https://blog.csdn.net/ff_gogo/article/details/85252189
1. Install the JDK and other dependencies
yum install -y java-1.8.0-openjdk wget unzip nmap-ncat
2. Download
wget https://github.com/intel-cloud/cosbench/releases/download/v0.4.2.c4/0.4.2.c4.zip
3. Unzip and edit the configuration
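Unpack the release first (the 0.4.2.c4 archive extracts into a directory of the same name):
unzip 0.4.2.c4.zip
cd 0.4.2.c4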
The main change is the storage element in conf/s3-config-sample.xml: fill in the MinIO access key, secret key, and endpoint.
A workflow can contain multiple workstages, and each workstage can contain multiple works.
Each workstage corresponds to one kind of job, identified by the type field on its work: init (create buckets), write (create objects and write data), cleanup (delete objects), and dispose (delete buckets); the sample below also uses a prepare stage and a mixed read/write main stage.
The modified file:
<?xml version="1.0" encoding="UTF-8" ?>
<workload name="s3-sample" description="sample benchmark for s3">
  <storage type="s3" config="accesskey=minio;secretkey=minio123;endpoint=http://10.1.11.*:30780" />
  <workflow>
    <workstage name="init">
      <work type="init" workers="1" config="cprefix=s3testqwer;containers=r(1,2)" />
    </workstage>
    <workstage name="prepare">
      <work type="prepare" workers="1" config="cprefix=s3testqwer;containers=r(1,2);objects=r(1,10);sizes=c(128)KB" />
    </workstage>
    <workstage name="main">
      <work name="main" workers="8" runtime="30">
        <operation type="read" ratio="80" config="cprefix=s3testqwer;containers=u(1,2);objects=u(1,10)" />
        <operation type="write" ratio="20" config="cprefix=s3testqwer;containers=u(1,2);objects=u(11,20);sizes=c(128)KB" />
      </work>
    </workstage>
    <workstage name="cleanup">
      <work type="cleanup" workers="1" config="cprefix=s3testqwer;containers=r(1,2);objects=r(1,20)" />
    </workstage>
    <workstage name="dispose">
      <work type="dispose" workers="1" config="cprefix=s3testqwer;containers=r(1,2)" />
    </workstage>
  </workflow>
</workload>
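With the config in place, start COSBench and submit the workload (start-all.sh launches the controller plus a local driver; the controller web console defaults to http://<host>:19088/controller/index.html):
sh start-all.sh
sh cli.sh submit conf/s3-config-sample.xml
Run progress and the final report can be followed in the console; completed runs are archived under the archive/ directory.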