k8s之emptyDir、hostPath、configMap、secret、pv/pvc、
目錄
存儲卷概念
數據持久化
pod是由容器組成的,而容器宕機或停止后,數據就隨之丟失了,所以引出了存儲卷的概念,存儲卷就是為了pod保存數據而生的,
#存儲卷類型有很多 常用的四種比如 emptydir、hostpath、nfs以及雲存儲(ceph,glusterfs)等
1、emptyDir
#臨時掛載卷,emptyDir類型的volume在pod分配到node上時被創建,kubernetes會在node上自動分配 一個目錄,因此無需指定宿主機node上對應的目錄文件。這個目錄的初始內容為空,當Pod從node上移除時,emptyDir中的數據會被永久刪除。emptyDir Volume主要用於某些應用程序無需永久保存的臨時目錄。
總結:emptyDir不需要創建 、k8s隨機在node節點分配目錄、pod從node刪除,emptyDir也會立即刪除。
目的是為了多個pod直接數據臨時共享,比如共享日志,避免重復創建數據,
兩個pod掛載一個
1、創建一個存儲卷
2、掛載
#vim emptydir.yaml
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: emptydir
spec:
  replicas: 2  # two replicas (each Pod gets its own emptyDir instance)
  selector:
    matchLabels:
      app: emptydir
  template:
    metadata:
      labels:
        app: emptydir
    spec:
      containers:
        - name: busybox
          image: busybox
          command:
            - '/bin/sh'
            - '-c'
            - 'while true; do echo `hostname` > /opt/test/index.html; sleep 1; done'
          volumeMounts:
            # mount point inside the container
            # (space before '#' is required; '/opt/test/#...' would be parsed as part of the path)
            - mountPath: /opt/test/
              name: test01  # which volume to mount
      volumes:  # declare the volume
        - name: test01  # volume name
          emptyDir: {}  # volume type: node-local scratch dir, deleted with the Pod
#啟動
kubectl apply -f emptydir.yaml
#查看
kubectl get pods
#驗證
[root@k8s-master-01 k8s]# kubectl exec -it emptydir-64989648f-gbws8 -- sh
/ # cd /opt/
/opt # ls
test
/opt # cd test/
/opt/test # ll
sh: ll: not found
/opt/test # ls
index.html
/opt/test # cat index.html
emptydir-64989648f-gbws8
/opt/test # exit
2、hostPath
在宿主主機上創建一個存儲卷。
容器部署到哪一台主機上,就相當於跟當前主機創建一個存儲卷。
#通常用來關聯系統時間 字符編碼
運行在哪台主機上,就掛載到那台主機上
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: hostname
spec:
  selector:
    matchLabels:
      app: hostname
  template:
    metadata:
      labels:
        app: hostname
    spec:
      containers:
        - name: nginx
          image: nginx
          volumeMounts:
            # mount point inside the container
            - mountPath: /usr/share/nginx/html/
              name: test01
      volumes:
        - name: test01
          hostPath:
            path: /opt  # path on the host node the Pod is scheduled to
#啟動
#發現地址為10.244.1.24
curl 10.244.1.24
報403
#因為宿主主機上/opt目錄下沒有index.html文件
創建完之后再次curl 10.244.1.24
發現是index里面的內容

3、configMap
將配置資源化, 可以映射成一個文件、環境變量
#將配置文件寫入到k8s的配置清單中
configmap 一旦掛載,當前目錄中所有的文件全部刪除。
subPath 添加它可以保留文件,但是熱更新失效
1、映射成一個文件
2、映射成一個環境變量
Deployment.spec.template.spec.containers.envFrom
name: 上面內容
option: true
3.熱更新nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx
data:
  # '|' literal block scalar: preserves newlines for the multi-line file body
  nginx.conf: |
    user nginx;
    worker_processes auto;
    error_log /var/log/nginx/error.log;
    pid /run/nginx.pid;
    events {
        worker_connections 1024;
    }
    http {
        log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                        '$status $body_bytes_sent "$http_referer" '
                        '"$http_user_agent" "$http_x_forwarded_for"';
        access_log /var/log/nginx/access.log main;
        sendfile on;
        tcp_nopush on;
        tcp_nodelay on;
        keepalive_timeout 65;
        types_hash_max_size 4096;
        default_type application/octet-stream;
        include /etc/nginx/conf.d/*.conf;
    }
  default.conf: |
    server {
        listen 80;
        server_name www.test.com;
        location / {
            root /opt/;
            index index.html;
        }
    }
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nginx
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
          volumeMounts:
            # base dir that the items' relative paths resolve under;
            # NOTE: mounting over /etc/nginx/ hides everything else in that dir
            - mountPath: /etc/nginx/
              name: nginxconf  # volume name
      volumes:
        - name: nginxconf
          configMap:
            name: nginx  # the ConfigMap defined above
            items:
              - key: nginx.conf
                path: ./nginx.conf  # relative path under mountPath
              - key: default.conf
                path: ./conf.d/default.conf
4、secret
加密版的configMap。 密碼加密放在容器里
#加密 echo -n '123456' | base64 稍微那么安全一點
1、普通類型
---
kind: Secret
apiVersion: v1
metadata:
  name: secret
data:
  # value under `data:` must be base64-encoded, otherwise login fails
  MYSQL_ROOT_PASSWORD: YTZYQXUlKlZpXktiW0RVUk1ZI3gyc2cjZyNecm1oLl0=
# original had a bare `type:` (parses as null); Opaque is the generic user-defined type
type: Opaque
2、存放docker倉庫用戶名密碼
[root@k8s-master-01 ~]# kubectl create secret docker-registry aliyun01 --docker-server=registry.cn-hangzhou.aliyuncs.com --docker-username=yangyang091022 --docker-password=123456
3、存放集群秘鑰證書
#創建證書
---
apiVersion: v1
kind: Secret
metadata:
  name: www-test-com
  namespace: default
type: kubernetes.io/tls  # TLS secret: requires tls.crt + tls.key keys
immutable: false
data:
  # '>-' folded scalar: the long base64 payload is one logical line
  tls.crt: >-
    LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQnBEQ0NBUTBDQVFBd1pERUxNQWtHQTFVRUJoTUNRMDR4RVRBUEJnTlZCQWdNQ0ZOb1lXNW5TR0ZwTVJBdwpEZ1lEVlFRSERBZFJhVzVuY0FoUU1Rc3dDUVlEVlFRS0RBSnpZakVNTUFvR0ExVUVDd3dEWW05NU1SVXdFd1lEClZRUUREQXhpWldGMWRHbG1kV3hDYjNrd2daOHdEUVlKS29aSWh2Y05BUUVCQlFBRGdZMEFNSUdKQW9HQkFNMGQKNjROVFhMaGcwbGJ2WlBtV1krZmsvdEpqYnFCSk1JU2p3MEhScjd0QjhZdGxNa3F5QlptRWp5UGhuKzhjazVWRgpvQVVhZXhLcCs5UHY1VkFsWGkrMzcySC9YaFJJTk1MTWpQaHNmbmRIWUZ1OGpPM095N2VwQlMrWVprOEk2RlZaCjlXUjBIL0l5WXBEWG1veHNFWlk1TXE1YVhwS0x1OTJ2MzZYTHBzbEZBZ01CQUFHZ0FEQU5CZ2txaGtpRzl3MEIKQVFzRkFBT0JnUUNhV2U4UzJVRVBzZ29CSDlSb1lQc2Q0cCtMM3F5L2tpZ1hoYmNHNjBOV3NGRHdLNUxVb2txYgpxUitrZ2JBbm9Qd01aSDE2MlVPZTh0VmhoWEM2bEFmVS94ZE1PV1Nvc0djZzZ1L0ViQzJhMHlhSTNpcEpVWnRpCmQxaDVsV3JCdHhYZVhyUktKK1grVHFqZzFwT0xmU1lBTGhoWGJPY3p4MVp5QThsVlpSZkFvUT09Ci0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQ==
  tls.key: >-
    LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpQcm9jLVR5cGU6IDQsRU5DUllQVEVECkRFSy1JbmZvOiBJREVBLUNCQyxENDg3QkQzM0FDRjhFNDQyCgp0QUJFWUVreGlFLy9GcytIYmE2VHl6M2pkSE1YSjAvR09UcDkwRUV0OWk4NUx4SWdaQUJTWkZDb3BIU0hRelpPCjlZSm5UeGpZQkdOYlFscEcyWDBVcnNqVm9id0FUL0ZERmZaaE9DUlNVeUJhdUcrRVl1QW9HMlpkQUZEL2YzZ1cKQUk2VGIySU5tV3JOeWg4VUFxT3lpUmw4TThOWGdFUkx6MkhZSzhnVnJkV1JuRWRUa3Nvck91RTQ2ck96UTlqUAp2RkNQd0QwUWVBU01uWkNaY1JLWllYU0ZiaktzclgybXMzVEZyay9wWS9SczlkWDBrTmZOd3U0ampzeUNvbEp0CndtdStDbEtNWmJVdGpUejZFQ2NnVm05SXlQeldlUld1V0IrelRhZE5yc21BZFVYL3FPNnR1KytReUNsT25LOWoKZkRXSmJSUzQvQW5oUzh2S2F0ZVN5cGJaL2RFekdEdjhKYWRkQWZOSTRVOTRNWEtYRXgyMDVrcHRhR3dmTUxGKwpqaU1GblhtaVN1OTFFZmlwSlhuaSthMGtMR1lrY0M1RjdNUXYxaEtTK2RXOUI2MzhETnVJM0NJS1d6RzNla2lNCnB3WVUyWXVCSGp6MkZtK0pxUjRpSksxcTByczRSSUxxYjU2QXJTenBMUjNEK05MclNhM0FJMDRrWUwwa2dNeE4KOGJkYzNsQ255MkVVVisxVXVNTFZoWW5EbWZNRE0vZ1cvM2xDZERXVnAvdlNrRklSUkhtVDRKR1VpUWZabjVSKwpnZkIxdWZ4NUVvd3FHN0FWWUxDZ2ZBbFJsTjRpZzVxa21rMi9RM0F0ZVJGdE9PQjVJbTRTMFlvWUh3OFlPYjdkClVheXJPUmF5SENwcVcvNjRYOEpCTEl6eXZiK01GTXBmYThBVTl5Qy8zTFRlZUl2SlR2clZnanorZ2JZNUx5akMKdEFKSTArWkZGUkFsZXN4SW1NbW9wVkEzTnVGNTdhU2FFQm9jMWZrcHB6RUNDTGVPZXlSeEFYekpiUXRpekkxeApqZG41WVJkZEZETXVIdC95RkxxVCswa25ZYWR0eGVCVG01ZCtjZ3N0YUJHdUpSQWVFZzk3VWc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQ==
---
kind: Service
apiVersion: v1
metadata:
  name: aliyun01
spec:
  selector:
    app: aliyun01  # routes to Pods labeled app=aliyun01
  ports:
    - port: 80        # Service port
      targetPort: 80  # container port
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: aliyun01
spec:
  selector:
    matchLabels:
      app: aliyun01
  template:
    metadata:
      labels:
        app: aliyun01
    spec:
      # registry credentials tried in order when pulling the private image;
      # NOTE(review): only `aliyun01` was created earlier — confirm `aliyun` exists
      imagePullSecrets:
        - name: aliyun
        - name: aliyun01
      containers:
        - name: aliyun01
          image: registry.cn-hangzhou.aliyuncs.com/alvinos/nginx:v12
5、pv/pvc
pv : 具體存儲卷 類似於飯店后面的食材,提前准備好 集群級資源
pvc : 存儲請求 類似於客人點的菜,飯店需要根據pvc去尋找對應的pv 命名空間級資源
1、pv
[root@k8s-master-01 v1]# mkdir -p /nfs/v{1..5}
[root@k8s-master-01 v1]# cd /nfs/v1
[root@k8s-master-01 v1]# yum install nfs-utils rpcbind -y
2、vim /etc/exports
/nfs/v1 192.168.11.0/24(rw,sync,all_squash,anonuid=666,anongid=666)
/nfs/v2 192.168.11.0/24(rw,sync,all_squash,anonuid=666,anongid=666)
/nfs/v3 192.168.11.0/24(rw,sync,all_squash,anonuid=666,anongid=666)
/nfs/v4 192.168.11.0/24(rw,sync,all_squash,anonuid=666,anongid=666)
/nfs/v5 192.168.11.0/24(rw,sync,all_squash,anonuid=666,anongid=666)
3、systemctl start nfs-server rpcbind
4、[root@k8s-master-01 v1]# showmount -e
Export list for k8s-master-01:
/nfs/v5 192.168.11.0/24
/nfs/v4 192.168.11.0/24
/nfs/v3 192.168.11.0/24
/nfs/v2 192.168.11.0/24
/nfs/v1 192.168.11.0/24
5.1、訪問策略
pv的訪問策略有四種:
1、ReadWriteMany : 多路可讀可寫
2、ReadWriteOnce :單路可讀可寫
3、ReadOnlyMany :多路只讀
4、ReadWriteOncePod : 單個Pod可讀可寫(1.22版本以上才有)
5.2、配置清單
# pv.yaml
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv001
  labels:
    app: pv001
spec:
  nfs:
    path: /nfs/v2
    server: 192.168.11.206  # NFS server (master) IP
  accessModes:
    - "ReadWriteMany"
    - "ReadWriteOnce"
  capacity:
    storage: 2Gi
---
kind: PersistentVolume
apiVersion: v1
metadata:
  # original duplicated `pv001`; a second PV needs a unique name,
  # otherwise the second apply just overwrites the first
  name: pv002
  labels:
    app: pv002
spec:
  nfs:
    path: /nfs/v1
    server: 192.168.11.206  # NFS server (master) IP
  accessModes:
    - "ReadWriteMany"
    - "ReadWriteOnce"
  capacity:
    storage: 10Gi
---
# pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc
  namespace: default
spec:
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      # 6Gi RWX: too big for the 2Gi PV, so it binds the 10Gi one
      storage: "6Gi"
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: pv-pvc
spec:
selector:
matchLabels:
app: pv-pvc
template:
metadata:
labels:
app: pv-pvc
spec:
containers:
- name: nginx
image: nginx
volumeMounts:
- mountPath: /usr/share/nginx/html
name: pvc #綁定存儲卷的名字
volumes:
- name: pvc
persistentVolumeClaim:
claimName: pvc #pvc的名字
---
[root@k8s-master-01 k8s]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pv001 2Gi RWO,RWX Retain Available 11s
