Deploying Kubernetes Automatically with Ansible
Host List
IP | Hostname | Role
---|---|---
192.168.7.111 | kube-master1, kube-master1.pansn.cn | K8s cluster master node 1
192.168.7.110 | kube-master2, kube-master2.pansn.cn | K8s cluster master node 2
192.168.7.109 | kube-master3, kube-master3.pansn.cn | K8s cluster master node 3
192.168.7.108 | node1, node1.pansn.cn | K8s cluster worker node 1
192.168.7.107 | node2, node2.pansn.cn | K8s cluster worker node 2
192.168.7.106 | node3, node3.pansn.cn | K8s cluster worker node 3
192.168.7.105 | etcd-node1, etcd-node1.pansn.cn | Cluster state store (etcd)
192.168.7.104 | ha1, ha1.pansn.cn | K8s master access entry 1 (high availability and load balancing)
192.168.7.103 | ha2, ha2.pansn.cn | K8s master access entry 2 (high availability and load balancing)
192.168.7.102 | harbor1, harbor.pansn.cn | Container image registry
192.168.7.101 | harbor2, harbor.pansn.cn | VIP
1. Environment Preparation
Operating system: Ubuntu 18.04.3
1.1. Install Ansible
1. Prepare the environment on every host
~# apt update
~# apt install python2.7
~# ln -sv /usr/bin/python2.7 /usr/bin/python
2. Install Ansible on the control node
root@k8s-master1:~# apt install ansible
3. Set up key-based communication between the Ansible server and the managed hosts
root@k8s-master1:~# cat > batch-copyid.sh << 'EOF'
#!/bin/bash
#
# simple script to batch deliver pubkey to some hosts.
#
IP_LIST="
192.168.7.101
192.168.7.102
192.168.7.103
192.168.7.104
192.168.7.105
192.168.7.106
192.168.7.107
192.168.7.108
192.168.7.109
192.168.7.110
"
dpkg -l | grep sshpass
[ $? -eq 0 ] || { echo "sshpass is not installed, installing it now."; sleep 3; apt install sshpass -y; }
ssh-keygen -f /root/.ssh/id_rsa -P ''
for host in ${IP_LIST}; do
sshpass -p 123456 ssh-copy-id -o StrictHostKeyChecking=no ${host}
if [ $? -eq 0 ]; then
echo "copy pubkey to ${host} done."
else
echo "copy pubkey to ${host} failed."
fi
done
EOF
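A quick way to run the script and then confirm that key-based login works (the target below is just one host from IP_LIST; BatchMode makes ssh fail instead of prompting for a password):
bash batch-copyid.sh
ssh -o BatchMode=yes 192.168.7.110 hostname   # should print the remote hostname without a password prompt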
1.2. Time Synchronization
One host needs to act as the time synchronization server for the cluster; here kube-master1 (192.168.7.111) fills that role.
1. Install chrony (required on all nodes)
root@k8s-master1:~# apt install chrony -y
2. Server-side configuration (on kube-master1)
vim /etc/chrony/chrony.conf
# 1. Configure upstream time sources; in China, Alibaba's NTP server can be added
server ntp1.aliyun.com iburst
# 2. Allow clients in this subnet to synchronize
allow 192.168.7.0/24
# 3. Keep serving time even when upstream sources are unreachable
local stratum 10
3. Start the service
systemctl start chrony
systemctl enable chrony
4. Check synchronization status
Check the status of the NTP sources:
chronyc sources -v
Check the source statistics:
chronyc sourcestats -v
Check whether the NTP sources are online:
chronyc activity -v
Show detailed tracking information:
chronyc tracking -v
Client node configuration
5. Modify the configuration file (on all other nodes)
vim /etc/chrony/chrony.conf
server 192.168.7.111 iburst
6. Start the service
systemctl start chrony
systemctl enable chrony
7. Check synchronization status
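On each client node, chronyc should show 192.168.7.111 as the selected time source; a minimal check:
chronyc sources -v    # the line for 192.168.7.111 should be marked with ^*
chronyc tracking      # "Leap status : Normal" indicates the clock is synchronized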
8. Verify from the control node that time is synchronized everywhere
ansible all -m shell -a 'timedatectl'
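The ansible all call above assumes the managed hosts are already listed in Ansible's default inventory; a minimal /etc/ansible/hosts for these ad-hoc checks might look like the following (the group name is illustrative, and the full kubeasz inventory is configured later in section 2.1):
# /etc/ansible/hosts -- minimal sketch for the ad-hoc checks in this section
[k8s_hosts]
192.168.7.[101:110]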
1.3. Download the Files Needed to Deploy the Kubernetes Cluster
1. Download the script
curl -C- -fLO --retry 3 https://github.com/easzlab/kubeasz/releases/download/2.2.0/easzup
2. Run the script
chmod +x easzup
./easzup -D
This first run fails with an error because docker.service is masked (see the transcript in step 5 below).
3. Create the docker service unit
Based on the easzup script, write the docker service unit file manually.
# try to unmask docker.service, but the unit file itself gets removed
root@kube-master1:/etc/ansible# systemctl unmask docker.service
Removed /etc/systemd/system/docker.service.
# open the easzup script, copy the section that generates docker.service, and write the unit file yourself
root@kube-master1:/etc/ansible# vim easzup
...
echo "[INFO] generate docker service file"
cat > /etc/systemd/system/docker.service << EOF
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io
[Service]
Environment="PATH=/opt/kube/bin:/bin:/sbin:/usr/bin:/usr/sbin"
ExecStart=/opt/kube/bin/dockerd
ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT
ExecReload=/bin/kill -s HUP \$MAINPID
Restart=on-failure
RestartSec=5
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
4. Start the docker service
root@docker-node1:/opt# systemctl daemon-reload
root@docker-node1:/opt# systemctl restart docker.service
If docker is not running when easzup is executed, it fails again.
5. Re-run the download
./easzup -D
For reference, the session below shows the initial download, the failing first run, and the fix applied:
root@kube-master1:/etc/ansible# curl -C- -fLO --retry 3 https://github.com/easzlab/kubeasz/releases/download/2.2.0/easzup
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 597 100 597 0 0 447 0 0:00:01 0:00:01 --:--:-- 447
100 12965 100 12965 0 0 4553 0 0:00:02 0:00:02 --:--:-- 30942
root@kube-master1:/etc/ansible# ls
ansible.cfg easzup hosts
root@kube-master1:/etc/ansible# chmod +x easzup
# start the download
root@kube-master1:/etc/ansible# ./easzup -D
[INFO] Action begin : download_all
[INFO] downloading docker binaries 19.03.5
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 60.3M 100 60.3M 0 0 1160k 0 0:00:53 0:00:53 --:--:-- 1111k
[INFO] generate docker service file
[INFO] generate docker config file
[INFO] prepare register mirror for CN
[INFO] enable and start docker
Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable docker
Failed to enable unit: Unit file /etc/systemd/system/docker.service is masked.
Failed to restart docker.service: Unit docker.service is masked. # error: docker.service is masked
[ERROR] Action failed : download_all # the download action failed
# write the docker.service unit file (contents copied from the easzup script, see step 3 above)
root@kube-master1:/etc/ansible# vim /etc/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io
[Service]
Environment="PATH=/opt/kube/bin:/bin:/sbin:/usr/bin:/usr/sbin"
ExecStart=/opt/kube/bin/dockerd
ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT
ExecReload=/bin/kill -s HUP $MAINPID
Restart=on-failure
RestartSec=5
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
Directory and files after a successful download:
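A quick way to confirm the kubeasz 2.2.0 workspace is in place (the exact listing may differ slightly between releases):
ls /etc/ansible        # expect the numbered playbooks 01.prepare.yml ... 07.cluster-addon.yml plus roles/, example/, down/, bin/
ls /etc/ansible/down   # offline images and binaries fetched by easzup -D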
2. Deploying Kubernetes
Official kubeasz deployment documentation on Gitee: https://gitee.com/ilanni/kubeasz/. It is recommended to read through the documentation before deploying on your own.
The official documentation describes two deployment methods, step-by-step and all-in-one. To get familiar with the installation process we use the step-by-step approach.
2.1. Basic Parameters
2.1.1. Required Configuration
cd /etc/ansible && cp example/hosts.multi-node hosts
Then modify this hosts file according to your actual environment.
root@kube-master1:/etc/ansible# cp example/hosts.multi-node ./hosts
root@kube-master1:/etc/ansible# cat hosts
# 'etcd' cluster should have odd member(s) (1,3,5,...)
# variable 'NODE_NAME' is the distinct name of a member in 'etcd' cluster
[etcd]
192.168.7.105 NODE_NAME=etcd1
# master node(s)
[kube-master]
192.168.7.111
192.168.7.110
192.168.7.109
# work node(s)
[kube-node]
192.168.7.108
192.168.7.107
192.168.7.106
# [optional] harbor server, a private docker registry
# 'NEW_INSTALL': 'yes' to install a harbor server; 'no' to integrate with existed one
# 'SELF_SIGNED_CERT': 'no' you need put files of certificates named harbor.pem and harbor-key.pem in directory 'down'
[harbor]
192.168.7.101 HARBOR_DOMAIN="harbor.pansn.cn" NEW_INSTALL=no SELF_SIGNED_CERT=yes
192.168.7.102 HARBOR_DOMAIN="harbor.pansn.cn" NEW_INSTALL=no SELF_SIGNED_CERT=yes
# [optional] loadbalance for accessing k8s from outside
[ex-lb]
192.168.7.103 LB_ROLE=backup EX_APISERVER_VIP=192.168.7.248 EX_APISERVER_PORT=8443
192.168.7.104 LB_ROLE=master EX_APISERVER_VIP=192.168.7.248 EX_APISERVER_PORT=8443
# [optional] ntp server for the cluster
[chrony]
#192.168.1.1
[all:vars]
# --------- Main Variables ---------------
# Cluster container-runtime supported: docker, containerd
CONTAINER_RUNTIME="docker"
# Network plugins supported: calico, flannel, kube-router, cilium, kube-ovn
CLUSTER_NETWORK="flannel"
# Service proxy mode of kube-proxy: 'iptables' or 'ipvs'
PROXY_MODE="ipvs"
# K8S Service CIDR, not overlap with node(host) networking
SERVICE_CIDR="10.68.0.0/16"
# Cluster CIDR (Pod CIDR), not overlap with node(host) networking
CLUSTER_CIDR="172.20.0.0/16"
# NodePort Range
NODE_PORT_RANGE="20000-40000"
# Cluster DNS Domain
CLUSTER_DNS_DOMAIN="cluster.local."
# -------- Additional Variables (don't change the default value right now) ---
# Binaries Directory
bin_dir="/opt/kube/bin"
# CA and other components cert/key Directory
ca_dir="/etc/kubernetes/ssl"
# Deploy Directory (kubeasz workspace)
base_dir="/etc/ansible"
2.1.2. Optional Configuration
These can be left unchanged on first use; see the configuration guide for details.
They mainly cover per-component customizations, and the options for individual components may keep growing.
A cluster can be created with the default values without changing any of them.
The cluster can also be tuned to actual needs; common examples:
Configure the load-balancing algorithm of the lb nodes: set the variable BALANCE_ALG: "roundrobin" in roles/lb/defaults/main.yml
Configure Docker registry mirrors for China: adjust the relevant variables in roles/docker/defaults/main.yml
Configure apiserver support for a public domain name: adjust the relevant variables in roles/kube-master/defaults/main.yml
Configure the flannel image version: adjust the relevant variables in roles/flannel/defaults/main.yml
Choose which addon components to deploy: edit roles/cluster-addon/defaults/main.yml
2.1.3. Verify the Ansible Setup
ansible all -m ping
2.2. Create Certificates and Prepare the Environment
Layout of the deploy role:
roles/deploy/
├── defaults
│   └── main.yml               # config: certificate validity, kubeconfig settings
├── files
│   └── read-group-rbac.yaml   # RBAC permissions for the read-only user
├── tasks
│   └── main.yml               # main task file
└── templates
    ├── admin-csr.json.j2      # CSR template for the admin certificate used by kubectl
    ├── ca-config.json.j2      # CA configuration template
    ├── ca-csr.json.j2         # CA certificate signing request template
    ├── kube-proxy-csr.json.j2 # CSR template for the kube-proxy certificate
    └── read-csr.json.j2       # CSR template for the read-only certificate used by kubectl
root@kube-master1:/etc/ansible# ansible-playbook 01.prepare.yml
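After 01.prepare.yml finishes, the CA and the admin kubeconfig should exist on the deploy node; a quick sanity check (paths follow the ca_dir and base_dir variables from the hosts file):
ls /etc/kubernetes/ssl   # CA and admin certificates generated by the deploy role
kubectl config view      # kubeconfig created for the cluster admin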
2.3. Deploy the etcd Cluster
roles/etcd/
root@kube-master1:/etc/ansible# tree roles/etcd/
roles/etcd/
├── clean-etcd.yml
├── defaults
│   └── main.yml
├── tasks
│   └── main.yml
└── templates
    ├── etcd-csr.json.j2   # certificate details can be adjusted before deployment
    └── etcd.service.j2
root@kube-master1:/etc/ansible# ansible-playbook 02.etcd.yml
Verify etcd on any etcd node:
systemctl status etcd    # check service status
journalctl -u etcd       # check the logs
Run the following command on any etcd cluster node:
# set the shell variable $NODE_IPS according to the [etcd] section of the hosts file
export NODE_IPS="192.168.7.105"    # a single etcd member in this environment
for ip in ${NODE_IPS}; do
ETCDCTL_API=3 etcdctl \
--endpoints=https://${ip}:2379 \
--cacert=/etc/kubernetes/ssl/ca.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem \
endpoint health; done
**The etcd cluster is working properly when every endpoint reports healthy.**
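With the single etcd member configured here, healthy output looks roughly like the following (the timing value is illustrative):
https://192.168.7.105:2379 is healthy: successfully committed proposal: took = 2.634898ms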
2.4. Deploy the Docker Service
roles/docker/:
root@kube-master1:/etc/ansible# tree roles/docker/
roles/docker/
├── defaults
│   └── main.yml          # variable definitions
├── files
│   ├── docker            # bash completion for docker
│   └── docker-tag        # small helper for querying image tags
├── tasks
│   └── main.yml          # main task file
└── templates
    ├── daemon.json.j2    # docker daemon configuration template
    └── docker.service.j2 # service unit template
root@kube-master1:/etc/ansible# ansible-playbook 03.docker.yml
Verify after a successful deployment:
~# systemctl status docker # service status
~# journalctl -u docker # logs
~# docker version
~# docker info
2.5. Deploy the Master Nodes
Deploying a master node involves three components: apiserver, scheduler, and controller-manager. Of these:
- The apiserver provides the REST API for cluster management, including authentication/authorization, data validation, and cluster state changes
- Only the API server talks to etcd directly
- All other components query or modify data through the API server
- It is the hub for data exchange and communication between the other components
- The scheduler assigns Pods to nodes in the cluster
- It watches kube-apiserver for Pods that have not yet been assigned to a node
- It assigns nodes to these Pods according to the scheduling policy
- The controller-manager is a collection of controllers; it monitors the cluster state through the apiserver and keeps the cluster in the desired state
roles/kube-master/
├── defaults
│   └── main.yml
├── tasks
│   └── main.yml
└── templates
    ├── aggregator-proxy-csr.json.j2
    ├── basic-auth.csv.j2
    ├── basic-auth-rbac.yaml.j2
    ├── kube-apiserver.service.j2
    ├── kube-apiserver-v1.8.service.j2
    ├── kube-controller-manager.service.j2
    ├── kubernetes-csr.json.j2
    └── kube-scheduler.service.j2
root@kube-master1:/etc/ansible# ansible-playbook 04.kube-master.yml
Verifying the master cluster
After ansible-playbook 04.kube-master.yml completes successfully, verify the main master components:
# check service status
systemctl status kube-apiserver
systemctl status kube-controller-manager
systemctl status kube-scheduler
# check the service logs
journalctl -u kube-apiserver
journalctl -u kube-controller-manager
journalctl -u kube-scheduler
Running kubectl get componentstatus should show:
root@kube-master3:~# kubectl get componentstatus
NAME STATUS MESSAGE ERROR
etcd-0 Healthy {"health":"true"}
scheduler Healthy ok
controller-manager Healthy ok
2.6. Deploy the Worker Nodes
The kube-node hosts are the nodes that run workloads. The kube-master nodes must be deployed first. Each worker node needs the following components:
- docker: runs the containers
- kubelet: the main agent on a kube-node
- kube-proxy: publishes application services and load-balances them
- haproxy: forwards requests to the multiple apiservers; see the HA-2x architecture docs
- calico: provides the container network (or another network plugin)
roles/kube-node/
├── defaults
│   └── main.yml               # variable definitions
├── tasks
│   ├── main.yml               # main task file
│   ├── node_lb.yml            # haproxy installation tasks
│   └── offline.yml            # offline installation of haproxy
└── templates
    ├── cni-default.conf.j2    # default CNI plugin configuration template
    ├── haproxy.cfg.j2         # haproxy configuration template
    ├── haproxy.service.j2     # haproxy service unit template
    ├── kubelet-config.yaml.j2 # standalone kubelet configuration file
    ├── kubelet-csr.json.j2    # kubelet CSR template
    ├── kubelet.service.j2     # kubelet service unit template
    └── kube-proxy.service.j2  # kube-proxy service unit template
root@kube-master1:/etc/ansible# ansible-playbook 05.kube-node.yml
Verify node status
systemctl status kubelet      # check status
systemctl status kube-proxy
journalctl -u kubelet         # check logs
journalctl -u kube-proxy
Run kubectl get node; you should see something similar to the following.
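The exact versions and ages will differ; the values below (based on the kubeasz 2.2.0 defaults) are only illustrative:
root@kube-master1:/etc/ansible# kubectl get node
NAME            STATUS                     ROLES    AGE   VERSION
192.168.7.106   Ready                      node     2m    v1.17.2
192.168.7.107   Ready                      node     2m    v1.17.2
192.168.7.108   Ready                      node     2m    v1.17.2
192.168.7.109   Ready,SchedulingDisabled   master   5m    v1.17.2
192.168.7.110   Ready,SchedulingDisabled   master   5m    v1.17.2
192.168.7.111   Ready,SchedulingDisabled   master   5m    v1.17.2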
2.7. Deploy the Cluster Network
For this lab we choose flannel. When all nodes sit on the same layer-2 network, flannel's host-gw backend avoids the UDP encapsulation overhead of VXLAN and is probably the most efficient choice; calico likewise offers an IPinIP option for L3 fabrics, using IP-in-IP tunnel encapsulation. Both plugins therefore fit many real-world scenarios.
root@kube-master1:/etc/ansible# tree roles/flannel/
roles/flannel/
├── defaults
│   └── main.yml
├── tasks
│   └── main.yml
└── templates
    └── kube-flannel.yaml.j2
# make sure CLUSTER_NETWORK is set to flannel
root@kube-master1:/etc/ansible# grep "CLUSTER_NETWORK" hosts
CLUSTER_NETWORK="flannel"
root@kube-master1:/etc/ansible# ansible-playbook 06.network.yml
Verify the flannel network
After flannel is installed, verify it as follows (wait for the images to finish downloading; even with the Docker registry mirror configured earlier this can be slow, so make sure the containers below are running before moving on to the next verification steps):
kubectl get pod --all-namespaces
Create a few test pods in the cluster: kubectl run test --image=busybox --replicas=3 sleep 30000
From each node, ping the three pod IP addresses and make sure they are reachable, e.g.:
ping 172.20.3.5
ping 172.20.4.10
ping 172.20.5.7
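The three addresses above are only examples; the actual pod IPs can be listed first (the run=test label is what this version of kubectl run attaches to the pods -- an assumption that can be checked with kubectl get pod --show-labels):
kubectl get pod -o wide -l run=test    # note the IP column, then ping those addresses from every node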
2.8. Deploy Cluster Add-ons
root@kube-master1:/etc/ansible# tree roles/cluster-addon/
roles/cluster-addon/
├── defaults
│   └── main.yml
├── tasks
│   ├── ingress.yml
│   └── main.yml
└── templates
    ├── coredns.yaml.j2
    ├── kubedns.yaml.j2
    └── metallb
        ├── bgp.yaml.j2
        ├── layer2.yaml.j2
        └── metallb.yaml.j2
root@kube-master1:/etc/ansible# ansible-playbook 07.cluster-addon.yml
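Once the play finishes, the enabled add-ons land in the kube-system namespace; a minimal verification (the DNS Service is conventionally named kube-dns even when CoreDNS is deployed -- treat that name as an assumption):
kubectl get pod,svc -n kube-system -o wide
kubectl get svc -n kube-system kube-dns      # cluster DNS service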
2.9. Verify Network Connectivity
1. Test Pod manifest
root@kube-master1:/etc/ansible# cat /opt/k8s-data/pod-ex.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod
  labels:
    app: nginx
spec:
  containers:
  - name: nginx-pod
    image: nginx:1.16.1
2. Create the Pod
root@kube-master1:/etc/ansible# kubectl apply -f /opt/k8s-data/pod-ex.yaml
pod/nginx-pod created
root@kube-master1:/etc/ansible# kubectl get -f /opt/k8s-data/pod-ex.yaml -w
NAME READY STATUS RESTARTS AGE
nginx-pod 1/1 Running 0 8s
3. Verify network connectivity
root@kube-master1:~# kubectl exec -it nginx-pod bash
root@nginx-pod:/# apt update
root@nginx-pod:/# apt install iputils-ping
ping 172.20.4.9
ping 192.168.7.111
ping www.baidu.com
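Cluster DNS can be checked from inside the same pod; dnsutils is not part of the nginx image, so install it first (the service name relies on the CLUSTER_DNS_DOMAIN=cluster.local value from the hosts file):
root@nginx-pod:/# apt install -y dnsutils
root@nginx-pod:/# nslookup kubernetes.default.svc.cluster.local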
3. High Availability and Private Registry Deployment
3.1. Deploy keepalived + haproxy
(install on both ha nodes)
apt install keepalived haproxy
3.2. Configure keepalived
1. Configuration on the MASTER node
root@ha1:~# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
acassen
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
}
vrrp_instance VI_1 {
state MASTER
interface eth0
garp_master_delay 10
smtp_alert
virtual_router_id 55
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.7.200 dev eth0 label eth0:2
}
}
2. Configuration on the BACKUP node
root@ha2:~# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
acassen
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
}
vrrp_instance VI_1 {
state BACKUP    # changed to BACKUP
interface eth0
garp_master_delay 10
smtp_alert
virtual_router_id 55
priority 80    # lower priority than the MASTER
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.7.200 dev eth0 label eth0:2    # same VIP as on the MASTER node
}
}
3. Adjust kernel parameters (required on all nodes)
~# vim /etc/sysctl.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
net.ipv4.ip_forward = 1        # without this haproxy fails to start and the VIP cannot fail over
net.ipv4.ip_nonlocal_bind = 1
4. Start the service
systemctl restart keepalived.service
systemctl enable keepalived.service
5. Verify VIP failover
Stop keepalived on the MASTER node and check whether the VIP moves to the BACKUP node.
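A minimal failover test, assuming ha1 currently holds the VIP (the interface name follows the eth0 used in the configuration above):
systemctl stop keepalived                  # on the MASTER node, simulate a failure
ip addr show eth0 | grep 192.168.7.200     # on the BACKUP node, the VIP should now be present
systemctl start keepalived                 # on the MASTER node, restore the original state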
3.3. Configure haproxy
1. Modify the haproxy configuration
vim /etc/haproxy/haproxy.cfg
global
maxconn 100000
#chroot /usr/local/haproxy
uid 99
gid 99
daemon
nbproc 1
pidfile /run/haproxy.pid # pid file path
stats socket /run/haproxy/admin.sock mode 600 level admin # stats socket path
log 127.0.0.1 local3 info
defaults
option http-keep-alive
option forwardfor
maxconn 100000
mode http
timeout connect 300000ms
timeout client 300000ms
timeout server 300000ms
listen stats
mode http
bind 0.0.0.0:9999
stats enable
log global
stats uri /haproxy-status
stats auth haadmin:123456
listen kubernetes_cluster
mode tcp
balance roundrobin
bind 192.168.7.200:6443
server 192.168.7.111 192.168.7.111:6443 check inter 3s fall 3 rise 5
server 192.168.7.110 192.168.7.110:6443 check inter 3s fall 3 rise 5
server 192.168.7.109 192.168.7.109:6443 check inter 3s fall 3 rise 5
2. Start the service
systemctl start haproxy
systemctl enable haproxy
3. Check that haproxy is running and port 9999 is listening
ps -ef|grep haproxy
4. Copy the configuration to the other node
scp /etc/haproxy/haproxy.cfg 192.168.7.104:/etc/haproxy
5. View the haproxy status page
Visit http://<VIP>:9999/haproxy-status and log in with haadmin / 123456 (as configured above).
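Beyond the status page, the forwarding path itself can be checked by contacting kube-apiserver through the VIP; any TLS answer from the apiserver (even an authorization error) shows that haproxy is forwarding to the masters:
curl -k https://192.168.7.200:6443/version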
3.4. Deploy Harbor
Harbor depends on Docker, so Docker must be installed first.
1. Install Docker from the Ansible control node
vim /etc/ansible/hosts
ansible -i /etc/ansible/hosts harbor -m shell -a 'sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common'
ansible -i /etc/ansible/hosts harbor -m shell -a 'curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | sudo apt-key add -'
ansible -i /etc/ansible/hosts harbor -m shell -a 'sudo add-apt-repository "deb [arch=amd64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"'
ansible -i /etc/ansible/hosts harbor -m shell -a 'apt-cache madison docker-ce'
Choose the version you need and install it:
ansible -i /etc/ansible/hosts harbor -m shell -a 'apt-get -y install docker-ce=5:19.03.4~3-0~ubuntu-bionic'
Start the service:
systemctl start docker
systemctl enable docker
2. Install docker-compose
apt install python-pip -y
pip install --upgrade pip
pip install docker-compose
3. Generate the SSL certificate
apt install openssl
sudo apt-get install libssl-dev
mkdir /usr/local/src/harbor/certs/ -pv
Generate the private key:
openssl genrsa -out /usr/local/src/harbor/certs/harbor-ca.key 2048
Generate the certificate:
openssl req -x509 -new -nodes -key /usr/local/src/harbor/certs/harbor-ca.key -subj "/CN=harbor.pansn.cn" -days 7120 -out /usr/local/src/harbor/certs/harbor-ca.crt
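The generated certificate can be inspected before it is referenced in harbor.cfg:
openssl x509 -in /usr/local/src/harbor/certs/harbor-ca.crt -noout -subject -dates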
4. Install Harbor
cd /usr/local/src
tar xf harbor-offline-installer-v1.7.5.tgz
5. Modify the Harbor configuration
vim harbor.cfg
hostname = harbor.pansn.cn
ui_url_protocol = https
ssl_cert = /usr/local/src/harbor/certs/harbor-ca.crt
ssl_cert_key = /usr/local/src/harbor/certs/harbor-ca.key
6. Run the Harbor installer
# ./install.sh    # run the Harbor installation
If the installer completes with output like the above, Harbor was installed successfully.
7. Log in
Username: admin
Password: Harbor12345
3.4.1. Create an "images" Project
Private project: both pushing and pulling images require logging in.
Public project: pushing requires logging in, pulling does not. Newly created projects are private by default.
3.4.2. Push an Image
Tag the nginx image and push it to Harbor:
# docker tag docker.io/nginx:1.16.1 harbor.pansn.cn/images/nginx:1.16.1
# docker push harbor.pansn.cn/images/nginx:1.16.1
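As noted in 3.4.1, pushing requires logging in to the registry first, so a login such as the following is assumed before the push above:
docker login harbor.pansn.cn     # admin / Harbor12345 as configured during installation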
3.4.3. Sync the Harbor Login Certificate to the Node Hosts
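One common way to let every node trust the self-signed certificate generated earlier is to copy it into Docker's per-registry certificate directory on each node; a sketch, assuming harbor-ca.crt has been copied to the Ansible control node and key-based ssh is in place:
for ip in 192.168.7.106 192.168.7.107 192.168.7.108 192.168.7.109 192.168.7.110 192.168.7.111; do
  ssh ${ip} "mkdir -p /etc/docker/certs.d/harbor.pansn.cn"
  scp harbor-ca.crt ${ip}:/etc/docker/certs.d/harbor.pansn.cn/ca.crt
done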
Error Summary
docker: Cannot connect to the Docker daemon at unix:///var/run/docker.sock.
Cause: time synchronization was not set up properly.