OpenShift 3.11 Installation and Deployment



1 Environment Preparation (All Nodes)

OpenShift version: v3.11
1.1 Machine Environment
IP              CPU  MEM(GB)  Hostname  OS
192.168.1.130   4    16       master    CentOS 7.6
192.168.1.132   2    4        node01    CentOS 7.6
192.168.1.135   2    4        node02    CentOS 7.6
1.2 Passwordless SSH Login
ssh-keygen 
ssh-copy-id 192.168.1.130
ssh-copy-id 192.168.1.132
ssh-copy-id 192.168.1.135
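
A quick verification that passwordless login works (each command should print the remote hostname without asking for a password); this check is an addition, not part of the original procedure:
ssh 192.168.1.132 hostname
ssh 192.168.1.135 hostname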
1.3 Hosts Resolution
vim /etc/hosts
192.168.1.130 master
192.168.1.132 node01
192.168.1.135 node02
---------------------
scp -rp /etc/hosts 192.168.1.132:/etc/hosts
scp -rp /etc/hosts 192.168.1.135:/etc/hosts
1.4 SELinux and Firewall

OpenShift 3.11 expects SELinux to stay enabled in enforcing mode with the targeted policy:

sed -i 's/SELINUX=.*/SELINUX=enforcing/' /etc/selinux/config
sed -i 's/SELINUXTYPE=.*/SELINUXTYPE=targeted/' /etc/selinux/config

Open port 8443 for the OpenShift API:

/sbin/iptables -I INPUT -p tcp --dport 8443 -j ACCEPT && \
service iptables save

1.5 Install Required Packages

yum install -y wget git ntp net-tools bind-utils iptables-services bridge-utils bash-completion kexec-tools sos psacct nfs-utils yum-utils docker NetworkManager

1.6 Miscellaneous
sysctl net.ipv4.ip_forward=1
yum install pyOpenSSL httpd-tools -y 
systemctl start NetworkManager 
systemctl enable NetworkManager
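
Note that a sysctl set on the command line does not survive a reboot. A minimal way to persist IP forwarding, assuming the stock /etc/sysctl.conf mechanism on CentOS 7:
echo 'net.ipv4.ip_forward = 1' >>/etc/sysctl.conf
sysctl -p    #reload; should print net.ipv4.ip_forward = 1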

Configure a registry mirror (image accelerator):
echo '{ "insecure-registries": ["172.30.0.0/16"], "registry-mirrors": ["https://xxxxx.mirror.aliyuncs.com"] }' >/etc/docker/daemon.json && \
systemctl daemon-reload && \
systemctl enable docker && \
systemctl restart docker
1.7 Image Download
#master image list (master node)
echo 'docker.io/cockpit/kubernetes
docker.io/openshift/origin-haproxy-router
docker.io/openshift/origin-service-catalog
docker.io/openshift/origin-node
docker.io/openshift/origin-deployer
docker.io/openshift/origin-control-plane
docker.io/openshift/origin-template-service-broker
docker.io/openshift/origin-pod
docker.io/openshift/origin-web-console
quay.io/coreos/etcd' >image.txt && \
while read line; do docker pull $line ; done<image.txt


#node image list (both node machines)
echo 'docker.io/openshift/origin-haproxy-router
docker.io/openshift/origin-node
docker.io/openshift/origin-deployer
docker.io/openshift/origin-pod
docker.io/ansibleplaybookbundle/origin-ansible-service-broker
docker.io/openshift/origin-docker-registry' >image.txt && \
while read line; do docker pull $line ; done<image.txt
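
After the pulls finish, a quick spot check that the images landed locally:
docker images | grep -E 'openshift|cockpit|etcd|ansibleplaybookbundle'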

2 Configure Ansible (Master Node)

2.1 Download the openshift-ansible Code

Ansible 2.6.5 is required:

git clone -b release-3.11 https://github.com/openshift/openshift-ansible.git

wget https://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin311/ansible-2.6.5-1.el7.noarch.rpm &&\
yum localinstall ansible-2.6.5-1.el7.noarch.rpm -y &&\
yum install -y etcd &&\
systemctl enable etcd &&\
systemctl start etcd
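
A quick check that the etcd service came up (etcdctl v2 syntax, as shipped with the CentOS etcd package):
systemctl status etcd
etcdctl cluster-health    #should report "cluster is healthy"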
2.2 Configuration File
[root@master ~]# cat /etc/ansible/hosts
[all]
# all machine node names go under [all]
master
node01
node02

[OSEv3:children]
# the OpenShift roles; there are three: masters, nodes, etcd
masters
nodes
etcd

[OSEv3:vars]
# OpenShift installation parameters
# ansible connects over ssh as root
ansible_ssh_user=root
# deployment type: origin
openshift_deployment_type=origin
# release version 3.11
openshift_release=3.11
openshift_enable_service_catalog=false
openshift_clock_enabled=true
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
openshift_disable_check=disk_availability,docker_storage,memory_availability,docker_image_availability

[masters]
# machines with the master role
master

[etcd]
# machines with the etcd role
master

[nodes]
# machines with the node role
master openshift_node_group_name='node-config-all-in-one'
node01 openshift_node_group_name='node-config-compute'
node02 openshift_node_group_name='node-config-compute'

#openshift_enable_service_catalog=false
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
#openshift_hosted_registry_storage_nfs_directory=/data/docker
#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_registry_storage_volume_name=registry
#openshift_hosted_registry_storage_volume_size=20Gi
#openshift_clock_enabled=true
#ansible_service_broker_install=false
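
Before running any playbooks it is worth confirming that Ansible can reach every host in the inventory; a minimal connectivity check, assuming the inventory above is saved as /etc/ansible/hosts:
ansible all -m ping              #every host should answer "pong"
ansible OSEv3 -m ping            #limit the check to the group used by the playbooks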

3 Install with Ansible

#pre-installation check
ansible-playbook ~/openshift-ansible/playbooks/prerequisites.yml
#run the installation
ansible-playbook ~/openshift-ansible/playbooks/deploy_cluster.yml

#to reinstall, run the uninstall playbook first
ansible-playbook ~/openshift-ansible/playbooks/adhoc/uninstall.yml
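
Once deploy_cluster.yml finishes, a quick sanity check from the master (standard oc commands, not specific to this guide):
oc get nodes                                    #all three nodes should be Ready
oc get pods --all-namespaces | grep -v Running  #anything listed here beyond the header deserves a look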

4 Post-install Configuration (Master Node)

4.1 Configure NFS Persistent Volumes
yum install nfs-utils rpcbind -y 
mkdir -p /data/v0{01..20} /data/{docker,volume,registry}
chmod -R 777 /data 
vim /etc/exports
/data 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v001 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v002 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v003 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v004 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v005 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v006 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v007 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v008 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v009 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/v010 192.168.1.0/24(rw,sync,no_all_squash,no_root_squash)
/data/docker *(rw,sync,no_all_squash,no_root_squash)

systemctl restart rpcbind &&\
systemctl restart nfs && \
systemctl enable rpcbind &&\
systemctl enable nfs
exportfs -r
kubectl apply -f pv-01-10.yaml
See the pv-01-10.yaml file in section 6.1 at the end of this document.
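
For reference, a minimal PVC sketch that should bind to one of the volumes defined in pv-01-10.yaml by matching their labels; the claim name test-claim is made up for illustration:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim            # example name, not part of the original guide
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi            # small enough to bind any of nfs-pv001..010
  selector:
    matchLabels:
      type: nfs               # matches the label set on every PV in section 6.1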
4.2 Create OpenShift Users
oc login -u system:admin                                ##log in as the system admin user
htpasswd -b /etc/origin/master/htpasswd admin 123456    ##create user admin
htpasswd -b /etc/origin/master/htpasswd dev dev         ##create user dev
oc login -u admin                                       ##log in as that user
oc logout                                               ##log out of the current user
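
As a quick sanity check that the htpasswd users work, log in as one of them and create a test project (the project name my-test is only an example):
oc login -u dev -p dev https://master:8443   ##log in as the dev user created above
oc new-project my-test                       ##regular users can create their own projects
oc login -u system:admin                     ##switch back to the cluster admin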
4.3 Grant Cluster-admin to a Created User
oc login -u system:admin &&\                            
oc adm policy add-cluster-role-to-user cluster-admin xxxxx
4.4 Access Test

Add the following hosts entries on your local machine:

192.168.1.130 master
192.168.1.132 node01
192.168.1.135 node02

Log in with the account and password created above:
https://master:8443    admin/123456

5 Other Configuration

5.1 Deploy Cockpit for Cluster Node Management
yum install -y cockpit cockpit-docker cockpit-kubernetes &&\
systemctl start cockpit &&\
systemctl enable cockpit.socket &&\
iptables -A INPUT -p tcp -m state --state NEW -m tcp --dport 9090 -j ACCEPT
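
If the 9090 rule should survive a reboot, it can be saved the same way as the 8443 rule earlier (assuming the iptables-services package from section 1.5):
service iptables save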

https://192.168.1.130:9090 (log in with the machine's SSH account and password)

5.2 Command Completion
#kubectl command completion
mkdir -p /usr/share/bash-completion/kubernetes
kubectl completion bash >/usr/share/bash-completion/kubernetes/bash_completion
echo 'source /usr/share/bash-completion/kubernetes/bash_completion' >>~/.bash_profile

#oc command completion
mkdir -p /usr/share/bash-completion/openshift
oc completion bash >/usr/share/bash-completion/openshift/bash_completion
echo "source /usr/share/bash-completion/openshift/bash_completion" >> ~/.bash_profile

source ~/.bash_profile
5.3 OpenShift Login
#log in to OpenShift, e.g. with the user created above (username: dev, password: dev)
oc login -n openshift

#get the ClusterIP of the docker-registry service
oc get svc -n default|grep docker-registry|awk '{print $3}'
#view the current user's token
oc whoami -t
#log in to the internal Docker registry
docker login -u admin -p `oc whoami -t` docker-registry.default.svc:5000
Note the ClusterIP of the docker-registry service (from the command above) and add a matching entry for docker-registry.default.svc to /etc/hosts on every host so the registry hostname resolves.
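
With the registry login in place, pushing an image into the internal registry follows the usual tag-then-push pattern; the project my-test and image busybox below are only examples:
docker pull busybox
docker tag busybox docker-registry.default.svc:5000/my-test/busybox:latest
docker push docker-registry.default.svc:5000/my-test/busybox:latest   #the logged-in user needs push rights in that project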
5.4 Common Command-line Operations
#master-restart api
#master-restart controllers
oc whoami -t                                            ###show the current user's token
oc login https://master:8443 --token=`oc whoami -t`     ###log in with the user's token
oc get nodes                                            ###check the status of the nodes
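
A few more read-only commands that are handy when inspecting the cluster (all standard oc subcommands):
oc get pods -n default                                  ###router, registry and console pods live here
oc status                                               ###overview of the current project
oc describe node master                                 ###capacity, conditions and allocated resources
oc get events -n default                                ###recent cluster events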

6 Miscellaneous

6.1 The pv-01-10.yaml File
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv001
  labels:
    name: pv001
    type: nfs
spec:
  nfs:
    path: /data/v001
    server: 192.168.1.130
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv002
  labels:
    name: nfs-pv002
    type: nfs
spec:
  nfs:
    path: /data/v002
    server: 192.168.1.130
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv003
  labels:
    name: nfs-pv003
    type: nfs
spec:
  nfs:
    path: /data/v003
    server: 192.168.1.130
  capacity:
    storage: 30Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv004
  labels:
    name: nfs-pv004
    type: nfs
spec:
  nfs:
    path: /data/v004
    server: 192.168.1.130
  capacity:
    storage: 30Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv005
  labels:
    name: nfs-pv005
    type: nfs
spec:
  nfs:
    path: /data/v005
    server: 192.168.1.130
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv006
  labels:
    name: nfs-pv006
    type: nfs
spec:
  nfs:
    path: /data/v006
    server: 192.168.1.130
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv007
  labels:
    name: nfs-pv007
    type: nfs
spec:
  nfs:
    path: /data/v007
    server: 192.168.1.130
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv008
  labels:
    name: nfs-pv008
    type: nfs
spec:
  nfs:
    path: /data/v008
    server: 192.168.1.130
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv009
  labels:
    name: nfs-pv009
    type: nfs
spec:
  nfs:
    path: /data/v009
    server: 192.168.1.130
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv010
  labels:
    name: nfs-pv010
    type: nfs
spec:
  nfs:
    path: /data/v010
    server: 192.168.1.130
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteMany
    - ReadWriteOnce
    - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
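
After applying the file, all ten volumes should show up as Available until a claim binds them:
oc get pv                       ###nfs-pv001 through nfs-pv010 listed with STATUS Available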
 

