k8s-jenkins: full CI/CD build workflow
Initialization

Host IP | Services
---|---
192.168.23.201 | docker-ce; gitlab
192.168.23.202 | docker-ce; harbor; mysql
192.168.23.203 | docker-ce; k8s-master
192.168.23.204 | docker-ce; k8s-node1
192.168.23.205 | docker-ce; k8s-node2

First, set the hostname on each server:
hostnamectl set-hostname gitlab
hostnamectl set-hostname harbor
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
Disable firewalld and SELinux on all five servers:
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i '/^SELINUX/ s/enforcing/disabled/' /etc/selinux/config
cat >> /etc/sysctl.conf << 'EOF'
net.ipv4.ip_forward=1
EOF
sysctl -p
systemctl restart network
Install GitLab
Upload the GitLab package first:
yum -y install policycoreutils openssh-server openssh-clients postfix
systemctl enable sshd && systemctl start sshd
systemctl enable postfix && systemctl start postfix
ls gitlab-ce-12.4.2-ce.0.el6.x86_64.rpm
rpm -ivh gitlab-ce-12.4.2-ce.0.el6.x86_64.rpm
cp -a /etc/gitlab/gitlab.rb /etc/gitlab/gitlab.rb.bak
sed -i '23 s/gitlab.example.com/192.168.23.201:82/' /etc/gitlab/gitlab.rb
sed -i "1113i nginx['listen_port'] = 82" /etc/gitlab/gitlab.rb
gitlab-ctl reconfigure
gitlab-ctl restart
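If the page does not come up right away, a quick sanity check (the exact service list varies by GitLab version) is:
gitlab-ctl status    # every component should be listed as "run:"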
Browse to 192.168.23.201:82
Create a project---->Project name: tensquare_back ---->Create project
Windows IDEA:
First configure the remote Git repository: select the project in the left panel, right-click --> Git --> Repository --> Remotes
Set the URL to the GitLab project's HTTP clone address
Enter the corresponding username and password, then push the code to GitLab
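If you prefer the command line to IDEA, a roughly equivalent first push looks like the sketch below; the local project directory name and the master default branch are assumptions, not part of the original notes:
cd tensquare_back    # assumed local project directory
git init             # skip if the project is already a Git repository
git remote add origin http://192.168.23.201:82/root/tensquare_back.git
git add . && git commit -m "initial import"
git push -u origin master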
Install and configure Harbor on the harbor host
Upload the Harbor package:
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install docker-ce
systemctl start docker
systemctl enable docker
ls docker-compose
chmod +x docker-compose
mv docker-compose /usr/local/bin/
ls /usr/local/bin/docker-compose
docker-compose --version
ls harbor-offline-installer-v1.9.2.tgz
tar -zxvf harbor-offline-installer-v1.9.2.tgz
mv harbor /opt/harbor
cd /opt/harbor/
cp -a harbor.yml harbor.yml.bak
sed -i '5 s/reg.mydomain.com/192.168.23.202/' harbor.yml
sed -i '10 s/80/85/' harbor.yml
./prepare
./install.sh
docker-compose up -d
Browse to 192.168.23.202:85
Account: admin / Harbor12345
New Project ---> Project Name: tensquare ---> OK
On the right, Administration ---> Users ---> New User ---> username tom, password Abcd1234
On the right, Projects ---> tensquare ---> Members ---> + User ---> Name: tom, Role: Maintainer
cat > /etc/docker/daemon.json << 'EOF'
{
"registry-mirrors": ["https://k0ki64fw.mirror.aliyuncs.com"],
"insecure-registries": ["192.168.23.202:85"]
}
EOF
systemctl restart docker
docker-compose up -d
docker login -u tom -p Abcd1234 192.168.23.202:85
Install and configure MySQL on the harbor host
Upload the MySQL and SonarQube packages
Install MySQL:
cd /root/
ls boost_1_59_0.tar.gz mysql-5.7.17.tar.gz tensquare_gathering.sql tensquare_user.sql
yum -y install \
ncurses \
ncurses-devel \
bison \
cmake
useradd -s /sbin/nologin mysql
tar zxvf mysql-5.7.17.tar.gz -C /opt/
tar zxvf boost_1_59_0.tar.gz -C /usr/local/
cd /usr/local/
mv boost_1_59_0 boost
cd /opt/mysql-5.7.17/
cmake \
-DCMAKE_INSTALL_PREFIX=/usr/local/mysql \
-DMYSQL_UNIX_ADDR=/usr/local/mysql/mysql.sock \
-DSYSCONFDIR=/etc \
-DSYSTEMD_PID_DIR=/usr/local/mysql \
-DDEFAULT_CHARSET=utf8 \
-DDEFAULT_COLLATION=utf8_general_ci \
-DWITH_INNOBASE_STORAGE_ENGINE=1 \
-DWITH_ARCHIVE_STORAGE_ENGINE=1 \
-DWITH_BLACKHOLE_STORAGE_ENGINE=1 \
-DWITH_PERFSCHEMA_STORAGE_ENGINE=1 \
-DMYSQL_DATADIR=/usr/local/mysql/data \
-DWITH_BOOST=/usr/local/boost \
-DWITH_SYSTEMD=1
make -j6 && make install
chown -R mysql.mysql /usr/local/mysql/
cat > /etc/my.cnf <<'EOF'
[client]
port = 3306
default-character-set=utf8
socket = /usr/local/mysql/mysql.sock
[mysql]
port = 3306
default-character-set=utf8
socket = /usr/local/mysql/mysql.sock
[mysqld]
user = mysql
basedir = /usr/local/mysql
datadir = /usr/local/mysql/data
port = 3306
character_set_server=utf8
pid-file = /usr/local/mysql/mysqld.pid
socket = /usr/local/mysql/mysql.sock
server-id = 1
sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES,NO_AUTO_CREATE_USER,NO_AUTO_VALUE_ON_ZERO,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,PIPES_AS_CONCAT,ANSI_QUOTES
EOF
chown mysql:mysql /etc/my.cnf
echo 'PATH=/usr/local/mysql/bin:/usr/local/mysql/lib:$PATH' >> /etc/profile
echo 'export PATH' >> /etc/profile
source /etc/profile
cd /usr/local/mysql/
bin/mysqld \
--initialize-insecure \
--user=mysql \
--basedir=/usr/local/mysql \
--datadir=/usr/local/mysql/data
cp /usr/local/mysql/usr/lib/systemd/system/mysqld.service /usr/lib/systemd/system/
systemctl daemon-reload
systemctl start mysqld
netstat -anpt | grep 3306
systemctl enable mysqld
mysqladmin -u root -p password "abc123"
mysql -u root -pabc123
create database tensquare_user;
use tensquare_user;
source /root/tensquare_user.sql;
create database tensquare_gathering;
use tensquare_gathering;
source /root/tensquare_gathering.sql;
grant all privileges on *.* to 'root'@'%' identified by 'abc123' with grant option;
flush privileges;
exit
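As an optional sanity check of the remote grant, run the following from any other host that already has a MySQL client installed (the client itself is an assumption, it is not part of these steps):
mysql -h 192.168.23.202 -P 3306 -u root -pabc123 -e 'show databases;'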
Install the three Kubernetes servers
Run the following on all three k8s servers:
cat >>/etc/hosts<<EOF
192.168.23.203 k8s-master
192.168.23.204 k8s-node1
192.168.23.205 k8s-node2
EOF
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install docker-ce
systemctl enable docker --now
cat >/etc/docker/daemon.json<<'EOF'
{
"registry-mirrors": ["https://k0ki64fw.mirror.aliyuncs.com"],
"insecure-registries": ["192.168.23.202:85"]
}
EOF
systemctl restart docker
modprobe br_netfilter
cat >/etc/sysctl.d/k8s.conf <<'EOF'
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
EOF
sysctl -p /etc/sysctl.d/k8s.conf
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
swapoff -a
sed -i '/swap/ s/^/#/' /etc/fstab
mount -a
yum clean all
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
ls /etc/yum.repos.d/
yum install -y kubelet-1.17.0 kubeadm-1.17.0 kubectl-1.17.0
systemctl enable kubelet
kubelet --version
Steps on the master node
cat > /etc/docker/daemon.json << 'EOF'
{
"registry-mirrors": ["https://k0ki64fw.mirror.aliyuncs.com"],
"insecure-registries": ["192.168.23.202:85"],
"exec-opts":["native.cgroupdriver=systemd"]
}
EOF
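The original notes only restart kubelet below; because daemon.json was just modified, Docker itself also needs a restart here for the systemd cgroup driver to take effect (this command is an addition to the original steps):
systemctl restart docker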
[ `hostname` = 'k8s-master' ] && kubeadm init --kubernetes-version=1.17.0 \
--apiserver-advertise-address=192.168.23.203 \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16
Copy the join command printed at the end of the init output to a notepad.
systemctl restart kubelet
systemctl status kubelet
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
mkdir k8s
cd k8s
wget --no-check-certificate https://docs.projectcalico.org/v3.10/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
sed -i 's/192.168.0.0/10.244.0.0/g' calico.yaml
kubectl apply -f calico.yaml
Steps to run on the worker (slave) nodes
# Run the join command copied from the kubeadm init output
kubeadm join 192.168.23.203:6443 --token......
At this point, kubectl get nodes on the master node should list all the nodes.
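A quick way to confirm the cluster is healthy before moving on (pod names vary, so the grep pattern below is only illustrative):
kubectl get nodes -o wide                                    # all nodes should eventually report Ready
kubectl get pods -n kube-system | grep -E 'calico|coredns'   # network and DNS pods should be Running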
Install and configure NFS on k8s-master
yum install -y nfs-utils
mkdir -p /opt/nfs/{jenkins,maven}
chmod -R 777 /opt/nfs
cat > /etc/exports <<'EOF'
/opt/nfs/jenkins *(rw,no_root_squash)
/opt/nfs/maven *(rw,no_root_squash)
EOF
systemctl start nfs
systemctl enable nfs
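Optionally, verify the exports from the two worker nodes; they will mount the Maven repository over NFS later, so they also need nfs-utils installed (installing it on the nodes is an assumption, not part of the original steps):
yum install -y nfs-utils
showmount -e 192.168.23.203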
Install the nfs-client provisioner
Upload nfs-client-provisioner to the master:
cd /root/
ls nfs-client.zip
unzip nfs-client.zip
cd nfs-client
sed -i '/192.*/ s/192.*/192.168.23.203/g' deployment.yaml
kubectl apply -f .
kubectl get pods
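If the bundled manifests also create a StorageClass (whether they do depends on the contents of nfs-client.zip, so treat this as an assumption), it can be checked with:
kubectl get storageclass
kubectl get pods | grep nfs-client-provisioner    # the provisioner pod should be Running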
Install the Jenkins master
Upload jenkins-master.zip to the master host:
cd /root/
ls jenkins-master.zip
unzip jenkins-master.zip
cd jenkins-master/
kubectl create ns kube-ops
kubectl get ns
kubectl create -f .
kubectl get pods -n kube-ops -o wide
kubectl get service -n kube-ops
Wait until the jenkins-0 pod reports READY 1/1 and Running, then browse to the IP of the node running Jenkins on the NodePort mapped from container port 8080.
You are asked for the initial admin password; run the following on the master (press Tab to complete the directory name):
cd /opt/nfs/jenkins/kube-ops-jenkins-home-jenkins-0
cd secrets/
cat initialAdminPassword
After obtaining the initial password, log in to Jenkins.
Select plugins to install ---> None ---> Install
Create a user: jerry / abc123
Configure Jenkins
Configure the plugin update mirror
On the server, run the following (press Tab to complete the directory name):
cd /opt/nfs/jenkins/kube-ops-jenkins-home-jenkins-0-pvc    # press Tab here to complete the full directory name
cd updates/
sed -i 's/http:\/\/updates.jenkins-ci.org\/download/https:\/\/mirrors.tuna.tsinghua.edu.cn\/jenkins/g' default.json && sed -i 's/http:\/\/www.google.com/https:\/\/www.baidu.com/g' default.json
In the Jenkins web UI:
On the plugin manager's Advanced page, replace the Update Site URL with the address below:
https://mirrors.tuna.tsinghua.edu.cn/jenkins/updates/update-center.json
Submit, then restart Jenkins.
Install the required plugins:
Localization: Chinese
Git
Pipeline
Extended Choice Parameter
kubernetes
Then restart Jenkins.
Integrate Jenkins with Kubernetes
Manage Jenkins ---> Configure System ---> Cloud ---> a separate configuration page ---> New cloud ---> Kubernetes
(If there is no Cloud section in the system configuration, the Kubernetes plugin has not been installed.)
Kubernetes URL: https://kubernetes.default.svc.cluster.local
Kubernetes Namespace: kube-ops
----> Test Connection. You should see "Connected to Kubernetes v1.17.0"
Jenkins URL: http://jenkins.kube-ops.svc.cluster.local:8080
----> Save
Build the Jenkins slave image with Maven
Upload jenkins-slave.zip to the master:
cd /root
ls jenkins-slave.zip
unzip jenkins-slave.zip
cd jenkins-slave/
docker build -t jenkins-slave-maven:latest .
docker images | grep jenkins-slave-maven
docker login -u admin -p Harbor12345 192.168.23.202:85
docker tag jenkins-slave-maven:latest 192.168.23.202:85/library/jenkins-slave-maven:latest
docker push 192.168.23.202:85/library/jenkins-slave-maven
Configure credentials and permissions
Add a username/password credential for GitLab
Add a username/password credential for Harbor
Grant all k8s nodes access to the docker.sock file:
chmod 777 /var/run/docker.sock
Install the Kubernetes Continuous Deploy plugin
Add a credential of the Kubernetes configuration (kubeconfig) type
Description: k8s-auth
Enter directly: paste the full contents of /root/.kube/config from the master
-----> OK
Note down the IDs of the three credentials.
Create the pipeline project and write the pipeline script
Create the pipeline project
Project name: tensquare_back
This project is parameterized ----> Extended Choice Parameter
Name: project_name ----> Description: enter the projects to build
Basic Parameter Types ----> Parameter Type: Check Boxes ----> Numbe..... : 4 ----> Delimiter: , (note: an ASCII comma)
Value: tensquare_eureka_server@10086,tensquare_zuul@10020,tensquare_admin_service@9001,tensquare_gathering@9002
---->
Default Value: tensquare_eureka_server@10086
----->
Description: registry center, gateway service, permission management, gathering microservice
--->
Apply
Pipeline ----> Pipeline script
Be sure to change the IDs of the Git, Harbor, and k8s credentials in the script below,
and change the Harbor registry address and the NFS server address used for the volume mount to your k8s-master address.
def git_address = "http://192.168.23.201:82/root/tensquare_back.git"
def git_auth = "ce4ec592-44ae-45df-9e56-781d5a7be444"
// Name of the version (tag) to build
def tag = "latest"
// Harbor registry address
def harbor_url = "192.168.23.202:85"
// Harbor project name
def harbor_project_name = "tensquare"
// Harbor credential ID
def harbor_auth = "25b940d0-8df4-4e09-b8fa-b110570afc37"
// Kubernetes credential ID
def k8s_auth="20072727-2a75-4042-bc63-1c72cceeee33"
// Name of the Kubernetes secret holding the Harbor credentials; this resource must be created beforehand (see the pre-build steps below)
def secret_name="registry-auth-secret"
podTemplate(label: 'jenkins-slave', cloud: 'kubernetes', containers: [
containerTemplate(
name: 'jnlp',
image: "192.168.23.202:85/library/jenkins-slave-maven:latest"
),
containerTemplate(
name: 'docker',
image: "docker:stable",
ttyEnabled: true,
command: 'cat'
),
],
volumes: [
hostPathVolume(mountPath: '/var/run/docker.sock', hostPath: '/var/run/docker.sock'),
nfsVolume(mountPath: '/usr/local/apache-maven/repo', serverAddress: '192.168.23.203' , serverPath: '/opt/nfs/maven'),
],
)
{
node("jenkins-slave"){
// Step 1
stage('pull code'){
checkout([$class: 'GitSCM', branches: [[name: '*/master']], extensions: [], userRemoteConfigs: [[credentialsId: "${git_auth}", url: "${git_address}"]]])
}
// Step 2
stage('make public sub project'){
// Compile and install the common shared project
sh "mvn -f tensquare_common clean install"
}
// Step 3
stage('make image'){
// Convert the selected project info into an array
def selectedProjects = "${project_name}".split(',')
for(int i=0;i<selectedProjects.size();i++){
// Extract each project's name and port
def currentProject = selectedProjects[i];
// project name
def currentProjectName = currentProject.split('@')[0]
// project listen port
def currentProjectPort = currentProject.split('@')[1]
// image name
def imageName = "${currentProjectName}:${tag}"
// Compile and build the local image
sh "mvn -f ${currentProjectName} clean package dockerfile:build"
container('docker') {
// Tag the image
sh "docker tag ${imageName} ${harbor_url}/${harbor_project_name}/${imageName}"
// Log in to Harbor and push the image
withCredentials([usernamePassword(credentialsId: "${harbor_auth}", passwordVariable: 'password', usernameVariable: 'username')])
{
// log in
sh "docker login -u ${username} -p ${password} ${harbor_url}"
// push the image
sh "docker push ${harbor_url}/${harbor_project_name}/${imageName}"
}
// Remove the local images
sh "docker rmi -f ${imageName}"
sh "docker rmi -f ${harbor_url}/${harbor_project_name}/${imageName}"
// Full name of the image that will be pulled for deployment
def deploy_image_name = "${harbor_url}/${harbor_project_name}/${imageName}"
// Deploy to Kubernetes; deploy.yml must live under each sub-project
sh """
sed -i 's#\$IMAGE_NAME#${deploy_image_name}#' ${currentProjectName}/deploy.yml
sed -i 's#\$SECRET_NAME#${secret_name}#' ${currentProjectName}/deploy.yml
"""
kubernetesDeploy configs: "${currentProjectName}/deploy.yml", kubeconfigId: "${k8s_auth}"
}
}
}
}
}
Pre-build steps
Log the three k8s servers in to the Harbor registry:
docker login -u tom -p Abcd1234 192.168.23.202:85
Create the image-pull secret on the master:
kubectl create secret docker-registry registry-auth-secret --docker-server=192.168.23.202:85 --docker-username=tom --docker-password=Abcd1234 --docker-email=tom@qq.com
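To confirm the secret was created in the default namespace (the namespace the deploy.yml resources run in):
kubectl get secret registry-auth-secret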
Eureka part
Create a Dockerfile in the eureka project:
#FROM java:8
FROM openjdk:8-jdk-alpine
ARG JAR_FILE
COPY ${JAR_FILE} app.jar
EXPOSE 10086
ENTRYPOINT ["java","-jar","/app.jar"]
Create deploy.yml in the eureka project:
# Create the Service resource
apiVersion: v1
kind: Service
metadata:
  name: eureka
  labels:
    app: eureka
spec:
  type: NodePort
  ports:
    - port: 10086
      name: eureka
      targetPort: 10086
  selector:
    app: eureka
---
# Create the StatefulSet
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: eureka
spec:
  serviceName: "eureka"
  replicas: 2
  selector:
    matchLabels:
      app: eureka
  template:
    metadata:
      labels:
        app: eureka
    spec:
      imagePullSecrets:
        - name: $SECRET_NAME
      containers:
        - name: eureka
          image: $IMAGE_NAME
          ports:
            - containerPort: 10086
          env:
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: EUREKA_SERVER
              value: "http://eureka-0.eureka:10086/eureka/,http://eureka-1.eureka:10086/eureka/"
            - name: EUREKA_INSTANCE_HOSTNAME
              value: ${MY_POD_NAME}.eureka
  podManagementPolicy: "Parallel"
Modify the eureka project's application.yml:
server:
  port: ${PORT:10086}
spring:
  application:
    name: eureka
eureka:
  server:
    # Interval at which the server scans for and evicts expired services (default 60*1000 ms)
    eviction-interval-timer-in-ms: 5000
    enable-self-preservation: false
    use-read-only-response-cache: false
  client:
    # How often the eureka client fetches the registry information (default 30 s)
    registry-fetch-interval-seconds: 5
    serviceUrl:
      defaultZone: ${EUREKA_SERVER:http://127.0.0.1:${server.port}/eureka/}
  instance:
    # Heartbeat interval: how long after one heartbeat the next is sent (default 30 s)
    lease-renewal-interval-in-seconds: 5
    # How long to wait for the next heartbeat before the lease expires; must be larger than the heartbeat interval (default 90 s)
    lease-expiration-duration-in-seconds: 10
    instance-id: ${EUREKA_INSTANCE_HOSTNAME:${spring.application.name}}:${server.port}@${random.long(1000000,9999999)}
    hostname: ${EUREKA_INSTANCE_HOSTNAME:${spring.application.name}}
Zuul part
Upload the parent project dependency:
cd /opt/nfs/maven/com/tensquare
ls tensquare_parent.zip
unzip tensquare_parent.zip
Create a Dockerfile in the zuul project:
#FROM java:8
FROM openjdk:8-jdk-alpine
ARG JAR_FILE
COPY ${JAR_FILE} app.jar
EXPOSE 10020
ENTRYPOINT ["java","-jar","/app.jar"]
Create deploy.yml in the zuul project:
---
apiVersion: v1
kind: Service
metadata:
  name: zuul
  labels:
    app: zuul
spec:
  type: NodePort
  ports:
    - port: 10020
      name: zuul
      targetPort: 10020
  selector:
    app: zuul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zuul
spec:
  serviceName: "zuul"
  replicas: 2
  selector:
    matchLabels:
      app: zuul
  template:
    metadata:
      labels:
        app: zuul
    spec:
      imagePullSecrets:
        - name: $SECRET_NAME
      containers:
        - name: zuul
          image: $IMAGE_NAME
          ports:
            - containerPort: 10020
  podManagementPolicy: "Parallel"
Modify the zuul project's application.yml:
server:
  port: 10020 # port
# Basic service information
spring:
  application:
    name: tensquare-zuul # service ID
# Eureka configuration
eureka:
  client:
    service-url:
      defaultZone: http://eureka-0.eureka:10086/eureka/,http://eureka-1.eureka:10086/eureka/ # Eureka server addresses
  instance:
    prefer-ip-address: true
# Ribbon timeouts
ribbon:
  ConnectTimeout: 1500 # connect timeout, default 500 ms
  ReadTimeout: 3000 # request timeout, default 1000 ms
# Hystrix circuit-breaker timeout
hystrix:
  command:
    default:
      execution:
        isolation:
          thread:
            timeoutInMilliseconds: 2000 # circuit-breaker timeout, default 1000 ms
# Gateway route configuration
zuul:
  routes:
    admin:
      path: /admin/**
      serviceId: tensquare-admin-service
    gathering:
      path: /gathering/**
      serviceId: tensquare-gathering
# JWT parameters
jwt:
  config:
    key: itcast
    ttl: 1800000
admin_service part
Create a Dockerfile in the admin_service project:
#FROM java:8
FROM openjdk:8-jdk-alpine
ARG JAR_FILE
COPY ${JAR_FILE} app.jar
EXPOSE 9001
ENTRYPOINT ["java","-jar","/app.jar"]
Create deploy.yml in the admin_service project:
---
apiVersion: v1
kind: Service
metadata:
  name: admin
  labels:
    app: admin
spec:
  type: NodePort
  ports:
    - port: 9001
      name: admin
      targetPort: 9001
  selector:
    app: admin
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: admin
spec:
  serviceName: "admin"
  replicas: 2
  selector:
    matchLabels:
      app: admin
  template:
    metadata:
      labels:
        app: admin
    spec:
      imagePullSecrets:
        - name: $SECRET_NAME
      containers:
        - name: admin
          image: $IMAGE_NAME
          ports:
            - containerPort: 9001
  podManagementPolicy: "Parallel"
Modify the admin_service project's application.yml:
server:
  port: 9001
spring:
  application:
    name: tensquare-admin-service # service name
  datasource:
    driverClassName: com.mysql.jdbc.Driver
    url: jdbc:mysql://192.168.23.202:3306/tensquare_user?characterEncoding=UTF8
    username: root
    password: abc123
  jpa:
    database: mysql
    show-sql: true
# Eureka configuration
eureka:
  client:
    service-url:
      defaultZone: http://eureka-0.eureka:10086/eureka/,http://eureka-1.eureka:10086/eureka/
  instance:
    lease-renewal-interval-in-seconds: 5 # send a heartbeat every 5 seconds
    lease-expiration-duration-in-seconds: 10 # expire after 10 seconds without a heartbeat
    prefer-ip-address: true
# JWT parameters
jwt:
  config:
    key: itcast
    ttl: 1800000
gathering part
Create a Dockerfile in the gathering project:
#FROM java:8
FROM openjdk:8-jdk-alpine
ARG JAR_FILE
COPY ${JAR_FILE} app.jar
EXPOSE 9002
ENTRYPOINT ["java","-jar","/app.jar"]
Create deploy.yml in the gathering project:
---
apiVersion: v1
kind: Service
metadata:
  name: gathering
  labels:
    app: gathering
spec:
  type: NodePort
  ports:
    - port: 9002
      name: gathering
      targetPort: 9002
  selector:
    app: gathering
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: gathering
spec:
  serviceName: "gathering"
  replicas: 2
  selector:
    matchLabels:
      app: gathering
  template:
    metadata:
      labels:
        app: gathering
    spec:
      imagePullSecrets:
        - name: $SECRET_NAME
      containers:
        - name: gathering
          image: $IMAGE_NAME
          ports:
            - containerPort: 9002
  podManagementPolicy: "Parallel"
Modify the gathering project's application.yml:
server:
  port: 9002
spring:
  application:
    name: tensquare-gathering # service name
  datasource:
    driverClassName: com.mysql.jdbc.Driver
    url: jdbc:mysql://192.168.23.202:3306/tensquare_gathering?characterEncoding=UTF8
    username: root
    password: abc123
  jpa:
    database: mysql
    show-sql: true
# Eureka client configuration
eureka:
  client:
    service-url:
      defaultZone: http://eureka-0.eureka:10086/eureka/,http://eureka-1.eureka:10086/eureka/
  instance:
    lease-renewal-interval-in-seconds: 5 # send a heartbeat every 5 seconds
    lease-expiration-duration-in-seconds: 10 # expire after 10 seconds without a heartbeat
    prefer-ip-address: true
Postman test part
First, on k8s-master, look up the NodePort mapped for the zuul service:
kubectl get service
Choose POST and request http://192.168.23.204:<zuul NodePort>/admin/admin/login
Body ---> raw ---> JSON
{
"loginname":"admin",
"password":"123456"
}
----> Send
Copy the token returned in the response.
Open a new request and send a GET to http://192.168.23.204:<zuul NodePort>/gathering/gathering/
On the Headers tab set:
Key: token
Value: the copied token
----> Send
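The same test can be run from the command line; <nodeport> and <token> below are placeholders for the zuul NodePort shown by kubectl get service and the token copied from the login response, not literal values:
# log in through the gateway; the response body contains the token
curl -s -X POST -H 'Content-Type: application/json' \
  -d '{"loginname":"admin","password":"123456"}' \
  http://192.168.23.204:<nodeport>/admin/admin/login
# call the gathering service with the token in the request header
curl -s -H 'token: <token>' http://192.168.23.204:<nodeport>/gathering/gathering/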