Hands-on development with operator-sdk
Environment
OS: CentOS Linux release 7.5.1804 (Core)
golang: v1.15
operator-sdk: v1.7.0
docker: v19.03 # the operator-sdk Dockerfile uses multi-stage builds, which require Docker 17.05 or later
k8s: k3s v1.20.6+k3s1, single node
Requirements
Define a CRD whose spec carries the following fields:
Replicas # replica count
Image # container image
Resources # resource limits
Envs # environment variables
Ports # service ports
From this information, the controller automatically creates or updates a Deployment plus a Service.
Note: this example only demonstrates the overall development flow, so the status of the CRD resource is never updated; in real code you should update status as your situation requires (a sketch of one possible status update follows the controller code below).
Create the project
$ mkdir -p $GOPATH/src/github.com/leffss/app
$ cd $GOPATH/src/github.com/leffss/app
$ operator-sdk init --domain=example.com --repo=github.com/leffss/app
Create the API
$ operator-sdk create api --group app --version v1 --kind App --resource=true --controller=true
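This scaffolds the API types in api/v1/app_types.go and the controller in controllers/app_controller.go. The generated AppSpec starts out as a placeholder, roughly like this (the exact scaffold comments vary with the SDK version):

// AppSpec defines the desired state of App
type AppSpec struct {
	// Foo is an example field of App. Edit app_types.go to remove/update
	Foo string `json:"foo,omitempty"`
}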
Modify the CRD type definitions in api/v1/app_types.go
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

/*
After changing these definitions, run make generate to regenerate zz_generated.deepcopy.go.
*/

// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.

// AppSpec defines the desired state of App
type AppSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file
	Replicas  *int32                      `json:"replicas"`            // replica count
	Image     string                      `json:"image"`               // container image
	Resources corev1.ResourceRequirements `json:"resources,omitempty"` // resource limits
	Envs      []corev1.EnvVar             `json:"envs,omitempty"`      // environment variables
	Ports     []corev1.ServicePort        `json:"ports,omitempty"`     // service ports
}

// AppStatus defines the observed state of App
type AppStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file
	//Conditions []AppCondition
	//Phase string
	appsv1.DeploymentStatus `json:",inline"` // embed DeploymentStatus directly
}

//type AppCondition struct {
//	Type               string
//	Message            string
//	Reason             string
//	Ready              bool
//	LastUpdateTime     metav1.Time
//	LastTransitionTime metav1.Time
//}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// App is the Schema for the apps API
type App struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   AppSpec   `json:"spec,omitempty"`
	Status AppStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true

// AppList contains a list of App
type AppList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []App `json:"items"`
}

func init() {
	SchemeBuilder.Register(&App{}, &AppList{})
}
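If you want the API server to reject invalid specs instead of leaving all validation to the controller, kubebuilder validation markers can be added above the fields before rerunning make manifests. A sketch only — the marker names are standard kubebuilder markers, but the exact constraints here are illustrative assumptions, not part of the original example:

// +kubebuilder:validation:Minimum=1
Replicas *int32 `json:"replicas"` // reject replica counts below 1

// +kubebuilder:validation:MinLength=1
Image string `json:"image"` // reject an empty image reference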
Add resource/deployment/deployment.go
package deployment

import (
	appv1 "github.com/leffss/app/api/v1"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func New(app *appv1.App) *appsv1.Deployment {
	labels := map[string]string{"app.example.com/v1": app.Name}
	selector := &metav1.LabelSelector{MatchLabels: labels}
	return &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      app.Name,
			Namespace: app.Namespace,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(app, schema.GroupVersionKind{
					Group:   appv1.GroupVersion.Group,
					Version: appv1.GroupVersion.Version,
					Kind:    "App",
				}),
			},
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: app.Spec.Replicas,
			Selector: selector,
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},
				Spec: corev1.PodSpec{
					Containers: newContainers(app),
				},
			},
		},
	}
}

func newContainers(app *appv1.App) []corev1.Container {
	var containerPorts []corev1.ContainerPort
	for _, servicePort := range app.Spec.Ports {
		var cport corev1.ContainerPort
		cport.ContainerPort = servicePort.TargetPort.IntVal
		containerPorts = append(containerPorts, cport)
	}
	return []corev1.Container{
		{
			Name:            app.Name,
			Image:           app.Spec.Image,
			Ports:           containerPorts,
			Env:             app.Spec.Envs,
			Resources:       app.Spec.Resources,
			ImagePullPolicy: corev1.PullIfNotPresent,
		},
	}
}
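One detail worth noting in newContainers: the container port is copied from ServicePort.TargetPort.IntVal, which only carries a value when targetPort is numeric. A tiny standalone sketch (the main wrapper is mine, for illustration only) of the underlying intstr behavior:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	fmt.Println(intstr.FromInt(80).IntVal)        // 80: a numeric targetPort maps cleanly
	fmt.Println(intstr.FromString("http").IntVal) // 0: a named targetPort would yield containerPort 0
}

This is why the sample custom resource below sets targetPort to a number.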
Add resource/service/service.go
package service

import (
	appv1 "github.com/leffss/app/api/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func New(app *appv1.App) *corev1.Service {
	return &corev1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      app.Name,
			Namespace: app.Namespace,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(app, schema.GroupVersionKind{
					Group:   appv1.GroupVersion.Group,
					Version: appv1.GroupVersion.Version,
					Kind:    "App",
				}),
			},
		},
		Spec: corev1.ServiceSpec{
			Ports: app.Spec.Ports,
			Selector: map[string]string{
				"app.example.com/v1": app.Name,
			},
		},
	}
}
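Both New functions attach an OwnerReference built by metav1.NewControllerRef, and that is what makes cleanup automatic later on: when the App is deleted, Kubernetes garbage-collects the owned Deployment and Service. Roughly what happens (a sketch; gvk stands for the App GroupVersionKind constructed as above):

ref := metav1.NewControllerRef(app, gvk)
// NewControllerRef sets both ref.Controller and ref.BlockOwnerDeletion to true,
// marking the App as the managing controller of the child object.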
Modify the controller code in controllers/app_controller.go
/*
Copyright 2021.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers

import (
	"context"
	"encoding/json"
	"reflect"

	"github.com/go-logr/logr"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	appv1 "github.com/leffss/app/api/v1"
	"github.com/leffss/app/resource/deployment"
	"github.com/leffss/app/resource/service"
)

// AppReconciler reconciles a App object
type AppReconciler struct {
	client.Client
	Log    logr.Logger
	Scheme *runtime.Scheme
}

//+kubebuilder:rbac:groups=app.example.com,resources=apps,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=app.example.com,resources=apps/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=app.example.com,resources=apps/finalizers,verbs=update

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the App object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.7.2/pkg/reconcile
func (r *AppReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	_ = r.Log.WithValues("app", req.NamespacedName)

	// your logic here

	// fetch the App custom resource
	instance := &appv1.App{}
	if err := r.Client.Get(ctx, req.NamespacedName, instance); err != nil {
		if errors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}

	// the App resource has already been marked for deletion
	if instance.DeletionTimestamp != nil {
		return ctrl.Result{}, nil
	}

	oldDeploy := &appsv1.Deployment{}
	if err := r.Client.Get(ctx, req.NamespacedName, oldDeploy); err != nil {
		// the deployment does not exist yet: create everything
		if errors.IsNotFound(err) {
			// create the deployment
			if err := r.Client.Create(ctx, deployment.New(instance)); err != nil {
				return ctrl.Result{}, err
			}
			// create the service
			if err := r.Client.Create(ctx, service.New(instance)); err != nil {
				return ctrl.Result{}, err
			}
			// record the current spec in the App's annotations
			data, _ := json.Marshal(instance.Spec)
			if instance.Annotations != nil {
				instance.Annotations["spec"] = string(data)
			} else {
				instance.Annotations = map[string]string{"spec": string(data)}
			}
			if err := r.Client.Update(ctx, instance); err != nil {
				return ctrl.Result{}, err
			}
		} else {
			return ctrl.Result{}, err
		}
	} else {
		// the deployment exists: update it if the spec changed
		oldSpec := appv1.AppSpec{}
		if err := json.Unmarshal([]byte(instance.Annotations["spec"]), &oldSpec); err != nil {
			return ctrl.Result{}, err
		}
		if !reflect.DeepEqual(instance.Spec, oldSpec) {
			// update the deployment
			newDeploy := deployment.New(instance)
			oldDeploy.Spec = newDeploy.Spec
			if err := r.Client.Update(ctx, oldDeploy); err != nil {
				return ctrl.Result{}, err
			}
			// update the service
			newService := service.New(instance)
			oldService := &corev1.Service{}
			if err := r.Client.Get(ctx, req.NamespacedName, oldService); err != nil {
				return ctrl.Result{}, err
			}
			clusterIP := oldService.Spec.ClusterIP // the old clusterIP must be carried over when updating a service
			oldService.Spec = newService.Spec
			oldService.Spec.ClusterIP = clusterIP
			if err := r.Client.Update(ctx, oldService); err != nil {
				return ctrl.Result{}, err
			}
			// record the new spec in the App's annotations
			data, _ := json.Marshal(instance.Spec)
			if instance.Annotations != nil {
				instance.Annotations["spec"] = string(data)
			} else {
				instance.Annotations = map[string]string{"spec": string(data)}
			}
			if err := r.Client.Update(ctx, instance); err != nil {
				return ctrl.Result{}, err
			}
		}
	}
	return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *AppReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&appv1.App{}).
		Complete(r)
}
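As noted in the requirements section, this walkthrough never writes instance.Status. Because AppStatus embeds appsv1.DeploymentStatus, one minimal way to surface the owned Deployment's status would be to add something like the following at the end of Reconcile (a sketch under that assumption, not part of the original example):

// copy the owned Deployment's status into the App and persist it via the status subresource
deploy := &appsv1.Deployment{}
if err := r.Client.Get(ctx, req.NamespacedName, deploy); err == nil {
	instance.Status.DeploymentStatus = deploy.Status
	if err := r.Status().Update(ctx, instance); err != nil {
		return ctrl.Result{}, err
	}
}

Similarly, chaining Owns(&appsv1.Deployment{}).Owns(&corev1.Service{}) after For(...) in SetupWithManager would re-queue the App whenever someone edits or deletes the owned objects; the scaffold above only watches the App itself.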
Modify the sample custom resource config/samples/app_v1_app.yaml
apiVersion: app.example.com/v1
kind: App
metadata:
  name: app-sample
  namespace: default
spec:
  # Add fields here
  replicas: 2
  image: nginx:1.16.1
  ports:
    - targetPort: 80
      port: 8080
  envs:
    - name: DEMO
      value: app
    - name: GOPATH
      value: gopath
  resources:
    limits:
      cpu: 500m
      memory: 500Mi
    requests:
      cpu: 100m
      memory: 100Mi
Modify the Dockerfile
# Build the manager binary
FROM golang:1.15 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
ENV GOPROXY https://goproxy.cn,direct
RUN go mod download
# Copy the go source
COPY main.go main.go
COPY api/ api/
COPY controllers/ controllers/
COPY resource/ resource/
# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
#FROM gcr.io/distroless/static:nonroot
FROM kubeimages/distroless-static:latest
WORKDIR /
COPY --from=builder /workspace/manager .
USER 65532:65532
ENTRYPOINT ["/manager"]
- Added the GOPROXY environment variable
- Added a COPY line for the custom resource/ directory
- Changed gcr.io/distroless/static:nonroot to kubeimages/distroless-static:latest, a mirror that avoids pulling from gcr.io
Deploy and run
Option 1: run the controller locally
Intended for development and testing.
From the app project root, run:
$ make generate && make manifests && make install && make run
- kubectl must be installed locally, and the kubeconfig file ~/.kube/config must exist (with cluster-admin privileges)
- When you are done testing, stop the program with ctrl + c, then run make uninstall to remove the CRD definition
Option 2: deploy to the cluster
1. Generate manifests and install the CRD
$ make generate && make manifests && make install
2. Build the image
$ make docker-build IMG=leffss/app:v1
$ docker images |grep app
leffss/app v1 1eaa4b6a4781 About a minute ago 46.5MB
3. Prepare the images:
# k3s (k8s v1.20 here) uses containerd by default, so images built with docker must be imported into containerd
$ docker save leffss/app:v1 > app.tar
$ ctr image import app.tar
$ docker pull kubesphere/kube-rbac-proxy:v0.8.0
$ docker tag kubesphere/kube-rbac-proxy:v0.8.0 gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
$ docker save gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 > kube-rbac-proxy.tar
$ ctr image import kube-rbac-proxy.tar
4. Deploy
$ make deploy IMG=leffss/app:v1
5. Verify the results:
$ kubectl get service -A |grep app
$ kubectl -n app-system get pod
$ kubectl -n app-system get deployment
6. Confirm the CRD
$ kubectl get crd
NAME CREATED AT
addons.k3s.cattle.io 2021-05-02T01:03:34Z
helmcharts.helm.cattle.io 2021-05-02T01:03:34Z
helmchartconfigs.helm.cattle.io 2021-05-02T01:03:34Z
apps.app.example.com 2021-05-04T12:10:43Z
Create the custom resource
$ kubectl apply -f config/samples/app_v1_app.yaml
app.app.example.com/app-sample created
Check the controller logs:
$ kubectl -n app-system logs <app-controller-manager-pod-name> -c manager
2021-05-05T19:56:32.798+0800 INFO controller-runtime.metrics metrics server is starting to listen {"addr": ":8080"}
2021-05-05T19:56:32.798+0800 INFO setup starting manager
2021-05-05T19:56:32.799+0800 INFO controller-runtime.manager starting metrics server {"path": "/metrics"}
2021-05-05T19:56:32.800+0800 INFO controller-runtime.manager.controller.app Starting EventSource {"reconciler group": "app.example.com", "reconciler kind": "App", "source": "kind source: /, Kind="}
2021-05-05T19:56:32.901+0800 INFO controller-runtime.manager.controller.app Starting Controller {"reconciler group": "app.example.com", "reconciler kind": "App"}
2021-05-05T19:56:32.901+0800 INFO controller-runtime.manager.controller.app Starting workers {"reconciler group": "app.example.com", "reconciler kind": "App", "worker count": 1}
- With option 1 (local run), the logs appear directly in your console; with option 2, check the manager container logs of the app-controller-manager pod in the cluster
If you deployed the controller into the cluster (option 2), you may run into RBAC permission errors. The fix is to adjust the RBAC configuration used at deploy time; the simplest approach, used here, is to bind the controller's ServiceAccount to the cluster-admin ClusterRole:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cluster-admin-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: app-controller-manager
    namespace: app-system
Delete the custom resource
$ kubectl delete -f config/samples/app_v1_app.yaml
app.app.example.com/app-sample deleted
Delete the CRD definition
$ make uninstall
Remove the controller
$ make undeploy
- Applies only to option 2 (in-cluster deployment)
Summary
In practice you only need operator-sdk (or kubebuilder) to scaffold the CRD and its controller; from there, define the CRD fields your use case needs and write the corresponding controller logic, and you end up with a complete operator.
Appendix: what make deploy does
What does make deploy actually execute? Looking at the Makefile in the project root, the recipe is:
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
	$(KUSTOMIZE) build config/default | kubectl apply -f -
- In other words, it uses kustomize to render the deployment YAML and then applies it with kubectl
The other targets, such as make install and make run, are likewise defined in the Makefile.
So if you ever want to deploy manually, you can follow the Makefile: generate the deployment YAML first, then apply it to the k8s cluster by hand.