(1)安装operator sdk: (此处选择operator-sdk-1.26.1)


git clone https://github.com/operator-framework/operator-sdk
cd operator-sdk
git checkout master
make install

(2)初始化项目:



 export GO111MODULE=on
 go mod init  github.com/zhq/opdemo/v2
 operator-sdk init  --domain niginx.io  --license apache2 --owner "zhq"
 go mod tidy
 make
 

(3)创建API:


operator-sdk create api --group app --version v1beta1 --kind AppService
make

(4) 填充API结构体内容:


// AppServiceSpec defines the desired state of an AppService: replica count,
// container image, resource requirements, env vars and exposed ports.
type AppServiceSpec struct {
    // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
    // Important: Run "make" to regenerate code after modifying this file

    // Size is the desired number of Deployment replicas (required; pointer so
    // it can be passed straight to DeploymentSpec.Replicas).
    Size      *int32                      `json:"size"`
    // Image is the container image to run (required).
    Image     string                      `json:"image"`
    // Resources holds the container's resource requests/limits.
    Resources corev1.ResourceRequirements `json:"resources,omitempty"`
    // Envs are environment variables injected into the container.
    Envs      []corev1.EnvVar             `json:"envs,omitempty"`
    // Ports are the Service ports to expose; each port's TargetPort is also
    // used as the container port by the deployment builder.
    Ports     []corev1.ServicePort        `json:"ports,omitempty"`
}

// AppServiceStatus defines the observed state of AppService. It inlines the
// owned Deployment's status, so .status serializes with the Deployment's
// status fields at the top level.
type AppServiceStatus struct {
    // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
    // Important: Run "make" to regenerate code after modifying this file
    appsv1.DeploymentStatus `json:",inline"`
}

// Register the AppService reconciler with the manager; abort startup on failure.
if err = (&controllers.AppServiceReconciler{
    Client: mgr.GetClient(),
    Scheme: mgr.GetScheme(),
    // BUGFIX: the logger name was misspelled "reconsile".
    Log:    ctrl.Log.WithName("reconcile"),
}).SetupWithManager(mgr); err != nil {
    setupLog.Error(err, "unable to create controller", "controller", "AppService")
    os.Exit(1)
}

(5)填写controller Reconcile函数内容,完成控制逻辑:


// Reconcile drives an AppService toward its desired state. On first sight of
// a new AppService it snapshots the Spec into an annotation and creates the
// backing Deployment and Service; afterwards it compares the live Spec with
// that snapshot and pushes updates to both resources when the Spec changed.
func (r *AppServiceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    // BUGFIX: the original `ctx := log.FromContext(ctx)` does not compile
    // (":=" needs at least one new variable on the left, and it would replace
    // ctx with a logger). Bind the request-scoped logger instead.
    log := log.FromContext(ctx).WithValues("appservice", req.NamespacedName)

    // Fetch the AppService instance.
    var appService appv1beta1.AppService
    if err := r.Get(ctx, req.NamespacedName, &appService); err != nil {
        // Ignore NotFound: the object was deleted, nothing to reconcile.
        if client.IgnoreNotFound(err) != nil {
            return ctrl.Result{}, err
        }
        return ctrl.Result{}, nil
    }

    log.Info("fetch appservice objects", "appservice", appService)

    // Annotation key under which the last-applied Spec snapshot is stored.
    oldSpecAnnotation := "annotations"

    deploy := &appsv1.Deployment{}
    err := r.Get(ctx, req.NamespacedName, deploy)
    if err != nil && !errors.IsNotFound(err) {
        // BUGFIX: the original fell through to the update path on transient
        // Get errors; surface everything that is not NotFound.
        return ctrl.Result{}, err
    }
    if errors.IsNotFound(err) {
        // First reconcile of this object: record the Spec snapshot, then
        // create the associated resources.
        data, err := json.Marshal(appService.Spec)
        if err != nil {
            // BUGFIX: the original discarded the Marshal error.
            return ctrl.Result{}, err
        }
        if appService.Annotations == nil {
            appService.Annotations = map[string]string{}
        }
        appService.Annotations[oldSpecAnnotation] = string(data)
        if err := r.Client.Update(ctx, &appService); err != nil {
            return ctrl.Result{}, err
        }
        // Create the Deployment.
        if err := r.Client.Create(ctx, resources.NewDeploy(&appService)); err != nil {
            return ctrl.Result{}, err
        }
        // Create the Service.
        if err := r.Create(ctx, resources.NewService(&appService)); err != nil {
            return ctrl.Result{}, err
        }
        return ctrl.Result{}, nil
    }

    // Compare the live Spec against the annotated snapshot; only push updates
    // when they differ.
    oldspec := appv1beta1.AppServiceSpec{}
    if err := json.Unmarshal([]byte(appService.Annotations[oldSpecAnnotation]), &oldspec); err != nil {
        return ctrl.Result{}, err
    }
    if !reflect.DeepEqual(appService.Spec, oldspec) {
        // NOTE(review): the annotation snapshot is never refreshed after an
        // update, so every later reconcile of a changed Spec re-runs this
        // branch — harmless but worth fixing upstream.
        newDeploy := resources.NewDeploy(&appService)
        oldDeploy := &appsv1.Deployment{}
        if err := r.Get(ctx, req.NamespacedName, oldDeploy); err != nil {
            return ctrl.Result{}, err
        }
        oldDeploy.Spec = newDeploy.Spec
        if err := r.Client.Update(ctx, oldDeploy); err != nil {
            return ctrl.Result{}, err
        }

        newService := resources.NewService(&appService)
        oldService := &corev1.Service{}
        if err := r.Get(ctx, req.NamespacedName, oldService); err != nil {
            return ctrl.Result{}, err
        }
        // Preserve the allocated ClusterIP: the apiserver rejects updates
        // that attempt to change it.
        newService.Spec.ClusterIP = oldService.Spec.ClusterIP
        oldService.Spec = newService.Spec
        if err := r.Client.Update(ctx, oldService); err != nil {
            return ctrl.Result{}, err
        }
    }

    return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
// SetupWithManager sets up the controller with the Manager.
func (r *AppServiceReconciler) SetupWithManager(mgr ctrl.Manager) error {
    // Watch AppService objects and route their events to this reconciler.
    builder := ctrl.NewControllerManagedBy(mgr).For(&appv1beta1.AppService{})
    return builder.Complete(r)
}

(6)增加资源填充部分:


package resources

import (
    appv1beta1 "github.com/zhq/opdemo/v2/api/v1beta1"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

// NewDeploy renders the Deployment that backs the given AppService: replica
// count and containers come from the Spec, and a controller owner reference
// ties the Deployment's lifetime to its AppService.
func NewDeploy(app *appv1beta1.AppService) *appsv1.Deployment {
    labels := map[string]string{"app": app.Name}
    selector := &metav1.LabelSelector{MatchLabels: labels}
    return &appsv1.Deployment{
        TypeMeta: metav1.TypeMeta{
            APIVersion: "apps/v1",
            Kind:       "Deployment",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name:      app.Name,
            Namespace: app.Namespace,

            // Owned by the AppService so garbage collection removes the
            // Deployment when the AppService is deleted.
            OwnerReferences: []metav1.OwnerReference{
                *metav1.NewControllerRef(app, schema.GroupVersionKind{
                    Group:   appv1beta1.GroupVersion.Group,
                    Version: appv1beta1.GroupVersion.Version,
                    // BUGFIX: `appv1beta1.Kind.Kind` is not a symbol the
                    // kubebuilder scaffolding generates; name the CRD kind
                    // directly.
                    Kind:    "AppService",
                }),
            },
        },
        Spec: appsv1.DeploymentSpec{
            Replicas: app.Spec.Size,
            Template: corev1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels: labels,
                },
                Spec: corev1.PodSpec{
                    Containers: newContainers(app),
                },
            },
            Selector: selector,
        },
    }
}

// newContainers builds the single-container list for the AppService pod,
// mapping each declared service port's TargetPort to a container port.
func newContainers(app *appv1beta1.AppService) []corev1.Container {
    ports := make([]corev1.ContainerPort, 0, len(app.Spec.Ports))
    for i := range app.Spec.Ports {
        ports = append(ports, corev1.ContainerPort{
            ContainerPort: app.Spec.Ports[i].TargetPort.IntVal,
        })
    }
    container := corev1.Container{
        Name:            app.Name,
        Image:           app.Spec.Image,
        Resources:       app.Spec.Resources,
        Ports:           ports,
        ImagePullPolicy: corev1.PullIfNotPresent,
        Env:             app.Spec.Envs,
    }
    return []corev1.Container{container}
}

package resources

import (
    appv1beta1 "github.com/zhq/opdemo/v2/api/v1beta1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

// NewService renders the NodePort Service that exposes the AppService's pods.
// A controller owner reference ties the Service's lifetime to its AppService.
func NewService(app *appv1beta1.AppService) *corev1.Service {
    return &corev1.Service{
        TypeMeta: metav1.TypeMeta{
            Kind:       "Service",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name:      app.Name,
            Namespace: app.Namespace,
            OwnerReferences: []metav1.OwnerReference{
                *metav1.NewControllerRef(app, schema.GroupVersionKind{
                    Group:   appv1beta1.GroupVersion.Group,
                    Version: appv1beta1.GroupVersion.Version,
                    // BUGFIX: `appv1beta1.Kind.Kind` is not a symbol the
                    // kubebuilder scaffolding generates; name the CRD kind
                    // directly.
                    Kind:    "AppService",
                }),
            },
        },
        Spec: corev1.ServiceSpec{
            Type:  corev1.ServiceTypeNodePort,
            Ports: app.Spec.Ports,
            // Must match the pod labels set by NewDeploy.
            Selector: map[string]string{
                "app": app.Name,
            },
        },
    }
}

(7)启动operator调试(本地有k8s或者minikube环境)


 kubectl cluster-info

 make run

(8)创建CR,apply之后可以看到启动的deployment:


apiVersion: app.niginx.io/v1beta1
kind: AppService
metadata:
  name: nginx
spec:
  size: 3
  image: nginx:1.7.9
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30002

(5)打包镜像:


docker pull quay.io/operator-framework/opm:latest
docker pull gcr.io/kubebuilder/kube-rbac-proxy (可使用dockerhub版本修改tag)

make docker-build docker-push IMG="harbor.test.com/test-operator:v0.0.1"
make bundle IMG="harbor.test.com/test-operator:v0.0.1"
make bundle-build bundle-push BUNDLE_IMG="harbor.test.com/test-operator-bundle:v0.0.1"

# 构建index镜像用于发布到应用商店
opm index add -u docker --bundles harbor.test.com/test-operator-bundle:v0.0.1 --binary-image=harbor.test.com/tools/opm:v1.26 --tag harbor.test.com/test-operator-index:v0.0.1 --skip-tls-verify --generate
## 多镜像index:
opm index add -u docker --bundles harbor.test.com/test-operator-bundle1:v0.0.1,harbor.test.com/test-operator-bundle2:v0.0.1 --binary-image=harbor.test.com/tools/opm:v1.26 --tag harbor.test.com/test-operator-index:v5.0.1 --skip-tls-verify --generate

podman build --tls-verify=false -t harbor.test.com/test-operator-index:v0.0.1 -f index.Dockerfile .

导入商店:


apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
  name: demo-operator-index
  namespace: test-marketplace
spec:
  displayName: demo product index
  image: harbor.test.com/test-operator-index:v0.0.1
  publisher: testuser
  sourceType: grpc
  updateStrategy:
    registryPoll:
      interval: 30m

(6)本地测试:


operator-sdk run bundle harbor.test.com/test-operator-bundle:v0.0.1

(7)测试部署:

生成deploy yaml:


kubectl kustomize  test-operator/config/default >deploy.yaml

拷贝至部署环境安装:


kubectl apply -f deploy.yaml

(8) 增加pod监控:

operator-sdk create api --group=core --version=v1 --kind=Pod --controller=true --resource=false
// SetupWithManager registers the Pod reconciler so the controller watches
// core/v1 Pod events. (Reformatted: the original was not gofmt-clean —
// missing spaces in the signature.)
func (r *PodReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&corev1.Pod{}).
        Complete(r)
}

(9) 修改默认权限:

在 <xx>_controller.go中增加权限描述:

//+kubebuilder:rbac:groups=cache.example.com,namespace=memcached-operator-system,resources=memcacheds,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=cache.example.com,namespace=memcached-operator-system,resources=memcacheds/status,verbs=get;update;patch

(10)  获取karmada配置:

​
apiVersion: v1
kind: Service
metadata:
  labels:
    app: karmada-apiserver
    karmada.io/bootstrapping: service-defaults
  name: karmada-apiserver-nodeport
  namespace: karmada-system
spec:
  ports:
  - name: server
    port: 5443
    protocol: TCP
    targetPort: 5443
    nodePort: 32443
  selector:
    app: karmada-apiserver
  type: NodePort

​

kubectl get secrets -n karmada-system kubeconfig-file -ojsonpath={.data.kubeconfig} | base64 -d > karmada.config

Logo

K8S/Kubernetes社区为您提供最前沿的新闻资讯和知识内容

更多推荐