一 Kubebuilder Environment Setup

Note: the current K8s cluster must already have an IngressClass named nginx.

root@k8s:~# kubectl get ingressclass
NAME    CONTROLLER             PARAMETERS   AGE
nginx   k8s.io/ingress-nginx   <none>       19h
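
If the class is missing, one common way to get it (a sketch, assuming Helm is available; the release and namespace names follow the ingress-nginx quick-start) is:

helm upgrade --install ingress-nginx ingress-nginx \
  --repo https://kubernetes.github.io/ingress-nginx \
  --namespace ingress-nginx --create-namespace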

1.1 Download kubebuilder

wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v3.3.0/kubebuilder_linux_amd64 -O /usr/local/sbin/kubebuilder --no-check-certificate
chmod +x /usr/local/sbin/kubebuilder

kubebuilder version
Version: main.version{KubeBuilderVersion:"3.3.0", KubernetesVendor:"1.23.1", GitCommit:"47859bf2ebf96a64db69a2f7074ffdec7f15c1ec", BuildDate:"2022-01-18T17:03:29Z", GoOs:"linux", GoArch:"amd64"}

1.2 Download Go

wget https://studygolang.com/dl/golang/go1.19.7.linux-amd64.tar.gz

tar -C /usr/local -xzf go1.19.7.linux-amd64.tar.gz

vim /etc/profile

export PATH=$PATH:/usr/local/go/bin
export GO111MODULE=on
export GOPROXY=https://goproxy.cn,direct
export PATH=$PATH:/root/go/bin

source /etc/profile

root@k8s:~# go version
go version go1.19.7 linux/amd64

1.3 Build the tools the project needs

The following is only a demo and has nothing to do with the project developed later.

mkdir -p /usr/local/kubebuilder/elasticweb
cd /usr/local/kubebuilder/elasticweb

go mod init elasticweb
kubebuilder init --domain jcrose.top
mkdir bin


kubebuilder create api \
--group elasticweb \
--version v1 \
--kind ElasticWeb

# Install controller-gen; this package only appears in the module cache after kubebuilder create api has been run
cd /root/go/pkg/mod/sigs.k8s.io/controller-tools@v0.8.0/cmd/controller-gen
go build -o controller-gen main.go
mv controller-gen /usr/local/sbin
cp /usr/local/sbin/controller-gen /usr/local/kubebuilder/elasticweb/bin


make install    # the first run fails because kustomize is missing

# Install kustomize
cd /root/go/pkg/mod/sigs.k8s.io/kustomize/kustomize/v3@v3.8.7
go build -o kustomize main.go
cp kustomize /usr/local/sbin
cp kustomize /usr/local/kubebuilder/elasticweb/bin

make install 


kubectl get crds | grep jcrose
elasticwebs.elasticweb.jcrose.top   2023-04-09T14:25:18Z

Install the environment packages used to initialize the test environment when it starts.

# Official docs: https://github.com/kubernetes-sigs/controller-runtime/tree/main/tools/setup-envtest
# Install setup-envtest
# cd /root/go/pkg/mod/sigs.k8s.io/controller-runtime/tools/setup-envtest@v0.0.0-20230403212152-53057ba616d1
go install sigs.k8s.io/controller-runtime/tools/setup-envtest@release-0.17
cp /root/go/bin/setup-envtest /usr/local/sbin/

At this point three binaries have been installed: kustomize, controller-gen, and setup-envtest.
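
setup-envtest fetches the etcd and kube-apiserver binaries that envtest boots during make test. As a minimal sketch of what consumes them (illustrative only; the scaffolded suite_test.go does the same thing with Ginkgo):

package controllers

import (
	"testing"

	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

// Point the test at the downloaded binaries first, e.g.:
//   export KUBEBUILDER_ASSETS="$(setup-envtest use -p path 1.23.x)"
func TestEnvtestBoots(t *testing.T) {
	testEnv := &envtest.Environment{}
	cfg, err := testEnv.Start() // boots a local kube-apiserver + etcd
	if err != nil {
		t.Fatalf("failed to start envtest: %v", err)
	}
	defer testEnv.Stop()
	_ = cfg // a *rest.Config for building clients against the test API server
}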

二 Initialize the Project

kubebuilder init --domain jcrose.top
kubebuilder create api --group infra --version v1 --kind App

2.1 controllers/app_controller.go

/*
Copyright 2024.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	apinetv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	infrav1 "jcrose/api/v1"
)

// AppReconciler reconciles a App object
type AppReconciler struct {
	client.Client
	Scheme *runtime.Scheme
	//Log    logr.Logger
}

//+kubebuilder:rbac:groups=infra.jcrose.top,resources=apps,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=infra.jcrose.top,resources=apps/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=infra.jcrose.top,resources=apps/finalizers,verbs=update
//+kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the App object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.0/pkg/reconcile
func (r *AppReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	klog.Info("Name: ", req.Name)
	klog.Info("Namespace: ", req.Namespace)
	klog.Info("NamespacedName: ", req.NamespacedName)

	// TODO(user): your logic here
	instance := &infrav1.App{}

	// Look up the App instance by its NamespacedName
	err := r.Get(ctx, req.NamespacedName, instance)

	if err != nil {

		// If the instance no longer exists, return an empty result so Reconcile is not immediately requeued
		if errors.IsNotFound(err) {
			klog.Info("2.1. instance not found, maybe removed")
			return reconcile.Result{}, nil
		}

		klog.Error(err, "2.2 error")
		// Propagate the error to the caller
		return ctrl.Result{}, err
	}

	klog.Info("3. instance : " + instance.String())

	// Look up the Deployment
	deployment := &appsv1.Deployment{}

	// Query via the client
	err = r.Get(ctx, req.NamespacedName, deployment)

	// Handle query errors, including the not-found case
	if err != nil {
		// If the Deployment does not exist yet, create it and its siblings
		if errors.IsNotFound(err) {
			klog.Info("4. deployment not exists")

			// Create the Service first
			if err = createServiceIfNotExists(ctx, r, instance, req); err != nil {
				klog.Error(err, "5.2 error")
				// Propagate the error to the caller
				return ctrl.Result{}, err
			}

			// Then create the Deployment
			if err = createDeploymentIfNotExists(ctx, r, instance, req); err != nil {
				klog.Error(err, "5.3 error")
				// Propagate the error to the caller
				return ctrl.Result{}, err
			}

			// And finally the Ingress
			if err = createIngressIfNotExists(ctx, r, instance, req); err != nil {
				klog.Error(err, "5.4 error")
				// Propagate the error to the caller
				return ctrl.Result{}, err
			}

			// Everything was created; update the status subresource
			if err = updateStatus(ctx, r, instance); err != nil {
				klog.Error(err, "5.5 error")
				// Propagate the error to the caller
				return ctrl.Result{}, err
			}

			// Creation succeeded, we are done
			return ctrl.Result{}, nil
		} else {
			klog.Error(err, "7. error")
			// Propagate the error to the caller
			return ctrl.Result{}, err
		}
	}

	// The Deployment exists and the query returned no error:
	// sync its replica count with the spec

	klog.Info("11. update deployment's Replicas")
	deployment.Spec.Replicas = instance.Spec.Replicas
	// Update the Deployment through the client
	if err = r.Update(ctx, deployment); err != nil {
		klog.Error(err, "12. update deployment replicas error")
		// Propagate the error to the caller
		return ctrl.Result{}, err
	}

	klog.Info("13. update status")

	// If the Deployment update succeeded, refresh the status as well
	if err = updateStatus(ctx, r, instance); err != nil {
		klog.Error(err, "14. update status error")
		// Propagate the error to the caller
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil

}

// SetupWithManager sets up the controller with the Manager.
func (r *AppReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&infrav1.App{}).
		Complete(r)
}

func updateStatus(ctx context.Context, r *AppReconciler, App *infrav1.App) error {
	//log := r.Log.WithValues("func", "updateStatus")
	App.Status.TotalDomain = App.Spec.Domain

	if err := r.Status().Update(ctx, App); err != nil {
		klog.Error(err, "update instance error")
		return err
	}

	return nil
}

func createDeploymentIfNotExists(ctx context.Context, r *AppReconciler, app *infrav1.App, req ctrl.Request) error {
	//log := r.Log.WithValues("func", "createDeployment")

	deployment := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: app.Namespace,
			Name:      app.Name,
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: app.Spec.Replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": app.Spec.Project,
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": app.Spec.Project,
					},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{
							Name:            app.Spec.Project,
							Image:           app.Spec.Image,
							ImagePullPolicy: "IfNotPresent",
							Ports: []corev1.ContainerPort{
								{
									Name:          "http",
									Protocol:      corev1.ProtocolTCP,
									ContainerPort: *app.Spec.Port,
								},
							},
							Resources: corev1.ResourceRequirements{
								Requests: corev1.ResourceList{
									"cpu":    resource.MustParse("100m"),
									"memory": resource.MustParse("256Mi"),
								},
								Limits: corev1.ResourceList{
									"cpu":    resource.MustParse("200m"),
									"memory": resource.MustParse("512Mi"),
								},
							},
						},
					},
				},
			},
		},
	}
	klog.Info("set reference")
	if err := controllerutil.SetControllerReference(app, deployment, r.Scheme); err != nil {
		klog.Error(err, "SetControllerReference error")
		return err
	}
	klog.Info("start create deployment")
	if err := r.Create(ctx, deployment); err != nil {
		klog.Error(err, "create deployment error")
		return err
	}
	klog.Info("create deployment success")
	return nil
}

func createServiceIfNotExists(ctx context.Context, r *AppReconciler, app *infrav1.App, req ctrl.Request) error {
	//log := r.Log.WithValues("func", "createService")

	service := &corev1.Service{}

	err := r.Get(ctx, req.NamespacedName, service)

	// If the query returns no error, the Service already exists; do nothing
	if err == nil {
		klog.Info("service exists")
		return nil
	}

	// Any error other than NotFound is returned to the caller
	if !errors.IsNotFound(err) {
		klog.Error(err, "query service error")
		return err
	}

	// Build the Service object
	service = &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: app.Namespace,
			Name:      app.Name,
		},
		Spec: corev1.ServiceSpec{
			Ports: []corev1.ServicePort{{
				Name: "http",
				Port: *app.Spec.Port,
			},
			},
			Selector: map[string]string{
				"app": app.Spec.Project,
			},
			Type: corev1.ServiceTypeNodePort,
		},
	}

	// This step is crucial!
	// With the owner reference set, deleting the App resource also deletes the Service
	klog.Info("set reference")
	if err := controllerutil.SetControllerReference(app, service, r.Scheme); err != nil {
		klog.Error(err, "SetControllerReference error")
		return err
	}

	// Create the Service
	klog.Info("start create service")
	if err := r.Create(ctx, service); err != nil {
		klog.Error(err, "create service error")
		return err
	}

	klog.Info("create service success")
	app.Status.TotalDomain = app.Spec.Domain
	err = r.Status().Update(ctx, service)
	if err != nil {
		return err
	}

	return nil
}

func createIngressIfNotExists(ctx context.Context, r *AppReconciler, app *infrav1.App, req ctrl.Request) error {
	//log := r.Log.WithValues("func", "createIngress")

	ingress := &apinetv1.Ingress{}

	err := r.Get(ctx, req.NamespacedName, ingress)

	// If the query returns no error, the Ingress already exists; do nothing
	if err == nil {
		klog.Info("ingress exists")
		return nil
	}

	// Any error other than NotFound is returned to the caller
	if !errors.IsNotFound(err) {
		klog.Error(err, "query ingress error")
		return err
	}

	// Build the Ingress object
	ingress.Name = app.Name
	ingress.Namespace = app.Namespace
	pathType := apinetv1.PathTypePrefix
	icn := "nginx"
	ingress.Spec = apinetv1.IngressSpec{
		IngressClassName: &icn,
		Rules: []apinetv1.IngressRule{
			{
				Host: app.Spec.Domain,
				IngressRuleValue: apinetv1.IngressRuleValue{
					HTTP: &apinetv1.HTTPIngressRuleValue{
						Paths: []apinetv1.HTTPIngressPath{
							{
								Path:     "/",
								PathType: &pathType,
								Backend: apinetv1.IngressBackend{
									Service: &apinetv1.IngressServiceBackend{
										Name: app.Name,
										Port: apinetv1.ServiceBackendPort{
											Number: *app.Spec.Port,
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	// This step is crucial!
	// With the owner reference set, deleting the App resource also deletes the Ingress
	klog.Info("set reference")
	if err := controllerutil.SetControllerReference(app, ingress, r.Scheme); err != nil {
		klog.Error(err, "SetControllerReference error")
		return err
	}

	// Create the Ingress
	klog.Info("start create ingress")
	if err := r.Create(ctx, ingress); err != nil {
		klog.Error(err, "create ingress error")
		return err
	}

	klog.Info("create service success")

	return nil
}
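
One thing worth noting about the scaffolded SetupWithManager above: it only watches App objects, so changes made directly to the owned Deployment, Service, or Ingress will not requeue a reconcile. A possible refinement (a sketch, not part of the generated scaffold; it reuses the imports already in the file) is to register the owned types with Owns:

func (r *AppReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&infrav1.App{}).
		// Requeue the owning App whenever an object we created changes
		Owns(&appsv1.Deployment{}).
		Owns(&corev1.Service{}).
		Owns(&apinetv1.Ingress{}).
		Complete(r)
}

Owns works here because each child gets SetControllerReference applied before creation, which is also what lets the garbage collector delete the children along with the App.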

2.2 api/v1/app_types.go

/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	"fmt"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE!  THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required.  Any new fields you add must have json tags for the fields to be serialized.

// AppSpec defines the desired state of App
type AppSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Replicas is the desired number of Pods
	Replicas *int32 `json:"replicas,omitempty"`
	// Image is the container image to run
	Image string `json:"image,omitempty"`
	// Port is the container port, also used by the Service and Ingress backend
	Port *int32 `json:"port,omitempty"`
	// Project is used as the app label selector and container name
	Project string `json:"project,omitempty"`
	// Domain is the host exposed through the Ingress
	Domain string `json:"domain,omitempty"`
}

// AppStatus defines the observed state of App
type AppStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file
	TotalStatus string `json:"totalStatus"`
	TotalDomain string `json:"totalDomain"`
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
//+kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas",description="number of pods"
//+kubebuilder:printcolumn:name="Domain",type="string",JSONPath=".spec.domain",description="ingress host"
//+kubebuilder:printcolumn:name="Image",type="string",JSONPath=".spec.image",description="app image"

// App is the Schema for the apps API
type App struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   AppSpec   `json:"spec,omitempty"`
	Status AppStatus `json:"status,omitempty"`
}

//+kubebuilder:object:root=true

// AppList contains a list of App
type AppList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []App `json:"items"`
}

func init() {
	SchemeBuilder.Register(&App{}, &AppList{})
}

func (in *App) String() string {
	// Guard against a nil Port to avoid a panic
	port := int32(0)
	if in.Spec.Port != nil {
		port = *in.Spec.Port
	}
	return fmt.Sprintf("Image [%s], Port [%d], AppName [%s]",
		in.Spec.Image,
		port,
		in.Spec.Project)
}
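
Because every spec field is optional, a CR submitted without port or replicas reaches the controller as nil pointers. controller-gen supports validation and defaulting markers that push such checks into the CRD schema; a hedged sketch of how the fields could be constrained (the marker syntax is standard kubebuilder, the chosen bounds are illustrative, and make manifests must be re-run afterwards):

	// +kubebuilder:default=1
	// +kubebuilder:validation:Minimum=1
	Replicas *int32 `json:"replicas,omitempty"`

	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	Port *int32 `json:"port,omitempty"`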

2.3 main.go

/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"flag"
	"os"

	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
	// to ensure that exec-entrypoint and run can make use of them.
	_ "k8s.io/client-go/plugin/pkg/client/auth"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"

	infrav1 "yunlizhi/api/v1"
	"yunlizhi/controllers"
	//+kubebuilder:scaffold:imports
)

var (
	scheme   = runtime.NewScheme()
	setupLog = ctrl.Log.WithName("setup")
)

func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))

	utilruntime.Must(infrav1.AddToScheme(scheme))
	//+kubebuilder:scaffold:scheme
}

func main() {
	var metricsAddr string
	var enableLeaderElection bool
	var probeAddr string
	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
		"Enable leader election for controller manager. "+
			"Enabling this will ensure there is only one active controller manager.")
	opts := zap.Options{
		Development: true,
	}
	opts.BindFlags(flag.CommandLine)
	flag.Parse()

	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:                 scheme,
		MetricsBindAddress:     metricsAddr,
		Port:                   9443,
		HealthProbeBindAddress: probeAddr,
		LeaderElection:         enableLeaderElection,
		LeaderElectionID:       "dcd31429.jcrose.top",
	})
	if err != nil {
		setupLog.Error(err, "unable to start manager")
		os.Exit(1)
	}

	if err = (&controllers.AppReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "App")
		os.Exit(1)
	}
	//+kubebuilder:scaffold:builder

	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up health check")
		os.Exit(1)
	}
	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up ready check")
		os.Exit(1)
	}

	setupLog.Info("starting manager")
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		setupLog.Error(err, "problem running manager")
		os.Exit(1)
	}
}
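
Before building an image, the controller can be run locally against the current kubeconfig with the scaffolded Makefile target (it regenerates manifests and then runs main.go on the host):

make run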

三 Deploy the jcrose.top Operator

3.1 Modify the Dockerfile: add a GOPROXY line to speed up builds

# Build the manager binary
FROM golang:1.17 as builder

# The key addition: route module downloads through goproxy.cn (Dockerfile has no inline comments)
ENV GOPROXY=https://goproxy.cn,direct

WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download

# Copy the go source
COPY main.go main.go
COPY api/ api/
COPY controllers/ controllers/

# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager main.go

# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM katanomi/distroless-static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER 65532:65532

ENTRYPOINT ["/manager"]

3.2 Deploy

# Build and push the image to Aliyun (docker login first)
# The repository must already exist in the Aliyun container registry

make docker-build docker-push IMG=registry.cn-zhangjiakou.aliyuncs.com/jcrose-k8s/jcrose-deployment:v2


# kind can load the image directly
kind load docker-image registry.cn-zhangjiakou.aliyuncs.com/jcrose-k8s/jcrose-deployment:v2

make install
make manifests    # regenerate CRD and RBAC manifests

# Deploy the controller
make deploy IMG=registry.cn-zhangjiakou.aliyuncs.com/jcrose-k8s/jcrose-deployment:v2

3.3 Resource Manifest

apiVersion: infra.jcrose.top/v1
kind: App
metadata:
  name: jcrose-sample
spec:
  # Add fields here
  replicas: 1
  image: registry.cn-zhangjiakou.aliyuncs.com/jcrose-k8s/jcrose-deployment:v2
  port: 8080
  domain: tomcat.jcrose.com
  project: tomcat
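
Save the manifest (the filename below is arbitrary) and apply it:

kubectl apply -f app.yaml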

四 Final Result

root@k8s:~# kubectl get app
NAME            REPLICAS   DOMAIN              IMAGE
jcrose-sample   1          tomcat.jcrose.com   registry.cn-zhangjiakou.aliyuncs.com/jcrose-k8s/jcrose-deployment:v2
root@k8s:~# kubectl get ingress
NAME            CLASS   HOSTS               ADDRESS          PORTS   AGE
jcrose-sample   nginx   tomcat.jcrose.com   10.106.189.253   80      27m
root@k8s:~# kubectl get svc
NAME                                        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
jcrose-controller-manager-metrics-service   ClusterIP   10.102.238.108   <none>        8443/TCP         20h
jcrose-sample                               NodePort    10.105.10.100    <none>        8080:20408/TCP   32m
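
To verify the full chain, send a request through the ingress controller with the expected Host header (the address is the one reported by kubectl get ingress above):

curl -H "Host: tomcat.jcrose.com" http://10.106.189.253/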
