Introduction to the Kubernetes sample-controller Example

sample-controller

sample-controller is the official Kubernetes example of implementing a custom CRD (CustomResourceDefinition) and its controller.

By running this custom CRD controller and reading its code, you can get a good understanding of how to build a CRD controller of your own.

How a CRD Controller Works

There are better articles online that explain how this works:

  • https://www.zhaohuabing.com/post/2023-03-09-how-to-create-a-k8s-controller/
  • https://www.zhaohuabing.com/post/2023-04-04-how-to-create-a-k8s-controller-2/

Details of the CRD Definition YAML Format

Official documentation: https://kubernetes.io/zh-cn/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/

sample-controller Directory and File Overview

fananchong@myubuntu:~/k8s.io/sample-controller$ tree -L 2
.
|-- artifacts
|   `-- examples                            # example CRD YAML definitions
|-- controller.go                           # controller implementation
|-- hack                                    # scripts that run k8s.io/code-generator to generate pkg/generated/ and pkg/apis/samplecontroller/v1alpha1/zz_generated.deepcopy.go
|   |-- boilerplate.go.txt
|   |-- custom-boilerplate.go.txt
|   |-- tools.go
|   |-- update-codegen.sh
|   `-- verify-codegen.sh
|-- main.go                                 # main function (entry point)
|-- pkg
|   |-- apis                                # hand-written types.go and doc.go, used as input by k8s.io/code-generator
|   |-- generated                           # auto-generated
|   `-- signals
`-- vendor                                  # go mod vendor
    |-- github.com
    |-- golang.org
    |-- google.golang.org
    |-- gopkg.in
    |-- k8s.io
    |-- modules.txt
    `-- sigs.k8s.io
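
As the tree comments indicate, pkg/apis/samplecontroller/v1alpha1 holds the hand-written types.go and doc.go that k8s.io/code-generator takes as input. Below is a minimal sketch of what that types.go looks like; the Spec/Status fields match what syncHandler uses later (foo.Spec.DeploymentName, foo.Spec.Replicas), but treat the exact marker comments and JSON tags as an approximation rather than a verbatim copy of the upstream file.

    // pkg/apis/samplecontroller/v1alpha1/types.go (approximate sketch)
    package v1alpha1

    import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    // +genclient
    // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

    // Foo is the custom resource managed by sample-controller.
    type Foo struct {
        metav1.TypeMeta   `json:",inline"`
        metav1.ObjectMeta `json:"metadata,omitempty"`

        Spec   FooSpec   `json:"spec"`
        Status FooStatus `json:"status"`
    }

    // FooSpec is the desired state: which Deployment to manage and how
    // many replicas it should have.
    type FooSpec struct {
        DeploymentName string `json:"deploymentName"`
        Replicas       *int32 `json:"replicas"`
    }

    // FooStatus is the observed state, filled in by updateFooStatus.
    type FooStatus struct {
        AvailableReplicas int32 `json:"availableReplicas"`
    }

    // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

    // FooList is a list of Foo resources.
    type FooList struct {
        metav1.TypeMeta `json:",inline"`
        metav1.ListMeta `json:"metadata"`

        Items []Foo `json:"items"`
    }

Running hack/update-codegen.sh then produces pkg/generated/ (clientset, informers, listers) and the deepcopy file from these definitions.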

Key Code in controller.go

  1. Informers watching for events
    fooInformer watches for events on the Foo CRD
    deploymentInformer watches for Deployment events

        // Set up an event handler for when Foo resources change
        fooInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
            AddFunc: controller.enqueueFoo,
            UpdateFunc: func(old, new interface{}) {
                controller.enqueueFoo(new)
            },
        })
        // Set up an event handler for when Deployment resources change. This
        // handler will lookup the owner of the given Deployment, and if it is
        // owned by a Foo resource then the handler will enqueue that Foo resource for
        // processing. This way, we don't need to implement custom logic for
        // handling Deployment resources. More info on this pattern:
        // https://github.com/kubernetes/community/blob/8cafef897a22026d42f5e5bb3f104febe7e29830/contributors/devel/controllers.md
        deploymentInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
            AddFunc: controller.handleObject,
            UpdateFunc: func(old, new interface{}) {
                newDepl := new.(*appsv1.Deployment)
                oldDepl := old.(*appsv1.Deployment)
                if newDepl.ResourceVersion == oldDepl.ResourceVersion {
                    // Periodic resync will send update events for all known Deployments.
                    // Two different versions of the same Deployment will always have different RVs.
                    return
                }
                controller.handleObject(new)
            },
            DeleteFunc: controller.handleObject,
        })
    
  2. Handling Foo CRD events
    The Foo's namespace/name key is pushed onto the work queue

    // enqueueFoo takes a Foo resource and converts it into a namespace/name
    // string which is then put onto the work queue. This method should *not* be
    // passed resources of any type other than Foo.
    func (c *Controller) enqueueFoo(obj interface{}) {
        var key string
        var err error
        if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
            utilruntime.HandleError(err)
            return
        }
        c.workqueue.Add(key)
    }
    
  3. Handling Deployment events
    If the Deployment is owned by a Foo resource, the owning Foo is enqueued (the owner reference checked here is set by newDeployment; see the helper sketch after this list)

    // handleObject will take any resource implementing metav1.Object and attempt
    // to find the Foo resource that 'owns' it. It does this by looking at the
    // objects metadata.ownerReferences field for an appropriate OwnerReference.
    // It then enqueues that Foo resource to be processed. If the object does not
    // have an appropriate OwnerReference, it will simply be skipped.
    func (c *Controller) handleObject(obj interface{}) {
        var object metav1.Object
        var ok bool
        logger := klog.FromContext(context.Background())
        if object, ok = obj.(metav1.Object); !ok {
            tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
            if !ok {
                utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type"))
                return
            }
            object, ok = tombstone.Obj.(metav1.Object)
            if !ok {
                utilruntime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
                return
            }
            logger.V(4).Info("Recovered deleted object", "resourceName", object.GetName())
        }
        logger.V(4).Info("Processing object", "object", klog.KObj(object))
        if ownerRef := metav1.GetControllerOf(object); ownerRef != nil {
            // If this object is not owned by a Foo, we should not do anything more
            // with it.
            if ownerRef.Kind != "Foo" {
                return
            }
    
            foo, err := c.foosLister.Foos(object.GetNamespace()).Get(ownerRef.Name)
            if err != nil {
                logger.V(4).Info("Ignore orphaned object", "object", klog.KObj(object), "foo", ownerRef.Name)
                return
            }
    
            c.enqueueFoo(foo)
            return
        }
    }
    
  4. Processing in Controller.Run
    Worker goroutines take keys off the work queue and hand them to syncHandler (see the worker-loop sketch after this list)
    If the actual replica count differs from the desired one, the Deployment is scaled

    // syncHandler compares the actual state with the desired, and attempts to
    // converge the two. It then updates the Status block of the Foo resource
    // with the current status of the resource.
    func (c *Controller) syncHandler(ctx context.Context, key string) error {
        // Convert the namespace/name string into a distinct namespace and name
        logger := klog.LoggerWithValues(klog.FromContext(ctx), "resourceName", key)
    
        namespace, name, err := cache.SplitMetaNamespaceKey(key)
        if err != nil {
            utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
            return nil
        }
    
        // Get the Foo resource with this namespace/name
        foo, err := c.foosLister.Foos(namespace).Get(name)
        if err != nil {
            // The Foo resource may no longer exist, in which case we stop
            // processing.
            if errors.IsNotFound(err) {
                utilruntime.HandleError(fmt.Errorf("foo '%s' in work queue no longer exists", key))
                return nil
            }
    
            return err
        }
    
        deploymentName := foo.Spec.DeploymentName
        if deploymentName == "" {
            // We choose to absorb the error here as the worker would requeue the
            // resource otherwise. Instead, the next time the resource is updated
            // the resource will be queued again.
            utilruntime.HandleError(fmt.Errorf("%s: deployment name must be specified", key))
            return nil
        }
    
        // Get the deployment with the name specified in Foo.spec
        deployment, err := c.deploymentsLister.Deployments(foo.Namespace).Get(deploymentName)
        // If the resource doesn't exist, we'll create it
        if errors.IsNotFound(err) {
            deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Create(context.TODO(), newDeployment(foo), metav1.CreateOptions{})
        }
    
        // If an error occurs during Get/Create, we'll requeue the item so we can
        // attempt processing again later. This could have been caused by a
        // temporary network failure, or any other transient reason.
        if err != nil {
            return err
        }
    
        // If the Deployment is not controlled by this Foo resource, we should log
        // a warning to the event recorder and return error msg.
        if !metav1.IsControlledBy(deployment, foo) {
            msg := fmt.Sprintf(MessageResourceExists, deployment.Name)
            c.recorder.Event(foo, corev1.EventTypeWarning, ErrResourceExists, msg)
            return fmt.Errorf("%s", msg)
        }
    
        // If this number of the replicas on the Foo resource is specified, and the
        // number does not equal the current desired replicas on the Deployment, we
        // should update the Deployment resource.
        if foo.Spec.Replicas != nil && *foo.Spec.Replicas != *deployment.Spec.Replicas {
            logger.V(4).Info("Update deployment resource", "currentReplicas", *foo.Spec.Replicas, "desiredReplicas", *deployment.Spec.Replicas)
            deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Update(context.TODO(), newDeployment(foo), metav1.UpdateOptions{})
        }
    
        // If an error occurs during Update, we'll requeue the item so we can
        // attempt processing again later. This could have been caused by a
        // temporary network failure, or any other transient reason.
        if err != nil {
            return err
        }
    
        // Finally, we update the status block of the Foo resource to reflect the
        // current state of the world
        err = c.updateFooStatus(foo, deployment)
        if err != nil {
            return err
        }
    
        c.recorder.Event(foo, corev1.EventTypeNormal, SuccessSynced, MessageResourceSynced)
        return nil
    }
    
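To connect the dots between the work queue filled by enqueueFoo and the syncHandler shown above, here is a rough sketch of the Run / worker loop in controller.go. It is simplified from the upstream code (newer versions use a typed, rate-limited work queue and richer logging), and the cache-sync fields deploymentsSynced / foosSynced are assumed to exist on the Controller struct as in the upstream example; take it as an illustration of the pattern rather than the exact source.

    // Simplified sketch of Controller.Run and the worker loop.

    // Run waits for the informer caches to sync, then starts `workers`
    // goroutines that drain the work queue until the context is cancelled.
    func (c *Controller) Run(ctx context.Context, workers int) error {
        defer utilruntime.HandleCrash()
        defer c.workqueue.ShutDown()

        // Block until the Deployment and Foo informer caches are warm.
        if ok := cache.WaitForCacheSync(ctx.Done(), c.deploymentsSynced, c.foosSynced); !ok {
            return fmt.Errorf("failed to wait for caches to sync")
        }

        for i := 0; i < workers; i++ {
            go wait.UntilWithContext(ctx, c.runWorker, time.Second)
        }

        <-ctx.Done()
        return nil
    }

    // runWorker loops, processing one queue item at a time, until shutdown.
    func (c *Controller) runWorker(ctx context.Context) {
        for c.processNextWorkItem(ctx) {
        }
    }

    // processNextWorkItem takes one key from the queue, calls syncHandler,
    // and requeues the key with rate limiting if syncHandler fails.
    func (c *Controller) processNextWorkItem(ctx context.Context) bool {
        obj, shutdown := c.workqueue.Get()
        if shutdown {
            return false
        }
        defer c.workqueue.Done(obj)

        key, ok := obj.(string)
        if !ok {
            // Not a namespace/name key; drop it so it is not retried forever.
            c.workqueue.Forget(obj)
            return true
        }

        if err := c.syncHandler(ctx, key); err != nil {
            // Transient failure: put the key back to be retried later.
            c.workqueue.AddRateLimited(key)
            utilruntime.HandleError(fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()))
            return true
        }

        // Success: clear the rate-limiting history for this key.
        c.workqueue.Forget(obj)
        return true
    }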

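syncHandler also calls two helpers that are not shown above: newDeployment and updateFooStatus. They look roughly like the sketch below; the key detail is that newDeployment sets an OwnerReference back to the Foo via metav1.NewControllerRef, which is exactly what handleObject and metav1.IsControlledBy rely on. The concrete labels, container image, the sampleclientset field name, and the samplev1alpha1 import alias for pkg/apis/samplecontroller/v1alpha1 are approximations of the upstream code, not a verbatim copy.

    // newDeployment builds the Deployment that a Foo resource should own.
    // The OwnerReference created here is what lets handleObject map
    // Deployment events back to the owning Foo.
    func newDeployment(foo *samplev1alpha1.Foo) *appsv1.Deployment {
        labels := map[string]string{
            "app":        "nginx",
            "controller": foo.Name,
        }
        return &appsv1.Deployment{
            ObjectMeta: metav1.ObjectMeta{
                Name:      foo.Spec.DeploymentName,
                Namespace: foo.Namespace,
                OwnerReferences: []metav1.OwnerReference{
                    *metav1.NewControllerRef(foo, samplev1alpha1.SchemeGroupVersion.WithKind("Foo")),
                },
            },
            Spec: appsv1.DeploymentSpec{
                Replicas: foo.Spec.Replicas,
                Selector: &metav1.LabelSelector{MatchLabels: labels},
                Template: corev1.PodTemplateSpec{
                    ObjectMeta: metav1.ObjectMeta{Labels: labels},
                    Spec: corev1.PodSpec{
                        Containers: []corev1.Container{
                            {Name: "nginx", Image: "nginx:latest"},
                        },
                    },
                },
            },
        }
    }

    // updateFooStatus copies the Deployment's observed replica count into
    // the Foo's status and writes it back via the generated clientset.
    func (c *Controller) updateFooStatus(foo *samplev1alpha1.Foo, deployment *appsv1.Deployment) error {
        fooCopy := foo.DeepCopy()
        fooCopy.Status.AvailableReplicas = deployment.Status.AvailableReplicas
        _, err := c.sampleclientset.SamplecontrollerV1alpha1().
            Foos(foo.Namespace).
            UpdateStatus(context.TODO(), fooCopy, metav1.UpdateOptions{})
        return err
    }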