云原生学习路线导航页(持续更新中)
- 本文是 Kubernetes operator学习 系列第一篇,主要对client-go进行学习,从源码阅读角度,学习client-go各个组件的实现原理、如何协同工作等
- 参考视频:Bilibili 2022年最新k8s编程operator篇,UP主:白丁云原生
- 本文参考资料
- https://pan.baidu.com/s/1BibLAishAFJLeTyYCLnlbA 提取码: en2p
- https://zhuanlan.zhihu.com/p/573982128
- https://xinchen.blog.csdn.net/article/details/113753087
- 并根据个人理解进行了汇总和修改
综上所述,要想学习 Operator,使用 CRD + Custom Controller 扩展kubernetes功能,必须先学习 Client-go 库,学会如何与APIServer进行交互
我们假设前提:kubernetes版本为 v1.x.y
图片参考来源:https://zhuanlan.zhihu.com/p/573982128
下面先介绍各组件整体的运转流程,然后对 client-go 和 一个 CRDController 应该包含哪些组件进行详细介绍。
client-go 提供了四种客户端:
- RESTClient:最基础的客户端,提供最基本的封装,可以通过它组装与 APIServer 通信时的 URL
- Clientset:是 Client 的集合,Clientset 中包含了所有 K8S 内置资源的 Client,通过 Clientset 便可以很方便地操作如 Pod、Service 这些资源
- DynamicClient:动态客户端,可以操作任意 K8S 资源,包括 CRD 定义的资源
- DiscoveryClient:用于发现 K8S 提供的资源组、资源版本和资源信息,比如:kubectl api-resources
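这四种客户端都从同一个 rest.Config 构造出来。下面是一个最小示意(假设 kubeconfig 位于默认路径;RESTClient 还需要 GroupVersion、序列化器等额外配置,见后文,这里从略):
package main

import (
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// 从默认路径(~/.kube/config)加载集群配置
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	// Clientset:操作 K8S 内置资源
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// DynamicClient:操作任意资源(包括 CRD)
	dynamicClient, err := dynamic.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// DiscoveryClient:发现资源组、资源版本和资源信息
	discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		panic(err)
	}
	_, _, _ = clientset, dynamicClient, discoveryClient
}
下面先看 RESTClient,它的结构体定义如下: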
type RESTClient struct {
// base is the root URL for all invocations of the client
base *url.URL
// versionedAPIPath is a path segment connecting the base URL to the resource root
versionedAPIPath string
// content describes how a RESTClient encodes and decodes responses.
content ClientContentConfig
// creates BackoffManager that is passed to requests.
createBackoffMgr func() BackoffManager
// rateLimiter is shared among all requests created by this client unless specifically
// overridden.
rateLimiter flowcontrol.RateLimiter
// warningHandler is shared among all requests created by this client.
// If not set, defaultWarningHandler is used.
warningHandler WarningHandler
// Set specific behavior of the client. If not set http.DefaultClient will be used.
Client *http.Client
}
RESTClient 的构造函数:func RESTClientFor(config *Config) (*RESTClient, error)
它位于 rest 包下,直接通过 rest.RESTClientFor() 调用;入参是 *rest.Config 类型,Config 中包含了 限速器、编解码器 等配置
// Interface captures the set of operations for generically interacting with Kubernetes REST apis.
type Interface interface {
GetRateLimiter() flowcontrol.RateLimiter
Verb(verb string) *Request
Post() *Request
Put() *Request
Patch(pt types.PatchType) *Request
Get() *Request
Delete() *Request
APIVersion() schema.GroupVersion
}
Request 提供了链式调用的方法:
- func (r *Request) Namespace(namespace string) *Request:设置 当前Request 访问的 namespace
- func (r *Request) Resource(resource string) *Request:设置 当前Request 想要访问的资源类型
- func (r *Request) Name(resourceName string) *Request:设置 当前Request 想要访问的资源的名称
- func (r *Request) Do(ctx context.Context) Result:格式化并执行请求,返回一个 Result 对象,以便于处理响应
rest.Config 的结构体定义如下:
type Config struct {
// API 服务器的主机地址,格式为 https://:。默认情况下,它为空字符串,表示使用当前上下文中的集群配置。
Host string
// 指定 API 服务器的路径,目前只有两种取值:/api、/apis
// - /api:访问core API 组资源时,其实group值为空
// - /apis:访问其他 API 组资源时,都是apis,他们都有group值
APIPath string
// 对请求内容的配置,会影响对象在发送到服务器时的转换方式
// - ContentConfig中有两个重要属性:
// - NegotiatedSerializer:用于序列化和反序列化请求和响应的接口
// - GroupVersion:请求资源的 API 组和版本
ContentConfig
// 用于进行基本身份验证的用户名的字符串
Username string
// 用于进行基本身份验证的密码的字符串
Password string `datapolicy:"password"`
// 用于进行身份验证的令牌的字符串
BearerToken string `datapolicy:"token"`
// 包含身份验证令牌的文件的路径
BearerTokenFile string
// TLS 客户端配置,包括证书和密钥
TLSClientConfig
// 每秒允许的请求数(Queries Per Second)。默认为 5.0。
QPS float32
// 突发请求数。默认为 10
Burst int
// 速率限制器,用于控制向 API 服务器发送请求的速率
RateLimiter flowcontrol.RateLimiter
// 与 API 服务器建立连接的超时时间
Timeout time.Duration
// 用于创建网络连接的 Dial 函数
Dial func(ctx context.Context, network, address string) (net.Conn, error)
// ......
}
package main
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
func main() {
// 在你机器的homeDir下,放入集群的config文件,用于连接集群(可以直接从集群master的~/.kube/config拷贝过来)
// clientcmd是位于client-go/tools/clientcmd目录下的工具
config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
if err != nil {
panic(err)
}
// 设置默认 GroupVersion(我要操作的是pod,不属于任何的group,所以使用了SchemeGroupVersion。你要操作什么,就写什么GroupVersion即可)
config.GroupVersion = &v1.SchemeGroupVersion
// 设置序列化/反序列化器(后面的 Into方法 就是使用它完成 反序列化 的)
config.NegotiatedSerializer = scheme.Codecs
// 设置 API 根的子路径(我们操作的是pod,属于core资源,所以设置为/api)
config.APIPath = "/api"
// 创建一个 RESTClient
restClient, err := rest.RESTClientFor(config)
if err != nil {
panic(err)
}
// 创建一个PodList,用于接收请求结果
pods := v1.PodList{}
// 链式编程 发送请求,并反序列化结果到pods中
err = restClient.Get().Namespace(v1.NamespaceDefault).Resource("pods").Do(context.TODO()).Into(&pods)
if err != nil {
panic(err)
}
// 打印pod名称
for _, pod := range pods.Items {
println(pod.Name)
}
}
cassandra-5hbf7
liveness-exec
mysql-87pgn
myweb-7f8rh
myweb-rjblc
nginx-pod-node1
Clientset 的定义位于 /kubernetes/clientset.go 中:
type Clientset struct {
......
appsV1 *appsv1.AppsV1Client
appsV1beta1 *appsv1beta1.AppsV1beta1Client
appsV1beta2 *appsv1beta2.AppsV1beta2Client
authenticationV1 *authenticationv1.AuthenticationV1Client
authenticationV1alpha1 *authenticationv1alpha1.AuthenticationV1alpha1Client
authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client
authorizationV1 *authorizationv1.AuthorizationV1Client
authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client
autoscalingV1 *autoscalingv1.AutoscalingV1Client
autoscalingV2 *autoscalingv2.AutoscalingV2Client
autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client
autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client
batchV1 *batchv1.BatchV1Client
batchV1beta1 *batchv1beta1.BatchV1beta1Client
certificatesV1 *certificatesv1.CertificatesV1Client
certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client
certificatesV1alpha1 *certificatesv1alpha1.CertificatesV1alpha1Client
coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client
coordinationV1 *coordinationv1.CoordinationV1Client
coreV1 *corev1.CoreV1Client
......
}
以 appsv1 字段的类型 *appsv1.AppsV1Client 举例:可以看到,其内部包含了一个 restClient。这也进一步印证,Clientset 就是一系列 RESTClient 的集合。
type AppsV1Client struct {
restClient rest.Interface
}
Clientset 的构造函数 NewForConfig 也位于 /kubernetes/clientset.go 中,所以可以直接使用 kubernetes.NewForConfig() 调用:
func NewForConfig(c *rest.Config) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.UserAgent == "" {
configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
}
// share the transport between all clients
httpClient, err := rest.HTTPClientFor(&configShallowCopy)
if err != nil {
return nil, err
}
// 这个方法,就完成了所有 RESTClient 的创建
return NewForConfigAndClient(&configShallowCopy, httpClient)
}
func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
if configShallowCopy.Burst <= 0 {
return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
}
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
}
var cs Clientset
var err error
// 下面就是创建各种 RESTClient 了,创建结果,被保存到 cs 中
cs.admissionregistrationV1, err = admissionregistrationv1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
cs.admissionregistrationV1alpha1, err = admissionregistrationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
......
return &cs, nil
}
Clientset 实现了 Interface 接口(位于同一文件中),接口为每个 GroupVersion 提供了一个方法:
type Interface interface {
......
AppsV1() appsv1.AppsV1Interface
AppsV1beta1() appsv1beta1.AppsV1beta1Interface
AppsV1beta2() appsv1beta2.AppsV1beta2Interface
AuthenticationV1() authenticationv1.AuthenticationV1Interface
AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface
AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface
AuthorizationV1() authorizationv1.AuthorizationV1Interface
AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface
AutoscalingV1() autoscalingv1.AutoscalingV1Interface
AutoscalingV2() autoscalingv2.AutoscalingV2Interface
AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface
AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface
BatchV1() batchv1.BatchV1Interface
BatchV1beta1() batchv1beta1.BatchV1beta1Interface
CertificatesV1() certificatesv1.CertificatesV1Interface
CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface
CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface
CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface
CoordinationV1() coordinationv1.CoordinationV1Interface
CoreV1() corev1.CoreV1Interface
......
}
以 AppsV1() 方法为例,返回值是接口 appsv1.AppsV1Interface 的实现类 appsv1.AppsV1Client 的对象:
// 接口
type AppsV1Interface interface {
RESTClient() rest.Interface
ControllerRevisionsGetter
DaemonSetsGetter
DeploymentsGetter
ReplicaSetsGetter
StatefulSetsGetter
}
// 实现类
type AppsV1Client struct {
restClient rest.Interface
}
// AppsV1Client 实现 AppsV1Interface 接口的方法
func (c *AppsV1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient
}
appsv1.AppsV1Client 的其他实例方法:
// 返回值是 DeploymentInterface
func (c *AppsV1Client) Deployments(namespace string) DeploymentInterface {
// 实际上,返回值是 DeploymentInterface 的实现类 deployments 的对象
return newDeployments(c, namespace)
}
// 构造一个 deployments 的对象
func newDeployments(c *AppsV1Client, namespace string) *deployments {
return &deployments{
client: c.RESTClient(),
ns: namespace,
}
}
DeploymentInterface 接口源码,可以看到其中包含了操作 Deployment 的各种方法:
type DeploymentInterface interface {
Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (*v1.Deployment, error)
Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error)
UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Deployment, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.DeploymentList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error)
Apply(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error)
ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error)
GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error)
DeploymentExpansion
}
func (c *deployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) {
result = &v1.Deployment{}
err = c.client.Post().
Namespace(c.ns).
Resource("deployments").
VersionedParams(&opts, scheme.ParameterCodec).
Body(deployment).
Do(ctx).
Into(result)
return
}
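以 Create 为例,上面实现的链式调用最终由 Clientset 的使用者触发。下面是一个创建 Deployment 的最小示意(Deployment 名称 demo-nginx、镜像 nginx:1.25 等均为演示用假设):
package main

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientSet, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// 构造一个最简单的 Deployment
	replicas := int32(1)
	deploy := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-nginx"},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "demo-nginx"},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"app": "demo-nginx"},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{
						{Name: "nginx", Image: "nginx:1.25"},
					},
				},
			},
		},
	}
	// 底层就是走上面分析的链式调用:Post().Namespace(...).Resource("deployments")...
	result, err := clientSet.AppsV1().Deployments(corev1.NamespaceDefault).
		Create(context.TODO(), deploy, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("created deployment:", result.Name)
}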
在 staging/src/k8s.io/api/core/v1/types.go 中,可以看到 type Pod struct 的注释上,就使用了 genclient 的标记:
// +genclient
// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers
// ......
type Pod struct {
......
}
// +genclient - 生成默认的客户端动作函数(create, update, delete, get, list, update, patch, watch以及 是否生成updateStatus取决于.Status字段是否存在)。
// +genclient:nonNamespaced - 所有动作函数都是在没有名称空间的情况下生成
// +genclient:onlyVerbs=create,get - 指定的动作函数被生成.
// +genclient:skipVerbs=watch - 生成watch以外所有的动作函数.
// +genclient:noStatus - 即使.Status字段存在也不生成updateStatus动作函数
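举个例子:如果我们自己定义一个 CRD 类型(下面的 Foo 类型纯属演示假设),在用 code-generator 生成 clientset 之前,就可以这样打标记:
// (假设已引入 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1")

// +genclient
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Foo 是一个演示用的自定义资源类型
type Foo struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec FooSpec `json:"spec"`
}

// FooSpec 描述 Foo 的期望状态
type FooSpec struct {
	Replicas int32 `json:"replicas"`
}
下面看一个使用 Clientset 的完整示例: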
package main
import (
"context"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)
func main() {
// 同样是先 创建一个客户端配置config
config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
if err != nil {
panic(err)
}
// 使用 kubernetes.NewForConfig(),创建一个ClientSet对象
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err)
}
// 1、从 clientSet 中调用操作pod的 RESTClient,获取default命名空间下的pod列表
pods, err := clientSet.CoreV1().Pods(v1.NamespaceDefault).List(context.TODO(), v1.ListOptions{})
if err != nil {
panic(err)
}
// 打印pod名称
for _, pod := range pods.Items {
println(pod.Name)
}
println("------")
// 2、从 clientSet 中调用操作 deploy 的 RESTClient,获取kube-system命名空间下的deploy列表
deploys, err := clientSet.AppsV1().Deployments("kube-system").List(context.TODO(), v1.ListOptions{})
if err != nil {
panic(err)
}
// 打印 deploy 名称
for _, deploy := range deploys.Items {
println(deploy.Name)
}
}
cassandra-5hbf7
liveness-exec
mysql-87pgn
myweb-7f8rh
myweb-rjblc
nginx-pod-node1
------
coredns
default-http-backend
metrics-server
DynamicClient 的定义位于 /client-go/dynamic/simple.go 中。其内部的 client 字段是 rest.Interface 类型,即为 RESTClient 实现的那个接口,可以在 4.2.2 中看到:
type DynamicClient struct {
client rest.Interface
}
DynamicClient 的构造函数 NewForConfig 同样位于 /client-go/dynamic/simple.go 中,是一个函数,位于 dynamic 包下,所以直接通过 dynamic.NewForConfig() 就可以使用;入参是 *rest.Config 类型:
func NewForConfig(inConfig *rest.Config) (*DynamicClient, error) {
config := ConfigFor(inConfig)
httpClient, err := rest.HTTPClientFor(config)
if err != nil {
return nil, err
}
return NewForConfigAndClient(config, httpClient)
}
DynamicClient 只有一个实例方法 Resource(resource schema.GroupVersionResource),用于指定当前 DynamicClient 要操作的究竟是什么类型,对应的接口是 dynamic.Interface:
type Interface interface {
Resource(resource schema.GroupVersionResource) NamespaceableResourceInterface
}
调用 Resource() 之后,返回的就是 你所指定资源 的 Client 了:
func (c *DynamicClient) Resource(resource schema.GroupVersionResource) NamespaceableResourceInterface {
return &dynamicResourceClient{client: c, resource: resource}
}
type dynamicResourceClient struct {
client *DynamicClient
namespace string
resource schema.GroupVersionResource
}
func (c *dynamicResourceClient) Namespace(ns string) ResourceInterface {
ret := *c
ret.namespace = ns
return &ret
}
Unstructured 的定义位于 staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go 中:
type Unstructured struct {
// Object is a JSON compatible map with string, float, int, bool, []interface{}, or
// map[string]interface{}
// children.
Object map[string]interface{}
}
从 4.4.3.2.DynamicClient 的实例方法 中我们知道,最终操作资源时,实际使用的是 dynamicResourceClient,它实现了 ResourceInterface 接口:
type ResourceInterface interface {
Create(ctx context.Context, obj *unstructured.Unstructured, options metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error)
Update(ctx context.Context, obj *unstructured.Unstructured, options metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error)
UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, options metav1.UpdateOptions) (*unstructured.Unstructured, error)
Delete(ctx context.Context, name string, options metav1.DeleteOptions, subresources ...string) error
DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error
Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error)
List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error)
Apply(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions, subresources ...string) (*unstructured.Unstructured, error)
ApplyStatus(ctx context.Context, name string, obj *unstructured.Unstructured, options metav1.ApplyOptions) (*unstructured.Unstructured, error)
}
可以看到,这些方法的入参是 obj *unstructured.Unstructured,返回值也是 *unstructured.Unstructured;List 方法的返回值则是 *unstructured.UnstructuredList,这个结构里面包含一个 []unstructured.Unstructured。
因此我们需要 资源对象-->Unstructured 和 Unstructured-->资源对象 的相互转换。转换器接口定义位于 staging/src/k8s.io/apimachinery/pkg/runtime/converter.go 中:
type UnstructuredConverter interface {
ToUnstructured(obj interface{}) (map[string]interface{}, error)
FromUnstructured(u map[string]interface{}, obj interface{}) error
}
UnstructuredConverter 的实现类 unstructuredConverter 同样位于 staging/src/k8s.io/apimachinery/pkg/runtime/converter.go 中:
type unstructuredConverter struct {
// If true, we will be additionally running conversion via json
// to ensure that the result is true.
// This is supposed to be set only in tests.
mismatchDetection bool
// comparison is the default test logic used to compare
comparison conversion.Equalities
}
- func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]interface{}, error):资源对象-->Unstructured
- func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj interface{}) error:Unstructured-->资源对象
runtime 包中已经提供了一个默认的转换器实例 DefaultUnstructuredConverter,同样位于 staging/src/k8s.io/apimachinery/pkg/runtime/converter.go 中:
var (
......
// DefaultUnstructuredConverter performs unstructured to Go typed object conversions.
DefaultUnstructuredConverter = &unstructuredConverter{
mismatchDetection: parseBool(os.Getenv("KUBE_PATCH_CONVERSION_DETECTOR")),
comparison: conversion.EqualitiesOrDie(
func(a, b time.Time) bool {
return a.UTC() == b.UTC()
},
),
}
)
使用时直接引用 runtime.DefaultUnstructuredConverter,调用它的 ToUnstructured 或 FromUnstructured 方法,就可以实现 Unstructured 与 资源对象 的相互转换了。下面是一个完整示例:
package main
import (
"context"
"fmt"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/tools/clientcmd"
)
func main() {
// 同样是先 创建一个客户端配置config
config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
if err != nil {
panic(err)
}
// 使用 dynamic.NewForConfig(),创建一个 DynamicClient 对象
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
panic(err)
}
// 使用 DynamicClient.Resource(),指定要操作的资源对象,获取到该资源的 Client
dynamicResourceClient := dynamicClient.Resource(schema.GroupVersionResource{
Group: "apps",
Version: "v1",
Resource: "deployments",
})
// 先为该Client指定ns,然后调用 Client 的 Get() 方法,获取到该资源对象
unstructured, err := dynamicResourceClient.
Namespace("kube-system").
Get(context.TODO(), "coredns", metav1.GetOptions{})
if err != nil {
panic(err)
}
// 调用 runtime.DefaultUnstructuredConverter.FromUnstructured(),将 unstructured 反序列化成 Deployment 对象
deploy := &appsv1.Deployment{}
err = runtime.DefaultUnstructuredConverter.FromUnstructured(unstructured.UnstructuredContent(), deploy)
if err != nil {
panic(err)
}
// 打印 deploy 名称和命名空间
fmt.Printf("deploy.Name: %s\ndeploy.namespace: %s", deploy.Name, deploy.Namespace)
}
deploy.Name: coredns
deploy.namespace: kube-system
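上例演示的是 FromUnstructured(Unstructured-->资源对象);反方向的 ToUnstructured 用法类似。下面是一个最小示意,承接上例中反序列化得到的 deploy 变量(需要额外引入 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured 包):
// 将 deploy 再转换回 Unstructured
objMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(deploy)
if err != nil {
	panic(err)
}
u := &unstructured.Unstructured{Object: objMap}
fmt.Println(u.GetName(), u.GetNamespace())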
DiscoveryClient 的定义位于 staging/src/k8s.io/client-go/discovery/discovery_client.go 中:
type DiscoveryClient struct {
restClient restclient.Interface
LegacyPrefix string
// Forces the client to request only "unaggregated" (legacy) discovery.
UseLegacyDiscovery bool
}
DiscoveryClient 的构造函数 NewDiscoveryClientForConfig 同样位于 /client-go/discovery/discovery_client.go 中,是一个函数,位于 discovery 包下,所以直接通过 discovery.NewDiscoveryClientForConfig() 就可以使用;入参是 *rest.Config 类型:
func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) {
config := *c
if err := setDiscoveryDefaults(&config); err != nil {
return nil, err
}
httpClient, err := restclient.HTTPClientFor(&config)
if err != nil {
return nil, err
}
return NewDiscoveryClientForConfigAndClient(&config, httpClient)
}
type DiscoveryInterface interface {
RESTClient() restclient.Interface
ServerGroupsInterface
ServerResourcesInterface
ServerVersionInterface
OpenAPISchemaInterface
OpenAPIV3SchemaInterface
// Returns copy of current discovery client that will only
// receive the legacy discovery format, or pointer to current
// discovery client if it does not support legacy-only discovery.
WithLegacy() DiscoveryInterface
}
DiscoveryInterface 的实现类,除了 DiscoveryClient,还有一个带缓存能力的实现类 CachedDiscoveryClient。
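带缓存的 discovery 客户端可以避免重复请求 APIServer。下面是一个用内存缓存包装 DiscoveryClient 的最小示意(这里用的是 k8s.io/client-go/discovery/cached/memory 包,与 CachedDiscoveryClient 的磁盘缓存是两种不同的缓存实现,仅作演示):
package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/discovery/cached/memory"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		panic(err)
	}
	// 用内存缓存包装 DiscoveryClient,重复的 discovery 请求会命中缓存
	cached := memory.NewMemCacheClient(dc)
	groups, err := cached.ServerGroups()
	if err != nil {
		panic(err)
	}
	for _, g := range groups.Groups {
		fmt.Println(g.Name)
	}
}
下面回到 DiscoveryClient,看一个完整的使用示例: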
func main() {
// 1、先创建一个客户端配置config
config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
if err != nil {
panic(err.Error())
}
// 2、使用 discovery.NewDiscoveryClientForConfig(),创建一个 DiscoveryClient 对象
discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil {
panic(err.Error())
}
// 3、使用 DiscoveryClient.ServerGroupsAndResources(),获取所有资源列表
_, resourceLists, err := discoveryClient.ServerGroupsAndResources()
if err != nil {
panic(err.Error())
}
// 4、遍历资源列表,打印出资源组和资源名称
for _, resource := range resourceLists {
fmt.Printf("resource groupVersion: %s\n", resource.GroupVersion)
for _, resource := range resource.APIResources {
fmt.Printf("resource name: %s\n", resource.Name)
}
fmt.Println("--------------------------")
}
}
resource groupVersion: v1
resource name: bindings
resource name: componentstatuses
resource name: configmaps
resource name: endpoints
resource name: events
resource name: limitranges
resource name: namespaces
resource name: namespaces/finalize
resource name: namespaces/status
resource name: nodes
resource name: nodes/proxy
resource name: nodes/status
resource name: persistentvolumeclaims
resource name: persistentvolumeclaims/status
resource name: persistentvolumes
resource name: persistentvolumes/status
resource name: pods
resource name: pods/attach
resource name: pods/binding
resource name: pods/eviction
resource name: pods/exec
resource name: pods/log
resource name: pods/portforward
resource name: pods/proxy
resource name: pods/status
resource name: podtemplates
resource name: replicationcontrollers
resource name: replicationcontrollers/scale
resource name: replicationcontrollers/status
resource name: resourcequotas
resource name: resourcequotas/status
resource name: secrets
resource name: serviceaccounts
resource name: services
resource name: services/proxy
resource name: services/status
--------------------------
resource groupVersion: apiregistration.k8s.io/v1
resource name: apiservices
resource name: apiservices/status
--------------------------
resource groupVersion: apiregistration.k8s.io/v1beta1
resource name: apiservices
resource name: apiservices/status
--------------------------
resource groupVersion: extensions/v1beta1
resource name: ingresses
resource name: ingresses/status
--------------------------
resource groupVersion: apps/v1
resource name: controllerrevisions
resource name: daemonsets
resource name: daemonsets/status
resource name: deployments
resource name: deployments/scale
resource name: deployments/status
resource name: replicasets
resource name: replicasets/scale
resource name: replicasets/status
resource name: statefulsets
resource name: statefulsets/scale
resource name: statefulsets/status
--------------------------
resource groupVersion: events.k8s.io/v1beta1
resource name: events
--------------------------
resource groupVersion: authentication.k8s.io/v1
resource name: tokenreviews
--------------------------
resource groupVersion: authentication.k8s.io/v1beta1
resource name: tokenreviews
--------------------------
resource groupVersion: authorization.k8s.io/v1
resource name: localsubjectaccessreviews
resource name: selfsubjectaccessreviews
resource name: selfsubjectrulesreviews
resource name: subjectaccessreviews
--------------------------
resource groupVersion: authorization.k8s.io/v1beta1
resource name: localsubjectaccessreviews
resource name: selfsubjectaccessreviews
resource name: selfsubjectrulesreviews
resource name: subjectaccessreviews
--------------------------
resource groupVersion: autoscaling/v1
resource name: horizontalpodautoscalers
resource name: horizontalpodautoscalers/status
--------------------------
resource groupVersion: autoscaling/v2beta1
resource name: horizontalpodautoscalers
resource name: horizontalpodautoscalers/status
--------------------------
resource groupVersion: autoscaling/v2beta2
resource name: horizontalpodautoscalers
resource name: horizontalpodautoscalers/status
--------------------------
resource groupVersion: batch/v1
resource name: jobs
resource name: jobs/status
--------------------------
resource groupVersion: batch/v1beta1
resource name: cronjobs
resource name: cronjobs/status
--------------------------
resource groupVersion: certificates.k8s.io/v1beta1
resource name: certificatesigningrequests
resource name: certificatesigningrequests/approval
resource name: certificatesigningrequests/status
--------------------------
resource groupVersion: networking.k8s.io/v1
resource name: networkpolicies
--------------------------
resource groupVersion: networking.k8s.io/v1beta1
resource name: ingresses
resource name: ingresses/status
--------------------------
resource groupVersion: policy/v1beta1
resource name: poddisruptionbudgets
resource name: poddisruptionbudgets/status
resource name: podsecuritypolicies
--------------------------
resource groupVersion: rbac.authorization.k8s.io/v1
resource name: clusterrolebindings
resource name: clusterroles
resource name: rolebindings
resource name: roles
--------------------------
resource groupVersion: rbac.authorization.k8s.io/v1beta1
resource name: clusterrolebindings
resource name: clusterroles
resource name: rolebindings
resource name: roles
--------------------------
resource groupVersion: storage.k8s.io/v1
resource name: csinodes
resource name: storageclasses
resource name: volumeattachments
resource name: volumeattachments/status
--------------------------
resource groupVersion: storage.k8s.io/v1beta1
resource name: csidrivers
resource name: csinodes
resource name: storageclasses
resource name: volumeattachments
--------------------------
resource groupVersion: admissionregistration.k8s.io/v1
resource name: mutatingwebhookconfigurations
resource name: validatingwebhookconfigurations
--------------------------
resource groupVersion: admissionregistration.k8s.io/v1beta1
resource name: mutatingwebhookconfigurations
resource name: validatingwebhookconfigurations
--------------------------
resource groupVersion: apiextensions.k8s.io/v1
resource name: customresourcedefinitions
resource name: customresourcedefinitions/status
--------------------------
resource groupVersion: apiextensions.k8s.io/v1beta1
resource name: customresourcedefinitions
resource name: customresourcedefinitions/status
--------------------------
resource groupVersion: scheduling.k8s.io/v1
resource name: priorityclasses
--------------------------
resource groupVersion: scheduling.k8s.io/v1beta1
resource name: priorityclasses
--------------------------
resource groupVersion: coordination.k8s.io/v1
resource name: leases
--------------------------
resource groupVersion: coordination.k8s.io/v1beta1
resource name: leases
--------------------------
resource groupVersion: node.k8s.io/v1beta1
resource name: runtimeclasses
--------------------------
resource groupVersion: discovery.k8s.io/v1beta1
resource name: endpointslices
--------------------------
package main

import (
"fmt"
"time"

"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
)

func main() {
config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
if err != nil {
klog.Fatalf("Failed to create config: %v", err)
}
// 初始化与apiserver通信的clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
klog.Fatalf("Failed to create client: %v", err)
}
// 初始化shared informer factory以及pod informer
factory := informers.NewSharedInformerFactory(clientset, 30*time.Second)
podInformer := factory.Core().V1().Pods()
informer := podInformer.Informer()
// 注册informer的自定义ResourceEventHandler
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: xxx,
UpdateFunc: xxx,
DeleteFunc: xxx,
})
// 启动shared informer factory,开始informer的list & watch操作
stopper := make(chan struct{})
go factory.Start(stopper)
// 等待informer从kube-apiserver同步资源完成,即informer的list操作获取的对象都存入到informer中的indexer本地缓存中
// 或者调用factory.WaitForCacheSync(stopper)
if !cache.WaitForCacheSync(stopper, informer.HasSynced) {
runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
return
}
// 创建lister
podLister := podInformer.Lister()
// 从informer中的indexer本地缓存中获取对象
podList, err := podLister.List(labels.Everything())
if err != nil {
fmt.Println(err)
}
// 打印缓存中的pod名称
for _, pod := range podList {
fmt.Println(pod.Name)
}
}
// 初始化shared informer factory以及pod informer
factory := informers.NewSharedInformerFactory(clientset, 30*time.Second)
podInformer := factory.Core().V1().Pods()
informer := podInformer.Informer()
......
go factory.Start(stopper)
......
podLister := podInformer.Lister()
podList, err := podLister.List(labels.Everything())
- factory.Core().V1().Pods() 和 podInformer.Informer(),最终就是创建了一个 pod 资源的 informer,该 informer 会自动保存在 factory 中
- factory.Start() 就是把 factory 中已经创建的所有 informer 都启动起来,每个 informer 都是一个单独的协程,互不影响,各自进行 ListAndWatch
- podInformer.Lister() 和 podLister.List() 就是从 pod 的这个 informer 中获取缓存数据。可以看到获取缓存的时候,定位到了某一个具体的 informer
sharedInformerFactory 的结构体定义如下:
type sharedInformerFactory struct {
// 这个client,是clientset类型的客户端,用于与apiserver交互
client kubernetes.Interface
// 限制 当前SharedInformerFactory 所创建的 Informer 只关注指定命名空间中的资源变化
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
lock sync.Mutex
defaultResync time.Duration
customResync map[reflect.Type]time.Duration
// 缓存已经创建的全部informer
informers map[reflect.Type]cache.SharedIndexInformer
// 缓存已经启动的 informer,只存 类型:是否启动
startedInformers map[reflect.Type]bool
}
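结构体中的 namespace 字段,可以在创建 factory 时通过 Option 指定(一个最小示意,假设 clientset 已按前文方式创建):
// 只关注 kube-system 命名空间资源变化的 factory
factory := informers.NewSharedInformerFactoryWithOptions(
	clientset,
	30*time.Second,
	informers.WithNamespace("kube-system"),
)
factory.Start() 的源码如下,可以看到每个 informer 都是由单独的协程启动的: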
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
if f.shuttingDown {
return
}
for informerType, informer := range f.informers {
if !f.startedInformers[informerType] {
f.wg.Add(1)
// We need a new variable in each loop iteration,
// otherwise the goroutine would use the loop variable
// and that keeps changing.
informer := informer
// 可以看到,每个informer,都使用一个单独的协程启动的
go func() {
defer f.wg.Done()
informer.Run(stopCh)
}()
f.startedInformers[informerType] = true
}
}
}
type SharedIndexInformer interface {
SharedInformer
// AddIndexers add indexers to the informer before it starts.
AddIndexers(indexers Indexers) error
GetIndexer() Indexer
}
type sharedIndexInformer struct {
indexer Indexer
controller Controller
processor *sharedProcessor
cacheMutationDetector MutationDetector
listerWatcher ListerWatcher
// objectType is an example object of the type this informer is
// expected to handle. Only the type needs to be right, except
// that when that is `unstructured.Unstructured` the object's
// `"apiVersion"` and `"kind"` must also be right.
objectType runtime.Object
// resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call
// shouldResync to check if any of our listeners need a resync.
resyncCheckPeriod time.Duration
// defaultEventHandlerResyncPeriod is the default resync period for any handlers added via
// AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default
// value).
defaultEventHandlerResyncPeriod time.Duration
// clock allows for testability
clock clock.Clock
started, stopped bool
startedLock sync.Mutex
// blockDeltas gives a way to stop all event distribution so that a late event handler
// can safely join the shared informer.
blockDeltas sync.Mutex
// Called whenever the ListAndWatch drops the connection with an error.
watchErrorHandler WatchErrorHandler
}
podInformer := factory.Core().V1().Pods()
informer := podInformer.Informer()
factory.Core().V1().Pods() 返回的是 PodInformer 接口类型,位于 informers/core/v1/pod.go 中:
type PodInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1.PodLister
}
Informer() cache.SharedIndexInformer 方法,用于获取一个为该资源工作的 cache.SharedIndexInformer。
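拿到 cache.SharedIndexInformer 之后,除了通过 Lister 查询,也可以直接访问其底层的 Indexer。下面是一个最小示意(承接 5.1 示例中的 informer 变量,"default/mypod" 这个 key 以及 fmt、corev1 的引入均为演示用假设):
// 通过底层 Indexer 直接按 key(默认为 ns/name)查询本地缓存
indexer := informer.GetIndexer()
obj, exists, err := indexer.GetByKey("default/mypod")
if err == nil && exists {
	pod := obj.(*corev1.Pod)
	fmt.Println(pod.Name)
}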
看完 5.1 的概述后,其他源码分析可以先参考下面的一些博客,我有时间也会继续补充分析。
package main
import (
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"log"
"share-code-operator-study/addingress/pkg"
)
func main() {
// 创建一个 集群客户端配置
config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
if err != nil {
inClusterConfig, err := rest.InClusterConfig()
if err != nil {
log.Fatalln("can't get config")
}
config = inClusterConfig
}
// 创建一个 clientset 客户端,用于创建 informerFactory
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err.Error())
}
// 创建一个 informerFactory
factory := informers.NewSharedInformerFactory(clientset, 0)
// 使用 informerFactory 创建Services资源的 informer对象
serviceInformer := factory.Core().V1().Services()
// 使用 informerFactory 创建Ingresses资源的 informer对象
ingressInformer := factory.Networking().V1().Ingresses()
// 创建一个自定义控制器
controller := pkg.NewController(clientset, serviceInformer, ingressInformer)
// 创建 停止channel信号
stopCh := make(chan struct{})
// 启动 informerFactory,会启动已经创建的 serviceInformer、ingressInformer
factory.Start(stopCh)
// 等待 所有informer 从 etcd 实现全量同步
factory.WaitForCacheSync(stopCh)
// 启动自定义控制器
controller.Run(stopCh)
}
package pkg
import (
"context"
corev1 "k8s.io/api/core/v1"
netv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
informercorev1 "k8s.io/client-go/informers/core/v1"
informernetv1 "k8s.io/client-go/informers/networking/v1"
"k8s.io/client-go/kubernetes"
listercorev1 "k8s.io/client-go/listers/core/v1"
listernetv1 "k8s.io/client-go/listers/networking/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"reflect"
"time"
)
const (
// worker 数量
workNum = 5
// service 指定 ingress 的 annotation key
annoKey = "ingress/http"
// 调谐失败的最大重试次数
maxRetry = 10
)
// 自定义控制器
type controller struct {
client kubernetes.Interface
serviceLister listercorev1.ServiceLister
ingressLister listernetv1.IngressLister
queue workqueue.RateLimitingInterface
}
// NewController 创建一个自定义控制器
func NewController(clientset *kubernetes.Clientset, serviceInformer informercorev1.ServiceInformer, ingressInformer informernetv1.IngressInformer) *controller {
// 控制器中,包含一个clientset、service和ingress的缓存监听器、一个workqueue
c := controller{
client: clientset,
serviceLister: serviceInformer.Lister(),
ingressLister: ingressInformer.Lister(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ingressManager"),
}
// 为 serviceInformer 添加 ResourceEventHandler
serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
// 添加service时触发
AddFunc: c.addService,
// 修改service时触发
UpdateFunc: c.updateService,
// 这里没有删除service的逻辑,因为我们会使用 OwnerReferences 将service+ingress关联起来。
// 因此删除service,会由kubernetes的ControllerManager中的特殊Controller,自动完成ingress的gc
})
// 为 ingressInformer 添加 ResourceEventHandler
ingressInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
// 删除ingress时触发
DeleteFunc: c.deleteIngress,
})
return &c
}
// 添加service时触发
func (c *controller) addService(obj interface{}) {
// 将 添加service 的 key 加入 workqueue
c.enqueue(obj)
}
// 修改service时触发
func (c *controller) updateService(oldObj interface{}, newObj interface{}) {
// 如果两个对象一致,就无需触发修改逻辑
if reflect.DeepEqual(oldObj, newObj) {
return
}
// todo 比较annotation
// 将 修改service 的 key 加入 workqueue
c.enqueue(newObj)
}
// 删除ingress时触发
func (c *controller) deleteIngress(obj interface{}) {
// 将对象转成ingress,并获取到它的 ownerReference
ingress := obj.(*netv1.Ingress)
ownerReference := metav1.GetControllerOf(ingress)
// 如果ingress的 ownerReference 没有绑定到service,则无需处理
if ownerReference == nil || ownerReference.Kind != "Service" {
return
}
// 如果ingress的 ownerReference 已经绑定到service,则需要处理
c.enqueue(obj)
}
// enqueue 将 待添加service 的 key 加入 workqueue
func (c *controller) enqueue(obj interface{}) {
// 调用工具方法,获取 kubernetes资源对象的 key(默认是 ns/name,或 name)
key, err := cache.MetaNamespaceKeyFunc(obj)
// 获取失败,不加入队列,即本次事件不予处理
if err != nil {
runtime.HandleError(err)
return
}
// 将 key 加入 workqueue
c.queue.Add(key)
}
// dequeue 将处理完成的 key 出队
func (c *controller) dequeue(item interface{}) {
c.queue.Done(item)
}
// Run 启动controller
func (c *controller) Run(stopCh chan struct{}) {
// 启动 workNum 个worker,并发地对workqueue中的事件进行处理
for i := 0; i < workNum; i++ {
// 每个worker都是一个协程,使用同一个停止信号
go wait.Until(c.worker, time.Minute, stopCh)
}
// 启动完成后,Run函数就停止在这里,等待停止信号
<-stopCh
}
// worker方法
func (c *controller) worker() {
// 死循环,worker处理完一个,再去处理下一个
for c.processNextItem() {
}
}
// processNextItem 处理下一个
func (c *controller) processNextItem() bool {
// 从 workerqueue 取出一个key
item, shutdown := c.queue.Get()
// 如果已经收到停止信号了,则返回false,worker就会停止处理
if shutdown {
return false
}
// 处理完成后,将这个key出队
defer c.dequeue(item)
// 转成string类型的key
key := item.(string)
// 处理service逻辑的核心方法
err := c.syncService(key)
// 处理过程出错,进入错误统一处理逻辑
if err != nil {
c.handleError(key, err)
} else {
// 处理成功,清空该key的失败重试计数,避免限速器持续累积
c.queue.Forget(key)
}
// 处理结束,返回true
return true
}
// handleError 错误统一处理逻辑
func (c *controller) handleError(key string, err error) {
// 如果当前key的处理次数,还不到最大重试次数,则再次加入队列
if c.queue.NumRequeues(key) < maxRetry {
c.queue.AddRateLimited(key)
return
}
// 运行时统一处理错误
runtime.HandleError(err)
// 不再处理这个key
c.queue.Forget(key)
}
// syncService 处理service逻辑的核心方法
func (c *controller) syncService(key string) error {
// 将 key 切割为 ns 和 name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
// 从indexer中,获取service
service, err := c.serviceLister.Services(namespace).Get(name)
// 没有service,直接返回
if errors.IsNotFound(err) {
return nil
}
if err != nil {
return err
}
// 检查service的annotation,是否包含 key: "ingress/http"
_, ok := service.Annotations[annoKey]
// 从indexer缓存中,获取ingress
ingress, err := c.ingressLister.Ingresses(namespace).Get(name)
// 查询出错且不是 NotFound,直接返回错误,等待重试
if err != nil && !errors.IsNotFound(err) {
return err
}
if ok && errors.IsNotFound(err) {
// ingress不存在,但是service有"ingress/http",需要创建ingress
// 创建ingress
ig := c.createIngress(service)
// 调用controller中的client,完成ingress的创建
_, err := c.client.NetworkingV1().Ingresses(namespace).Create(context.TODO(), ig, metav1.CreateOptions{})
if err != nil {
return err
}
} else if !ok && ingress != nil {
// ingress存在,但是service没有"ingress/http",需要删除ingress
// 调用controller中的client,完成ingress的删除
err := c.client.NetworkingV1().Ingresses(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
if err != nil {
return err
}
}
return nil
}
// createIngress 创建ingress
func (c *controller) createIngress(service *corev1.Service) *netv1.Ingress {
icn := "ingress"
pathType := netv1.PathTypePrefix
return &netv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: service.Name,
Namespace: service.Namespace,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(service, corev1.SchemeGroupVersion.WithKind("Service")),
},
},
Spec: netv1.IngressSpec{
IngressClassName: &icn,
Rules: []netv1.IngressRule{
{
Host: "example.com",
IngressRuleValue: netv1.IngressRuleValue{
HTTP: &netv1.HTTPIngressRuleValue{
Paths: []netv1.HTTPIngressPath{
{
Path: "/",
PathType: &pathType,
Backend: netv1.IngressBackend{
Service: &netv1.IngressServiceBackend{
Name: service.Name,
Port: netv1.ServiceBackendPort{
Number: 80,
},
},
},
},
},
},
},
},
},
},
}
}
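补充一点:上面 enqueue 和 syncService 中用到的 cache.MetaNamespaceKeyFunc 与 cache.SplitMetaNamespaceKey 是一对互逆的工具方法,前者在入队时把对象压缩成 key,后者在处理时还原出 ns 和 name。下面是一个最小示意(pod 对象为演示用构造):
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "mypod", Namespace: "default"},
	}
	// 对象 --> key(默认是 ns/name,集群级资源则只有 name)
	key, _ := cache.MetaNamespaceKeyFunc(pod)
	fmt.Println(key) // default/mypod
	// key --> ns + name
	ns, name, _ := cache.SplitMetaNamespaceKey(key)
	fmt.Println(ns, name) // default mypod
}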