3. Controller
The file analyzed in this section is:
client-go/tools/cache/controller.go
3.1 Interface
type Controller interface {
    Run(stopCh <-chan struct{})
    HasSynced() bool
    LastSyncResourceVersion() string
}
The Controller interface has three methods: Run, HasSynced, and LastSyncResourceVersion.
3.2 Implementation: controller
type controller struct {
    config         Config
    reflector      *Reflector
    reflectorMutex sync.RWMutex
    clock          clock.Clock
}
type Config struct {
    // The queue; in practice this is a DeltaFIFO
    Queue
    // Something that can list and watch your objects.
    ListerWatcher
    // User-defined processing logic
    Process ProcessFunc
    // The object type this controller handles
    ObjectType runtime.Object
    // Resync period
    FullResyncPeriod time.Duration
    ShouldResync     ShouldResyncFunc
    // Whether to re-enqueue an object when processing it returns an error
    RetryOnError bool
}

type ShouldResyncFunc func() bool

type ProcessFunc func(obj interface{}) error
// New constructs a controller from the given Config.
func New(c *Config) Controller {
    ctlr := &controller{
        config: *c,
        clock:  &clock.RealClock{},
    }
    return ctlr
}
Reflector has already been analyzed in [k8s源码分析][client-go] informer之reflector, and the Queue is, in practice, almost always a DeltaFIFO.
Methods
func (c *controller) HasSynced() bool {
    return c.config.Queue.HasSynced()
}

func (c *controller) LastSyncResourceVersion() string {
    c.reflectorMutex.RLock()
    defer c.reflectorMutex.RUnlock()
    if c.reflector == nil {
        return ""
    }
    return c.reflector.LastSyncResourceVersion()
}
1. HasSynced delegates to the DeltaFIFO's HasSynced method, which was analyzed in [k8s源码分析][client-go] informer之delta_fifo.
2. LastSyncResourceVersion delegates to the reflector's method of the same name, which was analyzed in [k8s源码分析][client-go] informer之reflector.
func (c *controller) Run(stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()
    go func() {
        <-stopCh
        c.config.Queue.Close()
    }()
    // Construct a reflector.
    r := NewReflector(
        c.config.ListerWatcher,
        c.config.ObjectType,
        c.config.Queue,
        c.config.FullResyncPeriod,
    )
    r.ShouldResync = c.config.ShouldResync
    r.clock = c.clock

    c.reflectorMutex.Lock()
    c.reflector = r
    c.reflectorMutex.Unlock()

    var wg wait.Group
    defer wg.Wait()

    // Start reflector.Run in a goroutine: everything received from the
    // ListerWatcher is stored into the DeltaFIFO (r.store == c.config.Queue).
    wg.StartWithChannel(stopCh, r.Run)

    // Run processLoop in a loop until stopCh is closed.
    wait.Until(c.processLoop, time.Second, stopCh)
}
func (c *controller) processLoop() {
    for {
        // Pop an item from the DeltaFIFO (the dequeue logic was analyzed earlier)
        // and hand it to the user-supplied c.config.Process function.
        obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))
        if err != nil {
            if err == ErrFIFOClosed {
                // The DeltaFIFO has been closed, so return.
                return
            }
            if c.config.RetryOnError {
                // This is the safe way to re-enqueue.
                // If retries are enabled, put the object back into the DeltaFIFO.
                c.config.Queue.AddIfNotPresent(obj)
            }
        }
    }
}
So the main point of controller.Run is that on one side the reflector keeps pushing data into the DeltaFIFO, while on the other side items keep being popped from the DeltaFIFO and handed to the user-defined c.config.Process function.
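For illustration, here is a minimal sketch of driving this low-level controller by hand, matching the Config/ProcessFunc shape shown above. The ListerWatcher is assumed to be created elsewhere (for example with cache.NewListWatchFromClient), and the Process function simply prints every Delta it pops; real code almost always goes through SharedIndexInformer instead, described in the next section.

package example

import (
    "fmt"
    "time"

    v1 "k8s.io/api/core/v1"
    "k8s.io/client-go/tools/cache"
)

// runRawController wires a Config by hand: the DeltaFIFO is the Queue, the
// reflector fills it from lw, and Process consumes every popped Deltas.
func runRawController(lw cache.ListerWatcher, stopCh <-chan struct{}) {
    // No knownObjects store is maintained in this sketch.
    fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil)

    cfg := &cache.Config{
        Queue:            fifo,
        ListerWatcher:    lw,
        ObjectType:       &v1.Pod{},
        FullResyncPeriod: 30 * time.Second,
        RetryOnError:     false,
        // Called by processLoop for every Deltas popped from the DeltaFIFO.
        Process: func(obj interface{}) error {
            for _, d := range obj.(cache.Deltas) {
                fmt.Printf("delta: type=%s object=%v\n", d.Type, d.Object)
            }
            return nil
        },
    }

    // Run starts the reflector (producer) and the processLoop (consumer)
    // and blocks until stopCh is closed.
    cache.New(cfg).Run(stopCh)
}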
4. SharedInformer
This is the outermost, user-facing type: users plug in their own configuration through the methods of this interface.
4.1 Interface
type SharedInformer interface {
    // Add a user-defined handler
    AddEventHandler(handler ResourceEventHandler)
    // Add a user-defined handler with its own resyncPeriod
    AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
    // Return the Store; for sharedIndexInformer this is the indexer, i.e. the local cache
    GetStore() Store
    // Return the Controller, i.e. the controller analyzed above
    GetController() Controller
    Run(stopCh <-chan struct{})
    // HasSynced returns true if the shared informer's store has been
    // informed by at least one full LIST of the authoritative state
    // of the informer's object collection. This is unrelated to "resync".
    HasSynced() bool
    // The ResourceVersion most recently processed for this SharedInformer's object type
    LastSyncResourceVersion() string
}
type SharedIndexInformer interface {
    SharedInformer
    // AddIndexers add indexers to the informer before it starts.
    AddIndexers(indexers Indexers) error
    GetIndexer() Indexer
}
4.2 Implementation: sharedIndexInformer
type sharedIndexInformer struct {
    // Local cache
    indexer    Indexer
    controller Controller

    processor             *sharedProcessor
    cacheMutationDetector MutationDetector

    listerWatcher ListerWatcher
    // The object type this sharedIndexInformer watches
    objectType runtime.Object

    // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call
    // shouldResync to check if any of our listeners need a resync.
    // In other words, every resyncCheckPeriod the reflector calls shouldResync to decide
    // whether any listener is due for a resync.
    resyncCheckPeriod time.Duration

    defaultEventHandlerResyncPeriod time.Duration
    clock                           clock.Clock

    started, stopped bool
    startedLock      sync.Mutex

    // blockDeltas can be used to stop the distribution of objects to the listeners:
    // HandleDeltas has to acquire this lock, so while another goroutine holds it,
    // distribution is blocked until the lock can be acquired again.
    blockDeltas sync.Mutex
}
func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
    return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{})
}

func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
    realClock := &clock.RealClock{}
    sharedIndexInformer := &sharedIndexInformer{
        processor:                       &sharedProcessor{clock: realClock},
        indexer:                         NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
        listerWatcher:                   lw,
        objectType:                      objType,
        resyncCheckPeriod:               defaultEventHandlerResyncPeriod,
        defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
        cacheMutationDetector:           NewCacheMutationDetector(fmt.Sprintf("%T", objType)),
        clock:                           realClock,
    }
    return sharedIndexInformer
}
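As a usage sketch (not part of the source being analyzed), a SharedIndexInformer for Pods could be built like this. The clientset is assumed to be created elsewhere (e.g. from a kubeconfig), and the 30-second resync period is an arbitrary illustrative value.

package example

import (
    "time"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
)

// newPodInformer builds a SharedIndexInformer that lists and watches Pods
// in all namespaces through the clientset's CoreV1 REST client.
func newPodInformer(clientset kubernetes.Interface) cache.SharedIndexInformer {
    lw := cache.NewListWatchFromClient(
        clientset.CoreV1().RESTClient(),
        "pods",
        metav1.NamespaceAll,
        fields.Everything(),
    )
    // defaultEventHandlerResyncPeriod = 30s, no extra indexers.
    return cache.NewSharedIndexInformer(lw, &v1.Pod{}, 30*time.Second, cache.Indexers{})
}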
4.3 Methods
AddEventHandler and AddEventHandlerWithResyncPeriod
Both add a user-defined ResourceEventHandler implementation.
func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) {
    s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod)
}

const minimumResyncPeriod = 1 * time.Second

func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) {
    s.startedLock.Lock()
    defer s.startedLock.Unlock()

    if s.stopped {
        // This sharedIndexInformer has already stopped.
        klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
        return
    }

    if resyncPeriod > 0 {
        // If the requested period is smaller than minimumResyncPeriod, clamp it up to the minimum.
        if resyncPeriod < minimumResyncPeriod {
            klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
            resyncPeriod = minimumResyncPeriod
        }

        if resyncPeriod < s.resyncCheckPeriod {
            // The requested period is smaller than this sharedIndexInformer's resyncCheckPeriod:
            // 1. if the informer has already started, raise resyncPeriod to resyncCheckPeriod;
            // 2. if it has not started yet, lower resyncCheckPeriod to resyncPeriod and recompute
            //    the resync periods of all existing listeners.
            if s.started {
                klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
                resyncPeriod = s.resyncCheckPeriod
            } else {
                // if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
                // resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners
                // accordingly
                s.resyncCheckPeriod = resyncPeriod
                s.processor.resyncCheckPeriodChanged(resyncPeriod)
            }
        }
    }

    listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize)

    if !s.started {
        s.processor.addListener(listener)
        return
    }

    // in order to safely join, we have to
    // 1. stop sending add/update/delete notifications
    // 2. do a list against the store
    // 3. send synthetic "Add" events to the new handler
    // 4. unblock
    // In other words:
    // 1. holding blockDeltas means HandleDeltas cannot acquire it, so no notifications are distributed;
    // 2. list all objects from the local cache;
    // 3. send each of them as a synthetic "Add" event to the new listener/handler;
    // 4. release the lock.
    s.blockDeltas.Lock()
    defer s.blockDeltas.Unlock()

    s.processor.addListener(listener)
    for _, item := range s.indexer.List() {
        // Replay every object already in the local cache to the new listener.
        listener.add(addNotification{newObj: item})
    }
}
1. Compute the effective resyncPeriod according to the rules above.
2. Wrap the eventHandler in a new processorListener.
3. If the sharedIndexInformer has not started yet, simply add the listener to sharedIndexInformer.processor. If it has already started, the listener has missed all earlier events, so after adding it to sharedIndexInformer.processor, every object in the local cache is sent to the new listener as an addNotification.
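A sketch of registering a handler on such an informer, using the method signature of the client-go version analyzed here. cache.ResourceEventHandlerFuncs is the standard adapter type, and the 10-minute resyncPeriod is only an example value (it is clamped by the rules described above).

package example

import (
    "fmt"
    "time"

    "k8s.io/client-go/tools/cache"
)

// addHandlers registers a user-defined handler with its own resyncPeriod.
func addHandlers(informer cache.SharedIndexInformer) {
    informer.AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            fmt.Printf("add: %v\n", obj)
        },
        // Also invoked for the Sync notifications produced by a resync.
        UpdateFunc: func(oldObj, newObj interface{}) {
            fmt.Printf("update: %v -> %v\n", oldObj, newObj)
        },
        DeleteFunc: func(obj interface{}) {
            fmt.Printf("delete: %v\n", obj)
        },
    }, 10*time.Minute)
}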
Run
func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()

    // Create a DeltaFIFO whose knownObjects is s.indexer, i.e. the local cache.
    fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, s.indexer)

    // Build the controller's Config.
    cfg := &Config{
        Queue:            fifo,
        ListerWatcher:    s.listerWatcher,
        ObjectType:       s.objectType,
        FullResyncPeriod: s.resyncCheckPeriod,
        RetryOnError:     false,
        // sharedProcessor.shouldResync computes whether any listener is due for a resync.
        ShouldResync: s.processor.shouldResync,
        // The user-defined processing logic invoked when popping from the DeltaFIFO; here it is HandleDeltas.
        Process: s.HandleDeltas,
    }

    func() {
        s.startedLock.Lock()
        defer s.startedLock.Unlock()

        // Create the controller.
        s.controller = New(cfg)
        s.controller.(*controller).clock = s.clock
        s.started = true
    }()

    // Separate stop channel because Processor should be stopped strictly after controller
    processorStopCh := make(chan struct{})
    var wg wait.Group
    defer wg.Wait()              // Wait for Processor to stop
    defer close(processorStopCh) // Tell Processor to stop

    // Start the cacheMutationDetector.
    wg.StartWithChannel(processorStopCh, s.cacheMutationDetector.Run)
    // Start all the listeners.
    wg.StartWithChannel(processorStopCh, s.processor.run)

    defer func() {
        s.startedLock.Lock()
        defer s.startedLock.Unlock()
        s.stopped = true // Don't want any new listeners
    }()

    // Start the controller.
    s.controller.Run(stopCh)
}
func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
    // Acquire the blockDeltas lock first.
    s.blockDeltas.Lock()
    defer s.blockDeltas.Unlock()

    // from oldest to newest
    for _, d := range obj.(Deltas) {
        switch d.Type {
        case Sync, Added, Updated:
            isSync := d.Type == Sync
            // Record the object in the cacheMutationDetector.
            s.cacheMutationDetector.AddObject(d.Object)
            if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
                // Update the local cache.
                if err := s.indexer.Update(d.Object); err != nil {
                    return err
                }
                // Distribute to the listeners according to isSync.
                s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync)
            } else {
                // Add to the local cache.
                if err := s.indexer.Add(d.Object); err != nil {
                    return err
                }
                // Distribute to the listeners according to isSync.
                s.processor.distribute(addNotification{newObj: d.Object}, isSync)
            }
        case Deleted:
            // Delete from the local cache.
            if err := s.indexer.Delete(d.Object); err != nil {
                return err
            }
            // Distribute to all listeners.
            s.processor.distribute(deleteNotification{oldObj: d.Object}, false)
        }
    }
    return nil
}
The functionality of controller.Run was already shown in the controller section above. sharedIndexInformer sets controller.config.Process = HandleDeltas and, in addition, starts a set of listeners.
HandleDeltas receives Deltas from the DeltaFIFO's Pop method (see [k8s源码分析][client-go] informer之delta_fifo for details) and then does two things:
1. Depending on the delta type (Add/Update/Delete), it updates the local store, i.e. the Indexer.
2. It wraps the current obj into a notification and distributes it to all listeners; each listener then calls the user's ResourceEventHandler to process it.
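To make the second step concrete, here is a simplified conceptual sketch (not the actual processorListener code) of the switch each listener effectively performs on a popped notification before calling the registered ResourceEventHandler, assuming the handler interface of the client-go version analyzed here (OnAdd/OnUpdate/OnDelete). The notification types are stand-ins that mirror the ones used in HandleDeltas above.

package example

import "k8s.io/client-go/tools/cache"

// Simplified stand-ins for the notification types distributed by HandleDeltas.
type addNotification struct{ newObj interface{} }
type updateNotification struct{ oldObj, newObj interface{} }
type deleteNotification struct{ oldObj interface{} }

// dispatch models how a listener turns a notification into a call on the
// user's ResourceEventHandler.
func dispatch(h cache.ResourceEventHandler, next interface{}) {
    switch n := next.(type) {
    case addNotification:
        h.OnAdd(n.newObj)
    case updateNotification:
        h.OnUpdate(n.oldObj, n.newObj)
    case deleteNotification:
        h.OnDelete(n.oldObj)
    }
}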
Other methods
func (s *sharedIndexInformer) HasSynced() bool {
    s.startedLock.Lock()
    defer s.startedLock.Unlock()

    if s.controller == nil {
        return false
    }
    // This ultimately calls the DeltaFIFO's HasSynced method.
    return s.controller.HasSynced()
}

func (s *sharedIndexInformer) LastSyncResourceVersion() string {
    s.startedLock.Lock()
    defer s.startedLock.Unlock()

    if s.controller == nil {
        return ""
    }
    // This ultimately calls the reflector's LastSyncResourceVersion method, which returns
    // the resource version of the objects the reflector most recently synced into the DeltaFIFO.
    return s.controller.LastSyncResourceVersion()
}
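In user code these two methods are usually consumed indirectly. A minimal sketch of waiting for the initial LIST via cache.WaitForCacheSync, which keeps polling the HasSynced functions passed to it (the informer and stop channel are assumed to be created elsewhere):

package example

import (
    "fmt"

    "k8s.io/client-go/tools/cache"
)

// waitForSync starts the informer and blocks until HasSynced reports true
// or stopCh is closed.
func waitForSync(informer cache.SharedIndexInformer, stopCh <-chan struct{}) error {
    go informer.Run(stopCh)

    if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
        return fmt.Errorf("timed out waiting for caches to sync")
    }
    return nil
}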
func (s *sharedIndexInformer) GetStore() Store {
    return s.indexer
}

func (s *sharedIndexInformer) GetIndexer() Indexer {
    return s.indexer
}
func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error {
    s.startedLock.Lock()
    defer s.startedLock.Unlock()

    if s.started {
        return fmt.Errorf("informer has already started")
    }
    // Indexers can only be added before the informer is started.
    return s.indexer.AddIndexers(indexers)
}
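A sketch of adding an indexer before the informer starts. The "byNamespace" name is arbitrary, while cache.MetaNamespaceIndexFunc is the stock index function that indexes objects by their namespace.

package example

import "k8s.io/client-go/tools/cache"

// indexByNamespace must be called before informer.Run; afterwards
// AddIndexers returns an error.
func indexByNamespace(informer cache.SharedIndexInformer) error {
    return informer.AddIndexers(cache.Indexers{
        "byNamespace": cache.MetaNamespaceIndexFunc,
    })
}

// Once the informer has synced, objects in a given namespace can be listed with:
//   objs, err := informer.GetIndexer().ByIndex("byNamespace", "kube-system")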
The informer as a whole
The informer machinery is an important part of the Kubernetes code base; understanding informers makes it much easier to understand how Kubernetes works. The articles in this series are:
1. [k8s源码分析][client-go] informer之store和index
2. [k8s源码分析][client-go] informer之delta_fifo
3. [k8s源码分析][client-go] informer之reflector
4. [k8s源码分析][client-go] informer之controller和shared_informer(1)
5. [k8s源码分析][client-go] informer之controller和shared_informer(2)
6. [k8s源码分析][client-go] informer之SharedInformerFactory