The kube-controller-manager flag --controllers strings configures which controllers are enabled; podgc is one of the controllers in this list:
All controllers: attachdetach, bootstrapsigner, clusterrole-aggregation, cronjob, csrapproving,
csrcleaner, csrsigning, daemonset, deployment, disruption, endpoint, garbagecollector,
horizontalpodautoscaling, job, namespace, nodeipam, nodelifecycle, persistentvolume-binder,
persistentvolume-expander, podgc, pv-protection, pvc-protection, replicaset, replicationcontroller,
resourcequota, route, service, serviceaccount, serviceaccount-token, statefulset, tokencleaner, ttl,
ttl-after-finished
Disabled-by-default controllers: bootstrapsigner, tokencleaner (default [*])
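For reference, the flag follows the usual enable/disable syntax: '*' enables every on-by-default controller, 'foo' enables the controller named foo, and '-foo' disables it. Two illustrative values (my own examples, assuming that syntax):

--controllers=*,bootstrapsigner,tokencleaner    (defaults plus the two disabled-by-default controllers)
--controllers=*,-podgc                          (defaults, but with podgc switched off)

The PodGC controller itself is built around the following struct: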
type PodGCController struct {
    kubeClient clientset.Interface

    // podLister reads Pods from the shared informer cache; podListerSynced reports whether that cache has synced.
    podLister       corelisters.PodLister
    podListerSynced cache.InformerSynced

    // deletePod force-deletes one Pod (a field so it can be stubbed, e.g. in tests);
    // terminatedPodThreshold is how many terminated Pods may exist before GC kicks in.
    deletePod              func(namespace, name string) error
    terminatedPodThreshold int
}
During controller-manager initialization, podgc is registered in the controller map with controllers["podgc"] = startPodGCController:
// NewControllerInitializers is a public map of named controller groups (you can start more than one in an init func)
// paired to their InitFunc. This allows for structured downstream composition and subdivision.
func NewControllerInitializers(loopMode ControllerLoopMode) map[string]InitFunc {
    controllers := map[string]InitFunc{}
    controllers["endpoint"] = startEndpointController
    controllers["replicationcontroller"] = startReplicationController
    controllers["podgc"] = startPodGCController
    // ... the remaining controllers are registered here as well (list abridged)
    return controllers
}
startPodGCController calls NewPodGC to build the controller and then starts its Run method in a goroutine:
func startPodGCController(ctx ControllerContext) (http.Handler, bool, error) {
    go podgc.NewPodGC(
        ctx.ClientBuilder.ClientOrDie("pod-garbage-collector"),
        ctx.InformerFactory.Core().V1().Pods(),
        int(ctx.ComponentConfig.PodGCController.TerminatedPodGCThreshold),
    ).Run(ctx.Stop)
    return nil, true, nil
}
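The threshold argument comes from ComponentConfig.PodGCController.TerminatedPodGCThreshold, which is set by the kube-controller-manager flag --terminated-pod-gc-threshold (default 12500); as the gc function below shows, a value <= 0 effectively disables the terminated-Pod cleanup while leaving the orphan and unscheduled-terminating cleanups active. For example:

--terminated-pod-gc-threshold=12500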
Path: pkg/controller/podgc/gc_controller.go
NewPodGC fills in the PodGCController struct shown above. Its deletePod callback calls kubeClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0)); a grace period of zero indicates immediate deletion, i.e. a forced delete.
func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInformer, terminatedPodThreshold int) *PodGCController {
    if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
        metrics.RegisterMetricAndTrackRateLimiterUsage("gc_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
    }
    gcc := &PodGCController{
        kubeClient:             kubeClient,
        terminatedPodThreshold: terminatedPodThreshold,
        deletePod: func(namespace, name string) error {
            glog.Infof("PodGC is force deleting Pod: %v/%v", namespace, name)
            return kubeClient.CoreV1().Pods(namespace).Delete(name, metav1.NewDeleteOptions(0))
        },
    }

    gcc.podLister = podInformer.Lister()
    gcc.podListerSynced = podInformer.Informer().HasSynced

    return gcc
}
Run waits for the Pod informer cache to sync, then starts a goroutine that runs gcc.gc once every gcCheckPeriod (20 seconds) to reclaim Pods:
func (gcc *PodGCController) Run(stop <-chan struct{}) {
    defer utilruntime.HandleCrash()

    glog.Infof("Starting GC controller")
    defer glog.Infof("Shutting down GC controller")

    if !controller.WaitForCacheSync("GC", stop, gcc.podListerSynced) {
        return
    }

    go wait.Until(gcc.gc, gcCheckPeriod, stop)

    <-stop
}
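gcCheckPeriod is a package-level constant in the same file; a minimal sketch of it, matching the 20-second interval mentioned above:

const (
    // gcCheckPeriod is how often the PodGC controller scans for Pods to delete.
    gcCheckPeriod = 20 * time.Second
)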
func (gcc *PodGCController) gc() {
    pods, err := gcc.podLister.List(labels.Everything())
    if err != nil {
        glog.Errorf("Error while listing all Pods: %v", err)
        return
    }
    if gcc.terminatedPodThreshold > 0 {
        gcc.gcTerminated(pods)
    }
    gcc.gcOrphaned(pods)
    gcc.gcUnscheduledTerminating(pods)
}
gcTerminated is fairly straightforward: it collects every terminated Pod, sorts them by creation timestamp, and force-deletes the oldest ones in parallel so that at most terminatedPodThreshold terminated Pods remain:
func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) {
    terminatedPods := []*v1.Pod{}
    for _, pod := range pods {
        if isPodTerminated(pod) {
            terminatedPods = append(terminatedPods, pod)
        }
    }

    terminatedPodCount := len(terminatedPods)
    sort.Sort(byCreationTimestamp(terminatedPods))

    deleteCount := terminatedPodCount - gcc.terminatedPodThreshold

    if deleteCount > terminatedPodCount {
        deleteCount = terminatedPodCount
    }
    if deleteCount > 0 {
        glog.Infof("garbage collecting %v pods", deleteCount)
    }

    var wait sync.WaitGroup
    for i := 0; i < deleteCount; i++ {
        wait.Add(1)
        go func(namespace string, name string) {
            defer wait.Done()
            if err := gcc.deletePod(namespace, name); err != nil {
                // ignore not founds
                defer utilruntime.HandleError(err)
            }
        }(terminatedPods[i].Namespace, terminatedPods[i].Name)
    }
    wait.Wait()
}
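gcTerminated relies on two small helpers defined alongside it; roughly (a sketch, not a verbatim copy of the source), isPodTerminated treats any phase other than Pending, Running, or Unknown as terminated, and byCreationTimestamp orders Pods oldest first so the oldest terminated Pods are deleted first:

func isPodTerminated(pod *v1.Pod) bool {
    // Only Succeeded and Failed Pods count as terminated.
    if phase := pod.Status.Phase; phase != v1.PodPending && phase != v1.PodRunning && phase != v1.PodUnknown {
        return true
    }
    return false
}

// byCreationTimestamp sorts Pods by creation time, using the name as a tie breaker.
type byCreationTimestamp []*v1.Pod

func (o byCreationTimestamp) Len() int      { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

func (o byCreationTimestamp) Less(i, j int) bool {
    if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
        return o[i].Name < o[j].Name
    }
    return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}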
gcOrphaned deletes Pods that are already bound to a node but whose node no longer appears in the node list; such Pods are orphans:
// gcOrphaned deletes pods that are bound to nodes that don't exist.
func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod) {
    glog.V(4).Infof("GC'ing orphaned")
    // We want to get list of Nodes from the etcd, to make sure that it's as fresh as possible.
    nodes, err := gcc.kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
    if err != nil {
        return
    }
    nodeNames := sets.NewString()
    for i := range nodes.Items {
        nodeNames.Insert(nodes.Items[i].Name)
    }

    for _, pod := range pods {
        if pod.Spec.NodeName == "" {
            continue
        }
        if nodeNames.Has(pod.Spec.NodeName) {
            continue
        }
        glog.V(2).Infof("Found orphaned Pod %v/%v assigned to the Node %v. Deleting.", pod.Namespace, pod.Name, pod.Spec.NodeName)
        if err := gcc.deletePod(pod.Namespace, pod.Name); err != nil {
            utilruntime.HandleError(err)
        } else {
            glog.V(0).Infof("Forced deletion of orphaned Pod %v/%v succeeded", pod.Namespace, pod.Name)
        }
    }
}
gcUnscheduledTerminating deletes Pods that are terminating (DeletionTimestamp is set) but have never been scheduled to a node:
// gcUnscheduledTerminating deletes pods that are terminating and haven't been scheduled to a particular node.
func (gcc *PodGCController) gcUnscheduledTerminating(pods []*v1.Pod) {
    glog.V(4).Infof("GC'ing unscheduled pods which are terminating.")

    for _, pod := range pods {
        if pod.DeletionTimestamp == nil || len(pod.Spec.NodeName) > 0 {
            continue
        }

        glog.V(2).Infof("Found unscheduled terminating Pod %v/%v not assigned to any Node. Deleting.", pod.Namespace, pod.Name)
        if err := gcc.deletePod(pod.Namespace, pod.Name); err != nil {
            utilruntime.HandleError(err)
        } else {
            glog.V(0).Infof("Forced deletion of unscheduled terminating Pod %v/%v succeeded", pod.Namespace, pod.Name)
        }
    }
}
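One design note: deletePod is stored as a function field rather than being called on kubeClient directly, which makes the controller easy to exercise in unit tests. A minimal, hypothetical sketch (the test name and setup below are my own, not the actual test file):

package podgc

import (
    "testing"

    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes/fake"
)

// Swap the deletePod field for a stub so the test can observe which Pods the
// controller decides to delete without talking to a real API server.
func TestPodGCWithStubbedDelete(t *testing.T) {
    client := fake.NewSimpleClientset()
    factory := informers.NewSharedInformerFactory(client, 0)
    gcc := NewPodGC(client, factory.Core().V1().Pods(), 1)

    deleted := sets.NewString()
    gcc.deletePod = func(namespace, name string) error {
        deleted.Insert(namespace + "/" + name)
        return nil
    }

    gcc.gc() // the informer cache is empty here, so nothing should be deleted
    if deleted.Len() != 0 {
        t.Fatalf("expected no deletions, got %v", deleted.List())
    }
}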