Eviction literally means driving something out: when a node becomes abnormal, kubernetes has corresponding mechanisms to evict the Pods running on that node. A similar eviction concept also exists in OpenStack's nova component.
Kubernetes currently has two eviction mechanisms, implemented by kube-controller-manager and kubelet respectively.
kube-controller-manager is made up of multiple controllers, and its eviction functionality is mainly implemented by the node controller.
kube-controller-manager provides the following startup parameters to control eviction:
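The main ones, with their defaults in this era of kube-controller-manager, are --node-eviction-rate (0.1), --secondary-node-eviction-rate (0.01), --unhealthy-zone-threshold (0.55), --large-cluster-size-threshold (50) and --pod-eviction-timeout (5m0s); they map onto the evictionLimiterQPS, secondaryEvictionLimiterQPS, unhealthyZoneThreshold and largeClusterThreshold fields used in the code below.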
The node controller code mainly lives under pkg/controller/node.
To control eviction, kubernetes divides nodes into different zones, which is mainly done by labeling the nodes (in this release, the failure-domain.beta.kubernetes.io/region and failure-domain.beta.kubernetes.io/zone labels).
The zone name is built by combining the region and zone labels above: two nodes with the same region label and the same zone label belong to the same zone, otherwise they belong to different zones. If both labels are empty, the node falls into the default zone.
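As a rough illustration of how the zone name is put together (a minimal sketch, not the real helper; the node controller uses GetZoneKey from pkg/util/node, whose exact separator and edge-case handling may differ):

// Illustrative only: build a zone key from the region and zone node labels.
func zoneKey(nodeLabels map[string]string) string {
    region := nodeLabels["failure-domain.beta.kubernetes.io/region"]
    zone := nodeLabels["failure-domain.beta.kubernetes.io/zone"]
    if region == "" && zone == "" {
        return "" // empty key: the node ends up in the default zone
    }
    return region + ":" + zone
}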
A zone can be in one of four different states.
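In the node controller source they are represented by zoneState constants, roughly as below (names taken from pkg/controller/node; the exact string values may differ slightly):

const (
    stateInitial           = zoneState("Initial")
    stateNormal            = zoneState("Normal")
    stateFullDisruption    = zoneState("FullDisruption")
    statePartialDisruption = zoneState("PartialDisruption")
)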
The initial state is easy to understand: when a node has just joined the cluster and its zone has just been discovered, the zone sits in the initial state for a very short time. The remaining states are decided by the following function:
func (nc *NodeController) ComputeZoneState(nodeReadyConditions []*v1.NodeCondition) (int, zoneState) {
    readyNodes := 0
    notReadyNodes := 0
    for i := range nodeReadyConditions {
        if nodeReadyConditions[i] != nil && nodeReadyConditions[i].Status == v1.ConditionTrue {
            readyNodes++
        } else {
            notReadyNodes++
        }
    }
    switch {
    case readyNodes == 0 && notReadyNodes > 0:
        return notReadyNodes, stateFullDisruption
    case notReadyNodes > 2 && float32(notReadyNodes)/float32(notReadyNodes+readyNodes) >= nc.unhealthyZoneThreshold:
        return notReadyNodes, statePartialDisruption
    default:
        return notReadyNodes, stateNormal
    }
}
Note that what is counted here is the node status within a single zone, not across the whole cluster. When the zone has zero ready nodes and more than zero notReady nodes, all nodes in the zone are considered down, so the state is stateFullDisruption. When there are more than two notReady nodes and the notReady ratio reaches unhealthyZoneThreshold (0.55 by default), the state is statePartialDisruption; in all other cases it is stateNormal. For example, in a zone of 5 nodes with 3 notReady, 3/5 = 0.6 >= 0.55, so the zone is statePartialDisruption.
How do these four states affect the eviction rate? Look at the following function:
func (nc *NodeController) setLimiterInZone(zone string, zoneSize int, state zoneState) {
    switch state {
    case stateNormal:
        if nc.useTaintBasedEvictions {
            nc.zoneNotReadyOrUnreachableTainer[zone].SwapLimiter(nc.evictionLimiterQPS)
        } else {
            nc.zonePodEvictor[zone].SwapLimiter(nc.evictionLimiterQPS)
        }
    case statePartialDisruption:
        if nc.useTaintBasedEvictions {
            nc.zoneNotReadyOrUnreachableTainer[zone].SwapLimiter(
                nc.enterPartialDisruptionFunc(zoneSize))
        } else {
            nc.zonePodEvictor[zone].SwapLimiter(
                nc.enterPartialDisruptionFunc(zoneSize))
        }
    case stateFullDisruption:
        if nc.useTaintBasedEvictions {
            nc.zoneNotReadyOrUnreachableTainer[zone].SwapLimiter(
                nc.enterFullDisruptionFunc(zoneSize))
        } else {
            nc.zonePodEvictor[zone].SwapLimiter(
                nc.enterFullDisruptionFunc(zoneSize))
        }
    }
}
Here enterPartialDisruptionFunc is the function ReducedQPSFunc:
func (nc *NodeController) ReducedQPSFunc(nodeNum int) float32 {
    if int32(nodeNum) > nc.largeClusterThreshold {
        return nc.secondaryEvictionLimiterQPS
    }
    return 0
}
And enterFullDisruptionFunc is the function HealthyQPSFunc:
func (nc *NodeController) HealthyQPSFunc(nodeNum int) float32 {
    return nc.evictionLimiterQPS
}
In other words, when the zone state is Normal the eviction rate is 0.1 (at most one node's pods are evicted every 10 seconds); when the zone state is FullDisruption the rate is also 0.1; when the zone is PartialDisruption, the rate drops to 0.01 for a large cluster, and for a small cluster it drops straight to 0, which effectively stops eviction.
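These numbers come straight from the startup flags; the two hooks are wired in the controller's constructor roughly as follows (a sketch, see NewNodeController in pkg/controller/node):

// Sketch of how the rate-limit hooks are wired (simplified).
nc.enterPartialDisruptionFunc = nc.ReducedQPSFunc // PartialDisruption: 0.01 for large clusters, else 0
nc.enterFullDisruptionFunc = nc.HealthyQPSFunc    // FullDisruption: back to the normal 0.1
// evictionLimiterQPS          comes from --node-eviction-rate           (default 0.1)
// secondaryEvictionLimiterQPS comes from --secondary-node-eviction-rate (default 0.01)
// largeClusterThreshold       comes from --large-cluster-size-threshold (default 50)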
The node controller currently supports two different eviction methods: via taints, or the traditional method.
if nc.useTaintBasedEvictions {
    go wait.Until(nc.doTaintingPass, nodeEvictionPeriod, wait.NeverStop)
} else {
    go wait.Until(nc.doEvictionPass, nodeEvictionPeriod, wait.NeverStop)
}
Here nodeEvictionPeriod is 100ms, i.e. doEvictionPass or doTaintingPass runs every 100ms.
1.2.1 The traditional eviction method
zonePodEvictor has type map[string]*RateLimitedTimedQueue, i.e. each zone gets its own rate-limited queue, which stores the not-ready nodes whose pods need to be evicted.
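When the monitoring loop decides that a not-ready node's pods should go, the node is pushed into its zone's queue, roughly like this (a sketch of the call, which in the real code sits in the node health monitoring path):

// Sketch: queue a node for pod eviction in its zone's rate-limited queue.
nc.zonePodEvictor[utilnode.GetZoneKey(node)].Add(node.Name, string(node.UID))

doEvictionPass then drains these queues: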
func (nc *NodeController) doEvictionPass() {
    nc.evictorLock.Lock()
    defer nc.evictorLock.Unlock()
    for k := range nc.zonePodEvictor {
        nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) {
            node, err := nc.nodeLister.Get(value.Value)
            ...
            nodeUid, _ := value.UID.(string)
            remaining, err := deletePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, nc.daemonSetStore)
            ...
            if remaining {
                glog.Infof("Pods awaiting deletion due to NodeController eviction")
            }
            return true, 0
        })
    }
}
deletePods is the main function that evicts the pods on a node:
func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore extensionslisters.DaemonSetLister) (bool, error) {
    remaining := false
    selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String()
    options := metav1.ListOptions{FieldSelector: selector}
    pods, err := kubeClient.Core().Pods(metav1.NamespaceAll).List(options)
    var updateErrList []error
    ...
    for _, pod := range pods.Items {
        // Defensive check, also needed for tests.
        if pod.Spec.NodeName != nodeName {
            continue
        }
        // Set the pod's termination reason.
        if _, err = setPodTerminationReason(kubeClient, &pod, nodeName); err != nil {
            if errors.IsConflict(err) {
                updateErrList = append(updateErrList,
                    fmt.Errorf("update status failed for pod %q: %v", format.Pod(&pod), err))
                continue
            }
        }
        // The pod is already being deleted; skip it.
        if pod.DeletionGracePeriodSeconds != nil {
            remaining = true
            continue
        }
        // If the pod is managed by a daemonset, skip it.
        _, err := daemonStore.GetPodDaemonSets(&pod)
        if err == nil {
            continue
        }
        if err := kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
            return false, err
        }
        remaining = true
    }
    ...
    return remaining, nil
}
Under the hood this simply deletes the pods on the node. Pods managed by a daemonset are skipped, because even if they were deleted, the daemonset would recreate them on the same node.
1.2.2 The taint mechanism
The taint mechanism is still experimental and disabled by default. To enable it, --feature-gates=TaintBasedEvictions=true has to be set on all the components.
When a node's status is not ready, it is tainted with node.alpha.kubernetes.io/notReady; when the status is unknown, it is tainted with node.alpha.kubernetes.io/unreachable.
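Both taints carry the NoExecute effect; in the node controller they are defined as templates roughly like the following (a sketch based on pkg/controller/node in this release):

// Taint templates applied by the node controller (sketch).
UnreachableTaintTemplate = &v1.Taint{
    Key:    "node.alpha.kubernetes.io/unreachable",
    Effect: v1.TaintEffectNoExecute,
}
NotReadyTaintTemplate = &v1.Taint{
    Key:    "node.alpha.kubernetes.io/notReady",
    Effect: v1.TaintEffectNoExecute,
}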
Once the taints are applied, a corresponding controller has to act on them. This is the NoExecuteTaintManager:
// Run starts NoExecuteTaintManager which will run in loop until `stopCh` is closed.
func (tc *NoExecuteTaintManager) Run(stopCh <-chan struct{}) {
    go func(stopCh <-chan struct{}) {
        for {
            item, shutdown := tc.nodeUpdateQueue.Get()
            if shutdown {
                break
            }
            nodeUpdate := item.(*nodeUpdateItem)
            select {
            case <-stopCh:
                break
            case tc.nodeUpdateChannel <- nodeUpdate:
            }
        }
    }(stopCh)
    go func(stopCh <-chan struct{}) {
        for {
            item, shutdown := tc.podUpdateQueue.Get()
            if shutdown {
                break
            }
            podUpdate := item.(*podUpdateItem)
            select {
            case <-stopCh:
                break
            case tc.podUpdateChannel <- podUpdate:
            }
        }
    }(stopCh)
    for {
        select {
        case <-stopCh:
            break
        case nodeUpdate := <-tc.nodeUpdateChannel:
            tc.handleNodeUpdate(nodeUpdate)
        case podUpdate := <-tc.podUpdateChannel:
            // If we found a Pod update we need to empty Node queue first.
        priority:
            for {
                select {
                case nodeUpdate := <-tc.nodeUpdateChannel:
                    tc.handleNodeUpdate(nodeUpdate)
                default:
                    break priority
                }
            }
            // After Node queue is emptied we process podUpdate.
            tc.handlePodUpdate(podUpdate)
        }
    }
}
func deletePodHandler(c clientset.Interface, emitEventFunc func(types.NamespacedName)) func(args *WorkArgs) error {
    return func(args *WorkArgs) error {
        ns := args.NamespacedName.Namespace
        name := args.NamespacedName.Name
        glog.V(0).Infof("NoExecuteTaintManager is deleting Pod: %v", args.NamespacedName.String())
        if emitEventFunc != nil {
            emitEventFunc(args.NamespacedName)
        }
        var err error
        for i := 0; i < retries; i++ {
            err = c.Core().Pods(ns).Delete(name, &metav1.DeleteOptions{})
            if err == nil {
                break
            }
            time.Sleep(10 * time.Millisecond)
        }
        return err
    }
}
Essentially, the pods on the node being evicted are still put onto a deletion queue and removed one by one.
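Concretely, handleNodeUpdate and handlePodUpdate check each pod's tolerations against the node's NoExecute taints and, if the pod does not tolerate them (or tolerates them only for a limited tolerationSeconds), schedule its deletion on a timed work queue whose worker is the deletePodHandler above. A sketch of that wiring (simplified; exact names and signatures may differ):

// Sketch: the taint manager's eviction queue is built around deletePodHandler ...
tm.taintEvictionQueue = CreateWorkerQueue(deletePodHandler(c, tm.emitPodDeletionEvent))
// ... and a pod without a matching toleration is scheduled for deletion,
// honoring tolerationSeconds when one is set.
tm.taintEvictionQueue.AddWork(NewWorkArgs(pod.Name, pod.Namespace), startTime, triggerTime)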
The eviction mechanism in kube-controller-manager is coarse-grained: it evicts all pods on a node. The kubelet's mechanism is fine-grained: it evicts only some of the pods on the node, and which pods get evicted is tied to the Pod QoS mechanism discussed earlier.
The kubelet's eviction mechanism kicks in only when the node is under memory or disk pressure; its purpose is to reclaim node resources. As mentioned before, the kubelet also has the oom-killer to reclaim resources, so why is eviction still needed? Because after the oom-killer kills a pod, if the pod's RestartPolicy is Always, the kubelet will start the pod on the same node again after a while, whereas kubelet eviction removes the pod from the node altogether.
The kubelet provides the following parameters to control eviction:
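The main ones in this era are --eviction-hard (e.g. memory.available<100Mi), --eviction-soft together with --eviction-soft-grace-period, --eviction-max-pod-grace-period, --eviction-pressure-transition-period and --eviction-minimum-reclaim; the values shown are only examples.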
The core of kubelet eviction is the following code, in which synchronize is the key function:
func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, podCleanedUpFunc PodCleanedUpFunc, nodeProvider NodeProvider, monitoringInterval time.Duration) {
    // start the eviction manager monitoring
    go func() {
        for {
            if evictedPods := m.synchronize(diskInfoProvider, podFunc, nodeProvider); evictedPods != nil {
                glog.Infof("eviction manager: pods %s evicted, waiting for pod to be cleaned up", format.Pods(evictedPods))
                m.waitForPodsCleanup(podCleanedUpFunc, evictedPods)
            } else {
                time.Sleep(monitoringInterval)
            }
        }
    }()
}
There are currently two main mechanisms that detect whether the eviction thresholds are crossed: the periodic synchronize loop above, and the kernel memcg notification shown below:
if m.config.KernelMemcgNotification && !m.notifiersInitialized {
    glog.Infof("eviction manager attempting to integrate with kernel memcg notification api")
    m.notifiersInitialized = true
    // start soft memory notification
    err = startMemoryThresholdNotifier(m.config.Thresholds, observations, false, func(desc string) {
        glog.Infof("soft memory eviction threshold crossed at %s", desc)
        // TODO wait grace period for soft memory limit
        m.synchronize(diskInfoProvider, podFunc, nodeProvider)
    })
    if err != nil {
        glog.Warningf("eviction manager: failed to create hard memory threshold notifier: %v", err)
    }
    // start hard memory notification
    err = startMemoryThresholdNotifier(m.config.Thresholds, observations, true, func(desc string) {
        glog.Infof("hard memory eviction threshold crossed at %s", desc)
        m.synchronize(diskInfoProvider, podFunc, nodeProvider)
    })
    if err != nil {
        glog.Warningf("eviction manager: failed to create soft memory threshold notifier: %v", err)
    }
}
Kubelet eviction mainly reclaims two kinds of resources: memory and disk.
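Pressure on those resources is expressed through eviction signals; in this era of the kubelet the signals are memory.available, nodefs.available, nodefs.inodesFree, imagefs.available and imagefs.inodesFree (the exact set depends on the kubelet version), and the thresholds configured via --eviction-hard/--eviction-soft are written against them.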
The eviction manager gathers all the pods on the node and sorts them with a ranking algorithm. Let's look at how pods are ranked under memory pressure:
// rankMemoryPressure orders the input pods for eviction in response to memory pressure.
func rankMemoryPressure(pods []*v1.Pod, stats statsFunc) {
    orderedBy(qosComparator, memory(stats)).Sort(pods)
}

// qosComparator compares pods by QoS (BestEffort < Burstable < Guaranteed)
func qosComparator(p1, p2 *v1.Pod) int {
    qosP1 := v1qos.GetPodQOS(p1)
    qosP2 := v1qos.GetPodQOS(p2)
    // its a tie
    if qosP1 == qosP2 {
        return 0
    }
    // if p1 is best effort, we know p2 is burstable or guaranteed
    if qosP1 == v1.PodQOSBestEffort {
        return -1
    }
    // we know p1 and p2 are not besteffort, so if p1 is burstable, p2 must be guaranteed
    if qosP1 == v1.PodQOSBurstable {
        if qosP2 == v1.PodQOSGuaranteed {
            return -1
        }
        return 1
    }
    // ok, p1 must be guaranteed.
    return 1
}
From the qosComparator function we can see that the pods are ordered as:
PodQOSBestEffort < PodQOSBurstable < PodQOSGuaranteed
That is, BestEffort pods are reclaimed first, then Burstable, and Guaranteed pods only last.
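To make the ordering concrete, here is a small self-contained sketch (toy types, not kubelet code) of the same idea: sort by QoS class first and, within the same class, prefer the pod using more memory, which is roughly what orderedBy(qosComparator, memory(stats)) does:

package main

import (
    "fmt"
    "sort"
)

// Toy pod for illustration; the kubelet works on *v1.Pod plus live stats.
type toyPod struct {
    name string
    qos  int   // 0 = BestEffort, 1 = Burstable, 2 = Guaranteed
    mem  int64 // memory usage above request, in bytes
}

func main() {
    pods := []toyPod{
        {"guaranteed", 2, 500},
        {"burstable-big", 1, 900},
        {"besteffort", 0, 100},
        {"burstable-small", 1, 200},
    }
    sort.Slice(pods, func(i, j int) bool {
        if pods[i].qos != pods[j].qos {
            return pods[i].qos < pods[j].qos // lower QoS class is evicted first
        }
        return pods[i].mem > pods[j].mem // within a class, the bigger consumer goes first
    })
    for _, p := range pods {
        fmt.Println(p.name) // besteffort, burstable-big, burstable-small, guaranteed
    }
}

Back in the eviction manager, the loop that actually kills the selected pod looks like this: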
// we kill at most a single pod during each eviction interval
for i := range activePods {
    pod := activePods[i]
    // If the pod is marked as critical and static, and support for critical pod annotations is enabled,
    // do not evict such pods. Static pods are not re-admitted after evictions.
    // https://github.com/kubernetes/kubernetes/issues/40573 has more details.
    if utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) &&
        kubelettypes.IsCriticalPod(pod) && kubepod.IsStaticPod(pod) {
        continue
    }
    status := v1.PodStatus{
        Phase:   v1.PodFailed,
        Message: fmt.Sprintf(message, resourceToReclaim),
        Reason:  reason,
    }
    // record that we are evicting the pod
    m.recorder.Eventf(pod, v1.EventTypeWarning, reason, fmt.Sprintf(message, resourceToReclaim))
    gracePeriodOverride := int64(0)
    if softEviction {
        gracePeriodOverride = m.config.MaxPodGracePeriodSeconds
    }
    // this is a blocking call and should only return when the pod and its containers are killed.
    err := m.killPodFunc(pod, status, &gracePeriodOverride)
    if err != nil {
        glog.Warningf("eviction manager: error while evicting pod %s: %v", format.Pod(pod), err)
    }
    return []*v1.Pod{pod}
}
At most one pod is evicted per eviction interval.
For a hard eviction the grace period override is set to 0, i.e. the pod is killed immediately; for a soft eviction it is set to MaxPodGracePeriodSeconds. killPodFunc is then called to kill the pod.
It is worth noting that when kubernetes evicts a pod, it does not recreate it. If you want the pod to be recreated, you need controllers such as ReplicationController, ReplicaSet or Deployment. In other words, if you create a bare pod directly, it is simply deleted when evicted and never rebuilt; with a ReplicationController and friends, the controller notices that a pod is missing and creates a new one.
Reposted from: http://licyhust.com/%E5%AE%B9%E5%99%A8%E6%8A%80%E6%9C%AF/2017/10/24/eviction/