This article analyzes the source code for Deployment handling in the Kubernetes controller-manager. The analysis is based on the master branch at commit 2ab7ad14b4fad378a4a69a64c587497d77e60f44, dated August 19, 2017.
The creation of the Deployment Controller can be traced back to controllers["deployment"] = startDeploymentController. In the startDeploymentController function, the code first checks whether the deployments resource is available, then creates and initializes a DeploymentController object via NewDeploymentController, and finally calls the object's Run method to enter the processing loop.
// The flow here is the same as for the other controllers, so it is not analyzed further.
func startDeploymentController(ctx ControllerContext) (bool, error) {
    if !ctx.AvailableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"}] {
        return false, nil
    }
    go deployment.NewDeploymentController(
        ctx.InformerFactory.Extensions().V1beta1().Deployments(),
        ctx.InformerFactory.Extensions().V1beta1().ReplicaSets(),
        ctx.InformerFactory.Core().V1().Pods(),
        ctx.ClientBuilder.ClientOrDie("deployment-controller"),
    ).Run(int(ctx.Options.ConcurrentDeploymentSyncs), ctx.Stop)
    return true, nil
}
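For reference, the controllers["deployment"] = startDeploymentController registration mentioned above lives in the controller-manager's initializer map. The following is only a simplified sketch of that registration, assuming the NewControllerInitializers layout used in cmd/kube-controller-manager/app at the time; the full list of controllers is omitted.
// Sketch: how startDeploymentController is wired into kube-controller-manager.
// InitFunc starts a named controller; the map key is the controller name.
type InitFunc func(ctx ControllerContext) (bool, error)

func NewControllerInitializers() map[string]InitFunc {
    controllers := map[string]InitFunc{}
    // ... other controllers omitted ...
    controllers["replicaset"] = startReplicaSetController
    controllers["deployment"] = startDeploymentController
    return controllers
}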
The DeploymentController object is created in NewDeploymentController. The main steps are: (1) create the eventBroadcaster and set its properties; (2) register metrics tracking for the client's RateLimiter; (3) create the DeploymentController object; (4) register the event callbacks for dInformer, rsInformer and podInformer; (5) set dc.syncHandler to the dc.syncDeployment function.
// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, rsInformer extensionsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) *DeploymentController {
    // Create the eventBroadcaster and set its properties
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    // TODO: remove the wrapper when every clients have moved to use the clientset.
    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(client.Core().RESTClient()).Events("")})
    // Register metrics tracking for the client's `RateLimiter`
    if client != nil && client.Core().RESTClient().GetRateLimiter() != nil {
        metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.Core().RESTClient().GetRateLimiter())
    }
    // Create the `DeploymentController` object
    dc := &DeploymentController{
        client:        client,
        eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "deployment-controller"}),
        queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
    }
    dc.rsControl = controller.RealRSControl{
        KubeClient: client,
        Recorder:   dc.eventRecorder,
    }
    // Register the event callbacks for `dInformer`, `rsInformer` and `podInformer`
    dInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc:    dc.addDeployment,
        UpdateFunc: dc.updateDeployment,
        // This will enter the sync loop and no-op, because the deployment has been deleted from the store.
        DeleteFunc: dc.deleteDeployment,
    })
    rsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc:    dc.addReplicaSet,
        UpdateFunc: dc.updateReplicaSet,
        DeleteFunc: dc.deleteReplicaSet,
    })
    podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        DeleteFunc: dc.deletePod,
    })
    // Set `dc.syncHandler` to the `dc.syncDeployment` function
    dc.syncHandler = dc.syncDeployment
    dc.enqueueDeployment = dc.enqueue
    dc.dLister = dInformer.Lister()
    dc.rsLister = rsInformer.Lister()
    dc.podLister = podInformer.Lister()
    dc.dListerSynced = dInformer.Informer().HasSynced
    dc.rsListerSynced = rsInformer.Informer().HasSynced
    dc.podListerSynced = podInformer.Informer().HasSynced
    return dc
}
The Run function of the Deployment Controller is handled in exactly the same way as in the ReplicaSet Controller; the basic call path is Run -> worker -> processNextWorkItem -> syncHandler -> syncDeployment. For a more detailed analysis of the flow inside Run, see the ReplicaSet Controller source code analysis. A simplified sketch of the worker loop is shown after the Run function below.
// Run begins watching and syncing.
func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()
    defer dc.queue.ShutDown()
    glog.Infof("Starting deployment controller")
    defer glog.Infof("Shutting down deployment controller")
    if !controller.WaitForCacheSync("deployment", stopCh, dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) {
        return
    }
    for i := 0; i < workers; i++ {
        go wait.Until(dc.worker, time.Second, stopCh)
    }
    <-stopCh
}
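The worker and processNextWorkItem functions referenced in the call path above are not listed in this article. Roughly, they follow the standard workqueue pattern, sketched below; the handleErr helper, which requeues failed keys with rate limiting, is assumed here.
// Sketch of the worker loop: each worker keeps pulling keys off the queue and
// handing them to syncHandler (i.e. syncDeployment) until the queue shuts down.
func (dc *DeploymentController) worker() {
    for dc.processNextWorkItem() {
    }
}

func (dc *DeploymentController) processNextWorkItem() bool {
    key, quit := dc.queue.Get()
    if quit {
        return false
    }
    defer dc.queue.Done(key)

    err := dc.syncHandler(key.(string))
    // On error, handleErr re-adds the key with rate limiting; on success it
    // calls queue.Forget so the key's failure history is cleared.
    dc.handleErr(err, key)
    return true
}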
Similar to syncReplicaSet in the ReplicaSet Controller, the Deployment Controller handles its main flow in the syncDeployment function, whose code is shown below.
The main steps are:
(1) Parse the key into namespace and name.
(2) Get the deployment object and make a deep copy.
(3) Get the ReplicaSets owned by the deployment.
(4) Get the Pods owned by the deployment.
(5) Check the paused condition of the deployment.
(6) Check whether the deployment has failed, i.e. whether it has timed out.
(7) If the deployment is paused, perform a sync and return.
(8) If a rollback is requested, perform the rollback.
(9) Determine whether this is only a scaling event; if so, perform a sync.
(10) Depending on the update strategy, perform either a Recreate rollout or a RollingUpdate rollout.
// syncDeployment will sync the deployment with the given key.
// This function is not meant to be invoked concurrently with the same key.
func (dc *DeploymentController) syncDeployment(key string) error {
    startTime := time.Now()
    glog.V(4).Infof("Started syncing deployment %q (%v)", key, startTime)
    defer func() {
        glog.V(4).Infof("Finished syncing deployment %q (%v)", key, time.Now().Sub(startTime))
    }()
    // Parse the key into namespace and name
    namespace, name, err := cache.SplitMetaNamespaceKey(key)
    if err != nil {
        return err
    }
    // Get the deployment object
    deployment, err := dc.dLister.Deployments(namespace).Get(name)
    if errors.IsNotFound(err) {
        glog.V(2).Infof("Deployment %v has been deleted", key)
        return nil
    }
    if err != nil {
        return err
    }
    // Make a full deep copy
    // Deep-copy otherwise we are mutating our cache.
    // TODO: Deep-copy only when needed.
    d, err := util.DeploymentDeepCopy(deployment)
    if err != nil {
        return err
    }
    everything := metav1.LabelSelector{}
    if reflect.DeepEqual(d.Spec.Selector, &everything) {
        dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
        if d.Status.ObservedGeneration < d.Generation {
            d.Status.ObservedGeneration = d.Generation
            dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
        }
        return nil
    }
    // Get the ReplicaSets owned by this deployment
    // List ReplicaSets owned by this Deployment, while reconciling ControllerRef
    // through adoption/orphaning.
    rsList, err := dc.getReplicaSetsForDeployment(d)
    if err != nil {
        return err
    }
    // List all Pods owned by this Deployment, grouped by their ReplicaSet.
    // Current uses of the podMap are:
    //
    // * check if a Pod is labeled correctly with the pod-template-hash label.
    // * check that no old Pods are running in the middle of Recreate Deployments.
    // Get the Pods owned by this deployment
    podMap, err := dc.getPodMapForDeployment(d, rsList)
    if err != nil {
        return err
    }
    if d.DeletionTimestamp != nil {
        return dc.syncStatusOnly(d, rsList, podMap)
    }
    // Update deployment conditions with an Unknown condition when pausing/resuming
    // a deployment. In this way, we can be sure that we won't timeout when a user
    // resumes a Deployment with a set progressDeadlineSeconds.
    // Check the paused condition of the deployment
    if err = dc.checkPausedConditions(d); err != nil {
        return err
    }
    // Check whether the deployment has failed, i.e. whether it has timed out
    _, err = dc.hasFailed(d, rsList, podMap)
    if err != nil {
        return err
    }
    // TODO: Automatically rollback here if we failed above. Locate the last complete
    // revision and populate the rollback spec with it.
    // See https://github.com/kubernetes/kubernetes/issues/23211.
    // If the deployment is paused, perform a sync and return
    if d.Spec.Paused {
        return dc.sync(d, rsList, podMap)
    }
    // rollback is not re-entrant in case the underlying replica sets are updated with a new
    // revision so we should ensure that we won't proceed to update replica sets until we
    // make sure that the deployment has cleaned up its rollback spec in subsequent enqueues.
    // If a rollback is requested, perform it
    if d.Spec.RollbackTo != nil {
        return dc.rollback(d, rsList, podMap)
    }
    // Check whether this is only a scaling event; if so, perform a sync
    scalingEvent, err := dc.isScalingEvent(d, rsList, podMap)
    if err != nil {
        return err
    }
    if scalingEvent {
        return dc.sync(d, rsList, podMap)
    }
    // Depending on the strategy, perform a Recreate or a RollingUpdate rollout
    switch d.Spec.Strategy.Type {
    case extensions.RecreateDeploymentStrategyType:
        return dc.rolloutRecreate(d, rsList, podMap)
    case extensions.RollingUpdateDeploymentStrategyType:
        return dc.rolloutRolling(d, rsList, podMap)
    }
    return fmt.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type)
}
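The isScalingEvent check used in step (9) is not listed in this article. Roughly, it compares the desired-replicas annotation recorded on each active ReplicaSet with the deployment's current Spec.Replicas; if any of them differ, only a scale operation is needed. The following is a simplified sketch based on the deployment util helpers of that era, not the exact source.
// Sketch: a deployment sync is treated as a pure scaling event when the
// desired-replicas annotation on an active ReplicaSet no longer matches
// d.Spec.Replicas, i.e. only the replica count changed, not the template.
func (dc *DeploymentController) isScalingEvent(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) (bool, error) {
    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
    if err != nil {
        return false, err
    }
    allRSs := append(oldRSs, newRS)
    for _, rs := range controller.FilterActiveReplicaSets(allRSs) {
        desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs)
        if !ok {
            continue
        }
        if desired != *(d.Spec.Replicas) {
            return true, nil
        }
    }
    return false, nil
}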
The getReplicaSetsForDeployment function uses the deployment's information to obtain the ReplicaSets it should manage. Its code is as follows:
// getReplicaSetsForDeployment uses ControllerRefManager to reconcile
// ControllerRef by adopting and orphaning.
// It returns the list of ReplicaSets that this Deployment should manage.
func (dc *DeploymentController) getReplicaSetsForDeployment(d *extensions.Deployment) ([]*extensions.ReplicaSet, error) {
    // List all ReplicaSets to find those we own but that no longer match our
    // selector. They will be orphaned by ClaimReplicaSets().
    // List all ReplicaSets in the namespace
    rsList, err := dc.rsLister.ReplicaSets(d.Namespace).List(labels.Everything())
    if err != nil {
        return nil, err
    }
    deploymentSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
    if err != nil {
        return nil, fmt.Errorf("deployment %s/%s has invalid label selector: %v", d.Namespace, d.Name, err)
    }
    // If any adoptions are attempted, we should first recheck for deletion with
    // an uncached quorum read sometime after listing ReplicaSets (see #42639).
    // Set up the canAdopt check: re-fetch the deployment before adopting anything
    canAdoptFunc := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
        fresh, err := dc.client.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
        if err != nil {
            return nil, err
        }
        if fresh.UID != d.UID {
            return nil, fmt.Errorf("original Deployment %v/%v is gone: got uid %v, wanted %v", d.Namespace, d.Name, fresh.UID, d.UID)
        }
        return fresh, nil
    })
    // Create the ReplicaSetControllerRefManager object
    cm := controller.NewReplicaSetControllerRefManager(dc.rsControl, d, deploymentSelector, controllerKind, canAdoptFunc)
    // Claim the ReplicaSets that belong to this deployment
    return cm.ClaimReplicaSets(rsList)
}
// The claim process is essentially the same as how the ReplicaSet controller claims Pods.
// Roughly: first define the match, adopt and release functions, then run ClaimObject on each
// ReplicaSet to match, release or adopt it, and return the ReplicaSets owned by this Deployment.
func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*extensions.ReplicaSet) ([]*extensions.ReplicaSet, error) {
    var claimed []*extensions.ReplicaSet
    var errlist []error
    match := func(obj metav1.Object) bool {
        return m.Selector.Matches(labels.Set(obj.GetLabels()))
    }
    adopt := func(obj metav1.Object) error {
        return m.AdoptReplicaSet(obj.(*extensions.ReplicaSet))
    }
    release := func(obj metav1.Object) error {
        return m.ReleaseReplicaSet(obj.(*extensions.ReplicaSet))
    }
    for _, rs := range sets {
        ok, err := m.ClaimObject(rs, match, adopt, release)
        if err != nil {
            errlist = append(errlist, err)
            continue
        }
        if ok {
            claimed = append(claimed, rs)
        }
    }
    return claimed, utilerrors.NewAggregate(errlist)
}
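ClaimObject itself lives in the shared ControllerRefManager and is not part of the Deployment code. A simplified sketch of its adopt/release decision logic is shown below; error handling is trimmed, so treat it as an outline rather than the exact implementation.
// Sketch: ClaimObject decides, for a single object, whether this controller
// should own it. It returns true only when the object ends up owned by us.
func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) {
    controllerRef := metav1.GetControllerOf(obj)
    if controllerRef != nil {
        if controllerRef.UID != m.Controller.GetUID() {
            // Owned by another controller: ignore it.
            return false, nil
        }
        if match(obj) {
            // Already owned by us and still matches the selector: claim it.
            return true, nil
        }
        // Owned by us but no longer matching: release (orphan) it, unless we
        // are being deleted ourselves.
        if m.Controller.GetDeletionTimestamp() != nil {
            return false, nil
        }
        return false, release(obj)
    }
    // No ControllerRef: it is an orphan. Adopt it only if the selector matches
    // and neither side is being deleted.
    if m.Controller.GetDeletionTimestamp() != nil || !match(obj) || obj.GetDeletionTimestamp() != nil {
        return false, nil
    }
    if err := adopt(obj); err != nil {
        return false, err
    }
    return true, nil
}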
The getPodMapForDeployment function uses the deployment and its ReplicaSets to obtain the corresponding Pods. Its code is as follows:
// getPodMapForDeployment returns the Pods managed by a Deployment.
//
// It returns a map from ReplicaSet UID to a list of Pods controlled by that RS,
// according to the Pod's ControllerRef.
func (dc *DeploymentController) getPodMapForDeployment(d *extensions.Deployment, rsList []*extensions.ReplicaSet) (map[types.UID]*v1.PodList, error) {
    // Get all Pods that potentially belong to this Deployment.
    selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
    if err != nil {
        return nil, err
    }
    // List the Pods in the namespace that match the deployment's selector
    pods, err := dc.podLister.Pods(d.Namespace).List(selector)
    if err != nil {
        return nil, err
    }
    // Initialize the map keyed by ReplicaSet UID, with a PodList as the value
    // Group Pods by their controller (if it's in rsList).
    podMap := make(map[types.UID]*v1.PodList, len(rsList))
    for _, rs := range rsList {
        podMap[rs.UID] = &v1.PodList{}
    }
    // Iterate over all Pods, check each Pod's controllerRef, and append the Pod
    // to the list for the matching ReplicaSet UID
    for _, pod := range pods {
        // Do not ignore inactive Pods because Recreate Deployments need to verify that no
        // Pods from older versions are running before spinning up new Pods.
        controllerRef := metav1.GetControllerOf(pod)
        if controllerRef == nil {
            continue
        }
        // Only append if we care about this UID.
        if podList, ok := podMap[controllerRef.UID]; ok {
            podList.Items = append(podList.Items, *pod)
        }
    }
    return podMap, nil
}
The checkPausedConditions function checks whether the deployment is paused and updates the Progressing condition in its status accordingly.
// checkPausedConditions checks if the given deployment is paused or not and adds an appropriate condition.
// These conditions are needed so that we won't accidentally report lack of progress for resumed deployments
// that were paused for longer than progressDeadlineSeconds.
func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) error {
    // If ProgressDeadlineSeconds is not set, skip the following checks
    if d.Spec.ProgressDeadlineSeconds == nil {
        return nil
    }
    // Get the deployment's current Progressing condition
    cond := deploymentutil.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
    // If the condition is already TimedOut, stop here to avoid overwriting it
    if cond != nil && cond.Reason == deploymentutil.TimedOutReason {
        // If we have reported lack of progress, do not overwrite it with a paused condition.
        return nil
    }
    // Check whether the condition reason is already PausedDeploy
    pausedCondExists := cond != nil && cond.Reason == deploymentutil.PausedDeployReason
    // If the spec is paused but the condition is not yet Paused,
    // mark the status for update with a Paused condition
    needsUpdate := false
    if d.Spec.Paused && !pausedCondExists {
        condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused")
        deploymentutil.SetDeploymentCondition(&d.Status, *condition)
        needsUpdate = true
    } else if !d.Spec.Paused && pausedCondExists {
        // If the spec is not paused but the condition is still Paused,
        // mark the status for update with a Resumed condition
        condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed")
        deploymentutil.SetDeploymentCondition(&d.Status, *condition)
        needsUpdate = true
    }
    // If no update is needed, return directly
    if !needsUpdate {
        return nil
    }
    // Otherwise update the deployment status
    var err error
    d, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
    return err
}
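The GetDeploymentCondition and SetDeploymentCondition helpers used above come from the deployment util package. Their behavior is simple; a sketch of the getter is shown below for reference (the corresponding setter replaces any existing condition of the same type).
// Sketch: return the deployment condition with the given type, or nil if the
// status does not contain such a condition.
func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensions.DeploymentConditionType) *extensions.DeploymentCondition {
    for i := range status.Conditions {
        c := status.Conditions[i]
        if c.Type == condType {
            return &c
        }
    }
    return nil
}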
The hasFailed function determines whether the deployment has failed, i.e. timed out. The code is as follows:
func (dc *DeploymentController) hasFailed(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) (bool, error) {
    // Skip this check if ProgressDeadlineSeconds is not set,
    // or if a rollback is requested or the deployment is paused
    if d.Spec.ProgressDeadlineSeconds == nil || d.Spec.RollbackTo != nil || d.Spec.Paused {
        return false, nil
    }
    // Get all ReplicaSets of this deployment; if the new RS does not exist, it is not created here
    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
    if err != nil {
        return false, err
    }
    // If newRS is nil, the template has changed, so skip the check here
    // There is a template change so we don't need to check for any progress right now.
    if newRS == nil {
        return false, nil
    }
    // Look at the status of the deployment - if there is already a NewRSAvailableReason
    // then we don't need to estimate any progress. This is needed in order to avoid
    // estimating progress for scaling events after a rollout has finished.
    // Get the deployment's Progressing condition
    cond := util.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing)
    if cond != nil && cond.Reason == util.NewRSAvailableReason {
        return false, nil
    }
    // TODO: Look for permanent failures here.
    // See https://github.com/kubernetes/kubernetes/issues/18568
    // Calculate the deployment's current status from the old and new ReplicaSets,
    // including available replicas, current replicas, updated replicas, readiness, etc.
    allRSs := append(oldRSs, newRS)
    newStatus := calculateStatus(allRSs, newRS, d)
    // If the deployment is complete or it is progressing, there is no need to check if it
    // has timed out.
    // If the deployment is complete, skip the timeout check;
    // if it is still progressing, skip the timeout check as well
    if util.DeploymentComplete(d, &newStatus) || util.DeploymentProgressing(d, &newStatus) {
        return false, nil
    }
    // Check if the deployment has timed out.
    // If more than ProgressDeadlineSeconds have passed since the last condition update,
    // the deployment is considered timed out and true is returned
    return util.DeploymentTimedOut(d, &newStatus), nil
}
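The util.DeploymentTimedOut check at the end compares the age of the Progressing condition against ProgressDeadlineSeconds. A simplified sketch of that logic is shown below; some details, such as the time-source indirection used for testing, are omitted.
// Sketch: a deployment is considered timed out when its Progressing condition
// has not been updated for longer than ProgressDeadlineSeconds.
func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
    if deployment.Spec.ProgressDeadlineSeconds == nil {
        return false
    }
    condition := GetDeploymentCondition(*newStatus, extensions.DeploymentProgressing)
    if condition == nil {
        return false
    }
    if condition.Reason == TimedOutReason {
        // Already reported as timed out.
        return true
    }
    // LastUpdateTime is refreshed whenever progress is made, so the deadline
    // is measured from the last observed progress.
    from := condition.LastUpdateTime
    delta := time.Duration(*deployment.Spec.ProgressDeadlineSeconds) * time.Second
    return from.Add(delta).Before(time.Now())
}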
If a rollback has been requested on the deployment (Spec.RollbackTo is set), the rollback function is called. It rolls the deployment back to the specified revision. The code is as follows:
// rollback the deployment to the specified revision. In any case cleanup the rollback spec.
func (dc *DeploymentController) rollback(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
    newRS, allOldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
    if err != nil {
        return err
    }
    allRSs := append(allOldRSs, newRS)
    toRevision := &d.Spec.RollbackTo.Revision
    // If rollback revision is 0, rollback to the last revision
    // If no rollback revision is specified, default to the last revision:
    // set the target revision to the latest revision number
    if *toRevision == 0 {
        if *toRevision = deploymentutil.LastRevision(allRSs); *toRevision == 0 {
            // If we still can't find the last revision, gives up rollback
            dc.emitRollbackWarningEvent(d, deploymentutil.RollbackRevisionNotFound, "Unable to find last revision.")
            // Gives up rollback
            // Abnormal case: the last revision could not be found. Emit a rollback
            // failure event, then clear the rollback spec
            return dc.updateDeploymentAndClearRollbackTo(d)
        }
    }
    for _, rs := range allRSs {
        // Find the ReplicaSet with the target revision and roll back to it
        v, err := deploymentutil.Revision(rs)
        if err != nil {
            glog.V(4).Infof("Unable to extract revision from deployment's replica set %q: %v", rs.Name, err)
            continue
        }
        if v == *toRevision {
            glog.V(4).Infof("Found replica set %q with desired revision %d", rs.Name, v)
            // rollback by copying podTemplate.Spec from the replica set
            // revision number will be incremented during the next getAllReplicaSetsAndSyncRevision call
            // no-op if the spec matches current deployment's podTemplate.Spec
            performedRollback, err := dc.rollbackToTemplate(d, rs)
            if performedRollback && err == nil {
                dc.emitRollbackNormalEvent(d, fmt.Sprintf("Rolled back deployment %q to revision %d", d.Name, *toRevision))
            }
            return err
        }
    }
    // Likewise, if the target revision cannot be found, emit the corresponding
    // event and clear the rollback spec
    dc.emitRollbackWarningEvent(d, deploymentutil.RollbackRevisionNotFound, "Unable to find the revision to rollback to.")
    // Gives up rollback
    return dc.updateDeploymentAndClearRollbackTo(d)
}
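The rollbackToTemplate call above copies the target ReplicaSet's pod template back into the deployment spec and then clears the rollback request. Roughly, it looks like the sketch below; annotation handling and the exact event message are simplified assumptions.
// Sketch: roll the deployment back by copying the pod template (and relevant
// annotations) from the chosen ReplicaSet, then clear Spec.RollbackTo.
func (dc *DeploymentController) rollbackToTemplate(d *extensions.Deployment, rs *extensions.ReplicaSet) (bool, error) {
    performedRollback := false
    if !deploymentutil.EqualIgnoreHash(d.Spec.Template, rs.Spec.Template) {
        // The templates differ: overwrite the deployment's template with the
        // ReplicaSet's template so the next sync rolls out the old revision.
        deploymentutil.SetFromReplicaSetTemplate(d, rs.Spec.Template)
        deploymentutil.SetDeploymentAnnotationsTo(d, rs)
        performedRollback = true
    } else {
        // The deployment already runs this template: nothing to roll back.
        dc.emitRollbackWarningEvent(d, deploymentutil.RollbackTemplateUnchanged,
            fmt.Sprintf("The rollback revision contains the same template as current deployment %q", d.Name))
    }
    // In either case the rollback spec is cleaned up.
    return performedRollback, dc.updateDeploymentAndClearRollbackTo(d)
}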
If the deployment's update strategy is Recreate, the rolloutRecreate function handles the update. Its basic steps are: scale the old ReplicaSets down to 0, then create the new ReplicaSet if needed and scale it up to the desired number of replicas.
// rolloutRecreate implements the logic for recreating a replica set.
func (dc *DeploymentController) rolloutRecreate(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
    // Don't create a new RS if not already existed, so that we avoid scaling up before scaling down.
    // Get the old and new ReplicaSets; if the new RS does not exist yet, it is not created here
    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, false)
    if err != nil {
        return err
    }
    allRSs := append(oldRSs, newRS)
    // Filter the old ReplicaSets that still have replicas
    activeOldRSs := controller.FilterActiveReplicaSets(oldRSs)
    // scale down old replica sets.
    // Scale the old ReplicaSets down to 0
    scaledDown, err := dc.scaleDownOldReplicaSetsForRecreate(activeOldRSs, d)
    if err != nil {
        return err
    }
    // After scaling down, update the status and return
    if scaledDown {
        // Update DeploymentStatus.
        return dc.syncRolloutStatus(allRSs, newRS, d)
    }
    // Do not process a deployment when it has old pods running.
    // Check whether old Pods are still running; if so, update the status and return
    if oldPodsRunning(newRS, oldRSs, podMap) {
        return dc.syncRolloutStatus(allRSs, newRS, d)
    }
    // If we need to create a new RS, create it now.
    // Create the new ReplicaSet now if it does not exist yet
    if newRS == nil {
        newRS, oldRSs, err = dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
        if err != nil {
            return err
        }
        allRSs = append(oldRSs, newRS)
    }
    // scale up new replica set.
    // Scale the new ReplicaSet up to the desired number of replicas
    if _, err := dc.scaleUpNewReplicaSetForRecreate(newRS, d); err != nil {
        return err
    }
    // If the rollout is complete, clean up the old ReplicaSets
    if util.DeploymentComplete(d, &d.Status) {
        if err := dc.cleanupDeployment(oldRSs, d); err != nil {
            return err
        }
    }
    // Sync deployment status.
    return dc.syncRolloutStatus(allRSs, newRS, d)
}
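The oldPodsRunning helper is what makes podMap necessary for Recreate deployments: new Pods must not be created while Pods from older ReplicaSets are still around. A rough sketch of the check, based on the recreate logic of that era:
// Sketch: return true while any Pod that does not belong to the new ReplicaSet
// is still present, so a Recreate rollout waits before scaling up the new RS.
func oldPodsRunning(newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) bool {
    if oldPods := util.GetActualReplicaCountForReplicaSets(oldRSs); oldPods > 0 {
        return true
    }
    for rsUID, podList := range podMap {
        // Pods already controlled by the new ReplicaSet are fine.
        if newRS != nil && newRS.UID == rsUID {
            continue
        }
        if len(podList.Items) > 0 {
            return true
        }
    }
    return false
}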
If the deployment's update strategy is RollingUpdate, the rolloutRolling function handles the update. Its basic steps are: scale up the new ReplicaSet, then scale down the old ReplicaSets. The code is as follows:
// rolloutRolling implements the logic for rolling a new replica set.
func (dc *DeploymentController) rolloutRolling(d *extensions.Deployment, rsList []*extensions.ReplicaSet, podMap map[types.UID]*v1.PodList) error {
    // Get the old and new ReplicaSets; if the new RS does not exist, create it
    newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, rsList, podMap, true)
    if err != nil {
        return err
    }
    allRSs := append(oldRSs, newRS)
    // Scale up, if we can.
    // Scale up the new ReplicaSet
    scaledUp, err := dc.reconcileNewReplicaSet(allRSs, newRS, d)
    if err != nil {
        return err
    }
    // After scaling up, sync the status and return
    if scaledUp {
        // Update DeploymentStatus
        return dc.syncRolloutStatus(allRSs, newRS, d)
    }
    // Scale down, if we can.
    // Scale down the old ReplicaSets
    scaledDown, err := dc.reconcileOldReplicaSets(allRSs, controller.FilterActiveReplicaSets(oldRSs), newRS, d)
    if err != nil {
        return err
    }
    // After scaling down, sync the status and return
    if scaledDown {
        // Update DeploymentStatus
        return dc.syncRolloutStatus(allRSs, newRS, d)
    }
    // If the rollout is complete, clean up the old ReplicaSets
    if deploymentutil.DeploymentComplete(d, &d.Status) {
        if err := dc.cleanupDeployment(oldRSs, d); err != nil {
            return err
        }
    }
    // Sync deployment status
    // Update the deployment status
    return dc.syncRolloutStatus(allRSs, newRS, d)
}
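How far reconcileNewReplicaSet may scale up the new ReplicaSet in one step is bounded by maxSurge. The following is a simplified sketch of that calculation, loosely based on the NewRSNewReplicas helper of that era; the Recreate branch, which simply returns the desired replica count, is omitted.
// Sketch: during a rolling update the total number of Pods may exceed the
// desired count by at most maxSurge, so the new RS is scaled up only as far
// as that budget (and the deployment's desired replicas) allows.
func NewRSNewReplicas(d *extensions.Deployment, allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) (int32, error) {
    maxSurge, err := intstrutil.GetValueFromIntOrPercent(d.Spec.Strategy.RollingUpdate.MaxSurge, int(*(d.Spec.Replicas)), true)
    if err != nil {
        return 0, err
    }
    currentPodCount := GetReplicaCountForReplicaSets(allRSs)
    maxTotalPods := *(d.Spec.Replicas) + int32(maxSurge)
    if currentPodCount >= maxTotalPods {
        // The surge budget is used up: keep the new RS at its current size.
        return *(newRS.Spec.Replicas), nil
    }
    // Scale up by the remaining budget, but never beyond the desired replicas.
    scaleUpCount := maxTotalPods - currentPodCount
    if want := *(d.Spec.Replicas) - *(newRS.Spec.Replicas); scaleUpCount > want {
        scaleUpCount = want
    }
    return *(newRS.Spec.Replicas) + scaleUpCount, nil
}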
The overall flow of the Deployment Controller does not differ much from that of the other controllers. In practice, it makes its decisions mainly based on the state of the ReplicaSets and Pods, driving them toward the state specified in the Deployment's spec.