Each CPU is represented as a device

The topology_init function in arch/arm64/kernel/setup.c registers a device for each CPU; each of these devices shows up under /sys/devices/system/cpu.
static int __init topology_init(void)
{
	int i;
	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);
As shown above, topology_init is invoked during kernel initialization via subsys_initcall. It walks every possible CPU, picks up the per-CPU struct cpu embedded in cpu_data, marks it hot-pluggable, and finally calls register_cpu to register a device for that CPU under /sys/devices/system/cpu.
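For reference, the cpu_data.cpu expression works because on arm64 cpu_data is a per-CPU cpuinfo_arm64 whose first member is the generic struct cpu. Roughly (abbreviated; the exact field list varies by kernel version):
/* arch/arm64/include/asm/cpu.h (abbreviated) */
struct cpuinfo_arm64 {
	struct cpu	cpu;	/* the generic CPU device that register_cpu() fills in */
	/* ... cached id registers (MIDR, CTR, ...) omitted ... */
};
DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);

/* arch/arm64/kernel/cpuinfo.c */
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
So per_cpu(cpu_data.cpu, i) yields the struct cpu belonging to CPU i, which register_cpu then turns into a sysfs device: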
int register_cpu(struct cpu *cpu, int num)
{
	int error;
	// map the cpu id to its NUMA node id
	cpu->node_id = cpu_to_node(num);
	memset(&cpu->dev, 0x00, sizeof(struct device));
	cpu->dev.id = num;
	cpu->dev.bus = &cpu_subsys;
	cpu->dev.release = cpu_device_release;
	cpu->dev.offline_disabled = !cpu->hotpluggable;
	cpu->dev.offline = !cpu_online(num);
	cpu->dev.of_node = of_get_cpu_node(num, NULL);
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
	cpu->dev.bus->uevent = cpu_uevent;
#endif
	// attach the sysfs attribute groups for this device
	cpu->dev.groups = common_cpu_attr_groups;
	if (cpu->hotpluggable)
		cpu->dev.groups = hotplugable_cpu_attr_groups;
	// cpu->dev.bus = &cpu_subsys means this device sits on the bus named "cpu";
	// device_register() below finally registers it in sysfs
	error = device_register(&cpu->dev);
	if (error)
		return error;

	per_cpu(cpu_sys_devices, num) = &cpu->dev;
	register_cpu_under_node(num, cpu_to_node(num));
	dev_pm_qos_expose_latency_limit(&cpu->dev, 0);
	return 0;
}
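Once register_cpu has run for every CPU, the result is directly visible from user space: each device_register call above produces one cpuN directory. A minimal sketch that lists those entries and their online state (an illustration only, assuming the standard sysfs layout):
#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	DIR *d = opendir("/sys/devices/system/cpu");
	struct dirent *e;

	if (!d)
		return 1;

	while ((e = readdir(d)) != NULL) {
		char path[256], buf[8] = "?";
		FILE *f;

		/* only the cpu0, cpu1, ... entries created by register_cpu() */
		if (strncmp(e->d_name, "cpu", 3) || e->d_name[3] < '0' || e->d_name[3] > '9')
			continue;

		snprintf(path, sizeof(path), "/sys/devices/system/cpu/%s/online", e->d_name);
		f = fopen(path, "r");
		if (f) {
			if (fgets(buf, sizeof(buf), f))
				buf[strcspn(buf, "\n")] = '\0';
			fclose(f);
		}
		printf("%s online=%s\n", e->d_name, buf);
	}
	closedir(d);
	return 0;
}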
At the end of register_cpu, dev_pm_qos_expose_latency_limit(&cpu->dev, 0) is called, exposing the resume latency limit for the CPU device with an initial value of 0:
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;
	// device_is_registered() checks whether the device has already been
	// registered in sysfs; it simply returns dev->kobj.state_in_sysfs
	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	// go on to call dev_pm_qos_add_request
	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

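	// ... remainder of the function omitted here: the request is stored in
	// dev->power.qos and then exposed as a sysfs attribute under the
	// device's power/ directory ...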
}
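The value exposed here ends up as a per-device sysfs file; on recent kernels it is power/pm_qos_resume_latency_us under each CPU device (the attribute name is an assumption here and can differ across kernel versions). A small sketch reading it for cpu0:
#include <stdio.h>

int main(void)
{
	/* attribute created by dev_pm_qos_expose_latency_limit(); name assumed
	 * to be pm_qos_resume_latency_us on recent kernels */
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/power/pm_qos_resume_latency_us", "r");
	char buf[32];

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 resume latency limit: %s", buf);
	fclose(f);
	return 0;
}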
From there the call chain is dev_pm_qos_add_request -> __dev_pm_qos_add_request -> dev_pm_qos_constraints_allocate:
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	// In the current flow dev->power.qos was still NULL, which is why this
	// allocation runs. After power.qos is set here, apply_constraint() is
	// called to update the constraint and notify other modules; but since
	// this is a newly added request (PM_QOS_ADD_REQ) rather than a change to
	// an existing one, apply_constraint() is effectively a no-op here.
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}
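Both constraint sets above use PM_QOS_MIN, i.e. the effective target is the smallest of all outstanding requests, falling back to a default/no-constraint value when the list is empty. A simplified user-space model of that aggregation rule (not the kernel's plist implementation, just the policy):
#include <limits.h>
#include <stdio.h>

/* stand-in for the default value; the real PM_QOS_RESUME_LATENCY_DEFAULT_VALUE
 * differs across kernel versions */
#define DEFAULT_VALUE INT_MAX

/* PM_QOS_MIN: the effective constraint is the minimum of all requests */
static int pm_qos_min_target(const int *requests, int count)
{
	int i, target = DEFAULT_VALUE;

	if (count == 0)
		return DEFAULT_VALUE;	/* no requests: keep the default value */
	for (i = 0; i < count; i++)
		if (requests[i] < target)
			target = requests[i];
	return target;
}

int main(void)
{
	/* e.g. three requests against one device's resume latency, in us */
	int reqs[] = { 100, 0, 250 };

	printf("target = %d\n", pm_qos_min_target(reqs, 3));	/* -> 0, the tightest limit */
	printf("target (no requests) = %d\n", pm_qos_min_target(reqs, 0));
	return 0;
}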
