# Exploring the Xenomai Kernel
I have been using xenomai for a long time and have read a fair number of its technical documents, so I had a rough picture of how it works. Recently, while job hunting, I have also been reading up on the Linux kernel. This afternoon I was actually looking at LinuxCNC, but I couldn't figure out how it dynamically makes use of the xenomai real-time core, and before I knew it I was reading the xenomai source itself. Once I started, a lot of things that had only been fuzzy suddenly clicked. So from today on I've decided to work through the xenomai source and explore how its real-time behavior is implemented. I hope I can keep it up.

This post collects my personal notes on xenomai, starting from zero; along the way I'll link related documents I've read before. It also doubles as a small summary of my graduate studies.

My fundamentals are a bit weak, so if you spot any mistakes, corrections are very welcome.
xenomai's entry point is `xenomai_init(void)`. The source hooks it into the kernel boot sequence with:

```c
device_initcall(xenomai_init);
```
As described for the Linux initcall mechanism (for drivers built into the kernel), the xenomai module is loaded after the Linux kernel itself has booted. The call path is:
```
start_kernel
  -> rest_init();
    -> kernel_thread(kernel_init, NULL, CLONE_FS);
      -> kernel_init()
        -> kernel_init_freeable();
          -> do_basic_setup();
            -> do_initcalls();
              -> do_initcall_level(0);
                 ...
              -> do_initcall_level(6);
```
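For reference, `device_initcall()` simply queues the function at initcall level 6. Roughly, from the kernel's include/linux/init.h (paraphrased from memory, so double-check against your kernel version):

```c
/* Paraphrased from include/linux/init.h: the function pointer is placed
 * in the .initcall<id>.init section and later run by do_initcall_level(id). */
#define __define_initcall(fn, id) \
    static initcall_t __initcall_##fn##id __used \
    __attribute__((__section__(".initcall" #id ".init"))) = fn;

#define device_initcall(fn)    __define_initcall(fn, 6)
```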
From here the xenomai core starts:
```c
static int __init xenomai_init(void)
{
    int ret, __maybe_unused cpu;

    setup_init_state();

    if (!realtime_core_enabled()) {
        printk(XENO_WARNING "disabled on kernel command line\n");
        return 0;
    }

#ifdef CONFIG_SMP
    cpumask_clear(&xnsched_realtime_cpus);
    for_each_online_cpu(cpu) {
        if (supported_cpus_arg & (1UL << cpu))
            cpumask_set_cpu(cpu, &xnsched_realtime_cpus);
    }
    if (cpumask_empty(&xnsched_realtime_cpus)) {
        printk(XENO_WARNING "disabled via empty real-time CPU mask\n");
        set_realtime_core_state(COBALT_STATE_DISABLED);
        return 0;
    }
    cobalt_cpu_affinity = xnsched_realtime_cpus;
#endif /* CONFIG_SMP */

    xnsched_register_classes();

    ret = xnprocfs_init_tree();
    if (ret)
        goto fail;

    ret = mach_setup();
    if (ret)
        goto cleanup_proc;

    xnintr_mount();

    ret = xnpipe_mount();
    if (ret)
        goto cleanup_mach;

    ret = xnselect_mount();
    if (ret)
        goto cleanup_pipe;

    ret = sys_init();
    if (ret)
        goto cleanup_select;

    ret = mach_late_setup();
    if (ret)
        goto cleanup_sys;

    ret = rtdm_init();
    if (ret)
        goto cleanup_sys;

    ret = cobalt_init();
    if (ret)
        goto cleanup_rtdm;

    rtdm_fd_init();

    printk(XENO_INFO "Cobalt v%s %s%s%s%s\n",
           XENO_VERSION_STRING,
           boot_debug_notice,
           boot_lat_trace_notice,
           boot_evt_trace_notice,
           boot_state_notice);

    return 0;

cleanup_rtdm:
    rtdm_cleanup();
cleanup_sys:
    sys_shutdown();
cleanup_select:
    xnselect_umount();
cleanup_pipe:
    xnpipe_umount();
cleanup_mach:
    mach_cleanup();
cleanup_proc:
    xnprocfs_cleanup_tree();
fail:
    set_realtime_core_state(COBALT_STATE_DISABLED);
    printk(XENO_ERR "init failed, code %d\n", ret);
    return ret;
}
```
It boils down to the following calls:
```c
setup_init_state();
set_realtime_core_state();
xnsched_register_classes();
xnprocfs_init_tree();
mach_setup();
xnintr_mount();
xnpipe_mount();
xnselect_mount();
sys_init();
mach_late_setup();
rtdm_init();
cobalt_init();
rtdm_fd_init();
```
Let's take them one at a time. First, setup_init_state():
```c
static void __init setup_init_state(void)
{
    static char warn_bad_state[] __initdata =
        XENO_WARNING "invalid init state '%s'\n";
    int n;

    for (n = 0; n < ARRAY_SIZE(init_states); n++)
        if (strcmp(init_states[n].label, init_state_arg) == 0) {
            set_realtime_core_state(init_states[n].state);
            return;
        }

    printk(warn_bad_state, init_state_arg);
}
```
The related definitions:
```c
static struct {
    const char *label;
    enum cobalt_run_states state;
} init_states[] __initdata = {
    { "disabled", COBALT_STATE_DISABLED },
    { "stopped", COBALT_STATE_STOPPED },
    { "enabled", COBALT_STATE_WARMUP },
};

enum cobalt_run_states {
    COBALT_STATE_DISABLED,
    COBALT_STATE_RUNNING,
    COBALT_STATE_STOPPED,
    COBALT_STATE_TEARDOWN,
    COBALT_STATE_WARMUP,
};
```
This maps the textual state labels to xenomai's run-state enum values.

```c
static char init_state_arg[16] = "enabled";
```

The core is enabled by default. When a label matches, the real-time core's state is set:
```c
static inline void set_realtime_core_state(enum cobalt_run_states state)
{
    atomic_set(&cobalt_runstate, state);
}
```
Here cobalt_runstate is an atomic variable, which is really just a wrapped int:
```c
static inline void atomic_set(atomic_t *ptr, long v)
{
    ptr->v = v;
}

typedef struct { int v; } atomic_t;

/* COBALT_STATE_WARMUP is one of the xenomai states defined above. */
atomic_t cobalt_runstate = ATOMIC_INIT(COBALT_STATE_WARMUP);

#define ATOMIC_INIT(__n) { (__n) }
```
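The read side, `realtime_core_enabled()`, presumably just inspects the same atomic. A minimal sketch, assuming it mirrors the setter above (the real definition lives in the Cobalt init code and should be checked there):

```c
/* Sketch only: assumes realtime_core_enabled() is a plain atomic read
 * of cobalt_runstate compared against COBALT_STATE_DISABLED. */
static inline int realtime_core_enabled(void)
{
    return atomic_read(&cobalt_runstate) != COBALT_STATE_DISABLED;
}
```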
The boot state can be configured in /etc/default/grub, e.g.:

```
GRUB_CMDLINE_LINUX="isolcpus=0,1 xenomai.supported_cpus=0x03"
```
The official Installing_Xenomai_3 guide documents the parameter:
```
NAME
    xenomai.state=

DESCRIPTION
    Set the initial state of the Cobalt core at boot up, which may be
    enabled, stopped or disabled. See the documentation about the
    corectl(1) utility for a description of these states.

DEFAULT
    enabled
```
corectl manipulates the core state from user space; see its documentation, corectl - Cobalt core control interface. The tool is installed under /usr/xenomai/sbin by default.

After setting the state, xenomai_init() immediately checks it; if the core did not come up enabled (or something else went wrong), it bails out:
```c
if (!realtime_core_enabled()) {
    printk(XENO_WARNING "disabled on kernel command line\n");
    return 0;
}
```
My earlier post on installing Ubuntu + the xenomai 3.1 patch in a virtual machine covers the setup. Since practically every machine is multi-core nowadays, CONFIG_SMP is normally enabled:
```c
#ifdef CONFIG_SMP
    cpumask_clear(&xnsched_realtime_cpus);
    for_each_online_cpu(cpu) {
        if (supported_cpus_arg & (1UL << cpu))
            cpumask_set_cpu(cpu, &xnsched_realtime_cpus);
    }
    if (cpumask_empty(&xnsched_realtime_cpus)) {
        printk(XENO_WARNING "disabled via empty real-time CPU mask\n");
        set_realtime_core_state(COBALT_STATE_DISABLED);
        return 0;
    }
    cobalt_cpu_affinity = xnsched_realtime_cpus;
#endif /* CONFIG_SMP */
```
cpumask_clear, as the name suggests, zeroes the mask:
```c
/**
 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
 * @dstp: the cpumask pointer
 */
static inline void cpumask_clear(struct cpumask *dstp)
{
    bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
}

#define cpumask_bits(maskp) ((maskp)->bits)

static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
    if (small_const_nbits(nbits))
        *dst = 0UL;
    else {
        unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
        memset(dst, 0, len);
    }
}
```
Then the CPU mask xenomai may use is built up:
```c
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
#define for_each_cpu(cpu, mask)            \
    for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
```

At first I didn't understand why this only visits CPU 0. It turns out the definition above is the uniprocessor (NR_CPUS == 1) fallback, where CPU 0 is trivially the only online CPU.
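On SMP kernels the macro instead walks the set bits of the mask; roughly, from include/linux/cpumask.h (the exact form may differ slightly between kernel versions):

```c
/* SMP variant, paraphrased from include/linux/cpumask.h:
 * iterate over every cpu whose bit is set in *mask. */
#define for_each_cpu(cpu, mask)                \
    for ((cpu) = -1;                           \
         (cpu) = cpumask_next((cpu), (mask)),  \
         (cpu) < nr_cpu_ids;)
```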
The mask can also be passed in via the GRUB parameters described in the official Installing_Xenomai_3 guide.

```c
cobalt_cpu_affinity = xnsched_realtime_cpus;
```

xenomai's CPU affinity is then pinned to the configured CPUs; combined with isolcpus=0,1 above, the Linux scheduler keeps ordinary tasks off those two cores. There may be more to this; I'll revisit it later.
```c
void xnsched_register_classes(void)
{
    xnsched_register_class(&xnsched_class_idle);
#ifdef CONFIG_XENO_OPT_SCHED_WEAK
    xnsched_register_class(&xnsched_class_weak);
#endif
#ifdef CONFIG_XENO_OPT_SCHED_TP
    xnsched_register_class(&xnsched_class_tp);
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
    xnsched_register_class(&xnsched_class_sporadic);
#endif
#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
    xnsched_register_class(&xnsched_class_quota);
#endif
    xnsched_register_class(&xnsched_class_rt);
}
```
dmesg shows that only two classes were registered on my build:
```
[ 1.629946] [Xenomai] scheduling class idle registered.
[ 1.629946] [Xenomai] scheduling class rt registered.
```
Let's look at xnsched_register_class:
```c
static void xnsched_register_class(struct xnsched_class *sched_class)
{
    sched_class->next = xnsched_class_highest;
    xnsched_class_highest = sched_class;

    /*
     * Classes shall be registered by increasing priority order,
     * idle first and up.
     */
    XENO_BUG_ON(COBALT, sched_class->next &&
                sched_class->next->weight > sched_class->weight);

    printk(XENO_INFO "scheduling class %s registered.\n", sched_class->name);
}
```
There is a file-scope declaration of xnsched_class_highest:

```c
static struct xnsched_class *xnsched_class_highest;
```

An uninitialized pointer? In C, objects with static storage duration are zero-initialized, so it starts out as NULL. With that, the code is easy to read: registration builds a singly linked list, xnsched_class_rt -> xnsched_class_idle -> NULL, with xnsched_class_highest ending up pointing at xnsched_class_rt. This list is what the xenomai thread scheduler walks at run time; for now a rough picture is enough.
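To make the "walk from highest to lowest" idea concrete, here is a hypothetical sketch of what a pick over the class list could look like, using the sched_pick handler from the class definition shown next. This is not xenomai's actual code; the real logic in the Cobalt scheduler is more involved:

```c
/* Hypothetical sketch: ask each class in decreasing priority order for a
 * runnable thread; the idle class at the tail always yields one, so the
 * loop terminates. */
static struct xnthread *pick_from_classes(struct xnsched *sched)
{
    struct xnsched_class *c;
    struct xnthread *thread;

    for (c = xnsched_class_highest; c; c = c->next) {
        thread = c->sched_pick(sched);
        if (thread)
            return thread;
    }
    return NULL; /* unreachable if the idle class is registered */
}
```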
Now the definition of struct xnsched_class:
```c
struct xnsched_class {
    void (*sched_init)(struct xnsched *sched);
    void (*sched_enqueue)(struct xnthread *thread);
    void (*sched_dequeue)(struct xnthread *thread);
    void (*sched_requeue)(struct xnthread *thread);
    struct xnthread *(*sched_pick)(struct xnsched *sched);
    void (*sched_tick)(struct xnsched *sched);
    void (*sched_rotate)(struct xnsched *sched,
                         const union xnsched_policy_param *p);
    void (*sched_migrate)(struct xnthread *thread,
                          struct xnsched *sched);
    int (*sched_chkparam)(struct xnthread *thread,
                          const union xnsched_policy_param *p);
    /**
     * Set base scheduling parameters. This routine is indirectly
     * called upon a change of base scheduling settings through
     * __xnthread_set_schedparam() -> xnsched_set_policy(),
     * exclusively.
     *
     * The scheduling class implementation should do the necessary
     * housekeeping to comply with the new settings.
     * thread->base_class is up to date before the call is made,
     * and should be considered for the new weighted priority
     * calculation. On the contrary, thread->sched_class should
     * NOT be referred to by this handler.
     *
     * sched_setparam() is NEVER involved in PI or PP
     * management. However it must deny a priority update if it
     * contradicts an ongoing boost for @a thread. This is
     * typically what the xnsched_set_effective_priority() helper
     * does for such handler.
     *
     * @param thread Affected thread.
     * @param p New base policy settings.
     *
     * @return True if the effective priority was updated
     * (thread->cprio).
     */
    bool (*sched_setparam)(struct xnthread *thread,
                           const union xnsched_policy_param *p);
    void (*sched_getparam)(struct xnthread *thread,
                           union xnsched_policy_param *p);
    void (*sched_trackprio)(struct xnthread *thread,
                            const union xnsched_policy_param *p);
    void (*sched_protectprio)(struct xnthread *thread, int prio);
    int (*sched_declare)(struct xnthread *thread,
                         const union xnsched_policy_param *p);
    void (*sched_forget)(struct xnthread *thread);
    void (*sched_kick)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_VFILE
    int (*sched_init_vfile)(struct xnsched_class *schedclass,
                            struct xnvfile_directory *vfroot);
    void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
#endif
    int nthreads;
    struct xnsched_class *next;
    int weight;
    int policy;
    const char *name;
};
```
The function pointers above are the scheduler operations. The data members are:

- `int nthreads` — number of threads in the class;
- `struct xnsched_class *next` — link in the class list;
- `int weight` — class priority weight;
- `int policy` — scheduling policy id;
- `const char *name` — class name.

Let's see how the classes fill these in.
First, the rt class:
```c
struct xnsched_class xnsched_class_rt = {
    .sched_init = xnsched_rt_init,
    .sched_enqueue = xnsched_rt_enqueue,
    .sched_dequeue = xnsched_rt_dequeue,
    .sched_requeue = xnsched_rt_requeue,
    .sched_pick = xnsched_rt_pick,
    .sched_tick = xnsched_rt_tick,
    .sched_rotate = xnsched_rt_rotate,
    .sched_forget = NULL,
    .sched_kick = NULL,
    .sched_declare = NULL,
    .sched_setparam = xnsched_rt_setparam,
    .sched_trackprio = xnsched_rt_trackprio,
    .sched_protectprio = xnsched_rt_protectprio,
    .sched_getparam = xnsched_rt_getparam,
#ifdef CONFIG_XENO_OPT_VFILE
    .sched_init_vfile = xnsched_rt_init_vfile,
    .sched_cleanup_vfile = xnsched_rt_cleanup_vfile,
#endif
    .weight = XNSCHED_CLASS_WEIGHT(4),
    .policy = SCHED_FIFO,
    .name = "rt"
};
```
Here the weight is 4 * 1024, the policy is SCHED_FIFO (a macro whose value is 1), and the name is "rt".
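For reference, I believe the weight macro is defined along these lines — an assumption from memory, so check the Cobalt scheduler headers for the authoritative form:

```c
/* Assumed definitions (unverified): each class's weight is its level
 * scaled by a fixed factor, giving 4 * 1024 for the rt class above. */
#define XNSCHED_CLASS_WEIGHT_FACTOR    1024
#define XNSCHED_CLASS_WEIGHT(n)        (n * XNSCHED_CLASS_WEIGHT_FACTOR)
```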
That's enough on rt for now; scheduling proper deserves its own post later.
Now the other registered class, idle:
```c
struct xnsched_class xnsched_class_idle = {
    .sched_init = NULL,
    .sched_enqueue = NULL,
    .sched_dequeue = NULL,
    .sched_requeue = NULL,
    .sched_tick = NULL,
    .sched_rotate = NULL,
    .sched_forget = NULL,
    .sched_kick = NULL,
    .sched_declare = NULL,
    .sched_pick = xnsched_idle_pick,
    .sched_setparam = xnsched_idle_setparam,
    .sched_getparam = xnsched_idle_getparam,
    .sched_trackprio = xnsched_idle_trackprio,
    .sched_protectprio = xnsched_idle_protectprio,
    .weight = XNSCHED_CLASS_WEIGHT(0),
    .policy = SCHED_IDLE,
    .name = "idle"
};
```
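Note that idle still provides sched_pick even though its weight is 0: when no other class offers a thread, it hands back the root thread, i.e. control returns to Linux. I believe the handler is essentially a one-liner along these lines, though I haven't verified the exact field name:

```c
/* Assumed shape of the idle class's pick handler (unverified): return
 * the per-CPU root thread control block, i.e. resume the Linux side. */
static struct xnthread *xnsched_idle_pick(struct xnsched *sched)
{
    return &sched->rootcb;
}
```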
Next, judging by the name, comes the function that initializes the xenomai directory tree under /proc:
```c
int __init xnprocfs_init_tree(void)
{
    int ret;

    ret = xnvfile_init_root();
    if (ret)
        return ret;

    ret = xnsched_init_proc();
    if (ret)
        return ret;

    xnclock_init_proc();
    xnheap_init_proc();
    xnintr_init_proc();

    xnvfile_init_regular("latency", &latency_vfile, &cobalt_vfroot);
    xnvfile_init_regular("version", &version_vfile, &cobalt_vfroot);
    xnvfile_init_regular("faults", &faults_vfile, &cobalt_vfroot);
    xnvfile_init_regular("apc", &apc_vfile, &cobalt_vfroot);
#ifdef CONFIG_XENO_OPT_DEBUG
    xnvfile_init_dir("debug", &cobalt_debug_vfroot, &cobalt_vfroot);
#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
    xnvfile_init_regular("lock", &lock_vfile, &cobalt_debug_vfroot);
#endif
#endif
    return 0;
}
```
First xnvfile_init_root, defined as:
```c
int __init xnvfile_init_root(void)
{
    struct xnvfile_directory *vdir = &cobalt_vfroot;
    struct proc_dir_entry *pde;

    pde = proc_mkdir("xenomai", NULL);
    if (pde == NULL)
        return -ENOMEM;

    vdir->entry.pde = pde;
    vdir->entry.lockops = NULL;
    vdir->entry.private = NULL;

    return 0;
}
```
First, the variable it starts from: `vdir = &cobalt_vfroot`. The related types:
```c
struct xnvfile_lock_ops {
    /**
     * @anchor lockops_get
     * This handler should grab the desired lock.
     *
     * @param vfile A pointer to the virtual file which needs
     * locking.
     *
     * @return zero should be returned if the call
     * succeeds. Otherwise, a negative error code can be returned;
     * upon error, the current vfile operation is aborted, and the
     * user-space caller is passed back the error value.
     */
    int (*get)(struct xnvfile *vfile);
    /**
     * @anchor lockops_put This handler should release the lock
     * previously grabbed by the @ref lockops_get "get() handler".
     *
     * @param vfile A pointer to the virtual file which currently
     * holds the lock to release.
     */
    void (*put)(struct xnvfile *vfile);
};

struct xnvfile {
    struct proc_dir_entry *pde;
    struct file *file;
    struct xnvfile_lock_ops *lockops;
    int refcnt;
    void *private;
};

struct xnvfile_directory {
    struct xnvfile entry;
};

struct xnvfile_directory cobalt_vfroot;
```
The system defines a global cobalt_vfroot of type xnvfile_directory. xnvfile_directory wraps an xnvfile, which in turn holds a proc_dir_entry, the entry type Linux uses for nodes under /proc. The call pde = proc_mkdir("xenomai", NULL) registers such a node with the kernel for our xenomai directory and stores it in the entry.

At run time you can browse /proc/xenomai. The tree looks like this:
```
xenomai/
├── affinity
├── apc
├── clock
│   └── coreclk
├── faults
├── heap
├── irq
├── latency
├── registry
│   └── usage
├── sched
│   ├── acct
│   ├── rt
│   │   └── threads
│   ├── stat
│   └── threads
├── timer
│   └── coreclk
└── version

5 directories, 14 files
```
I won't expand on every node; the whole function exists to build the directory tree shown above, creating and initializing the child entries.
Next comes mach_setup(), which involves Adeos (the I-pipe): the pipeline layer wraps interrupt handling at the lowest level, every interrupt seen by Linux or xenomai is delivered through it, and the two kernels also communicate through it. I won't expand on it here and will cover it in a separate article; the code is below for reference.
```c
static int __init mach_setup(void)
{
    struct ipipe_sysinfo sysinfo;
    int ret, virq;

    ret = ipipe_select_timers(&xnsched_realtime_cpus);
    if (ret < 0)
        return ret;

    ipipe_get_sysinfo(&sysinfo);

    if (timerfreq_arg == 0)
        timerfreq_arg = sysinfo.sys_hrtimer_freq;

    if (clockfreq_arg == 0)
        clockfreq_arg = sysinfo.sys_hrclock_freq;

    if (clockfreq_arg == 0) {
        printk(XENO_ERR "null clock frequency? Aborting.\n");
        return -ENODEV;
    }

    cobalt_pipeline.timer_freq = timerfreq_arg;
    cobalt_pipeline.clock_freq = clockfreq_arg;

    if (cobalt_machine.init) {
        ret = cobalt_machine.init();
        if (ret)
            return ret;
    }

    ipipe_register_head(&xnsched_realtime_domain, "Xenomai");

    ret = -EBUSY;
    virq = ipipe_alloc_virq();
    if (virq == 0)
        goto fail_apc;

    cobalt_pipeline.apc_virq = virq;

    ipipe_request_irq(ipipe_root_domain,
                      cobalt_pipeline.apc_virq,
                      apc_dispatch,
                      NULL, NULL);

    virq = ipipe_alloc_virq();
    if (virq == 0)
        goto fail_escalate;

    cobalt_pipeline.escalate_virq = virq;

    ipipe_request_irq(&xnsched_realtime_domain,
                      cobalt_pipeline.escalate_virq,
                      (ipipe_irq_handler_t)__xnsched_run_handler,
                      NULL, NULL);

    ret = xnclock_init(cobalt_pipeline.clock_freq);
    if (ret)
        goto fail_clock;

    return 0;

fail_clock:
    ipipe_free_irq(&xnsched_realtime_domain,
                   cobalt_pipeline.escalate_virq);
    ipipe_free_virq(cobalt_pipeline.escalate_virq);
fail_escalate:
    ipipe_free_irq(ipipe_root_domain,
                   cobalt_pipeline.apc_virq);
    ipipe_free_virq(cobalt_pipeline.apc_virq);
fail_apc:
    ipipe_unregister_head(&xnsched_realtime_domain);

    if (cobalt_machine.cleanup)
        cobalt_machine.cleanup();

    return ret;
}
```
Then xnintr_mount():

```c
int __init xnintr_mount(void)
{
    int i;

    for (i = 0; i < IPIPE_NR_IRQS; ++i)
        xnlock_init(&vectors[i].lock);

    return 0;
}
```
This initializes the lock protecting each interrupt vector xenomai may handle.
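These xnlock objects are taken with the xnlock_get_irqsave()/xnlock_put_irqrestore() pair seen elsewhere in the core (for instance in xnheap_init() below). A minimal usage sketch, with the critical section left as a placeholder:

```c
/* Sketch of the usual xnlock pattern around one vector's lock;
 * the critical-section body is a placeholder. */
static void example_touch_vector(int irq)
{
    spl_t s;

    xnlock_get_irqsave(&vectors[irq].lock, s);
    /* ... manipulate the shared state behind vectors[irq] ... */
    xnlock_put_irqrestore(&vectors[irq].lock, s);
}
```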
Next, xnpipe_mount():

```c
int xnpipe_mount(void)
{
    struct xnpipe_state *state;
    struct device *cldev;
    int i;

    for (state = &xnpipe_states[0];
         state < &xnpipe_states[XNPIPE_NDEVS]; state++) {
        state->status = 0;
        state->asyncq = NULL;
        INIT_LIST_HEAD(&state->inq);
        state->nrinq = 0;
        INIT_LIST_HEAD(&state->outq);
        state->nroutq = 0;
    }

    xnpipe_class = class_create(THIS_MODULE, "rtpipe");
    if (IS_ERR(xnpipe_class)) {
        printk(XENO_ERR "error creating rtpipe class, err=%ld\n",
               PTR_ERR(xnpipe_class));
        return -EBUSY;
    }

    for (i = 0; i < XNPIPE_NDEVS; i++) {
        cldev = device_create(xnpipe_class, NULL,
                              MKDEV(XNPIPE_DEV_MAJOR, i),
                              NULL, "rtp%d", i);
        if (IS_ERR(cldev)) {
            printk(XENO_ERR
                   "can't add device class, major=%d, minor=%d, err=%ld\n",
                   XNPIPE_DEV_MAJOR, i, PTR_ERR(cldev));
            class_destroy(xnpipe_class);
            return -EBUSY;
        }
    }

    if (register_chrdev(XNPIPE_DEV_MAJOR, "rtpipe", &xnpipe_fops)) {
        printk(XENO_ERR
               "unable to reserve major #%d for message pipe support\n",
               XNPIPE_DEV_MAJOR);
        return -EBUSY;
    }

    xnpipe_wakeup_apc =
        xnapc_alloc("pipe_wakeup", &xnpipe_wakeup_proc, NULL);

    return 0;
}
```
xnselect_mount() is along the same lines; both relate to the pipeline layer, and I'll write them up together later.
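As a side note, the rtp devices created above are the non-real-time end of xenomai's message pipes; from regular Linux user space they are driven with plain file I/O. A hypothetical sketch (the device number and message handling are placeholders, not from the xenomai source):

```c
/* Hypothetical non-RT reader for a xenomai message pipe: the devices
 * registered above show up as /dev/rtp0../dev/rtpN and are accessed
 * with ordinary open()/read()/write(). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char buf[128];
    ssize_t n;
    int fd = open("/dev/rtp0", O_RDWR);

    if (fd < 0) {
        perror("open");
        return 1;
    }

    n = read(fd, buf, sizeof(buf)); /* blocks until the RT side writes */
    if (n > 0)
        printf("got %zd bytes from the real-time side\n", n);

    close(fd);
    return 0;
}
```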
Now sys_init():

```c
static __init int sys_init(void)
{
    void *heapaddr;
    int ret;

    if (sysheap_size_arg == 0)
        sysheap_size_arg = CONFIG_XENO_OPT_SYS_HEAPSZ;

    heapaddr = xnheap_vmalloc(sysheap_size_arg * 1024);
    if (heapaddr == NULL ||
        xnheap_init(&cobalt_heap, heapaddr, sysheap_size_arg * 1024)) {
        return -ENOMEM;
    }
    xnheap_set_name(&cobalt_heap, "system heap");

    xnsched_init_all();

    xnregistry_init();

    /*
     * If starting in stopped mode, do all initializations, but do
     * not enable the core timer.
     */
    if (realtime_core_state() == COBALT_STATE_WARMUP) {
        ret = xntimer_grab_hardware();
        if (ret) {
            sys_shutdown();
            return ret;
        }
        set_realtime_core_state(COBALT_STATE_RUNNING);
    }

    return 0;
}
```
First it asks Linux for xenomai's own heap memory:
```c
if (sysheap_size_arg == 0)
    sysheap_size_arg = CONFIG_XENO_OPT_SYS_HEAPSZ;
```
CONFIG_XENO_OPT_SYS_HEAPSZ is chosen at kernel configuration time (make menuconfig):
```
CONFIG_XENO_OPT_SYS_HEAPSZ:

The system heap is used for various internal allocations by
the Cobalt kernel. The size is expressed in Kilobytes.

Symbol: XENO_OPT_SYS_HEAPSZ [=4096]
Type  : integer
Prompt: Size of system heap (Kb)
  Location:
    -> Xenomai/cobalt (XENOMAI [=y])
      -> Sizes and static limits
  Defined at kernel/xenomai/Kconfig:262
  Depends on: XENOMAI [=y]
```
The default is 4096 KB, so the allocation below requests 4096 * 1024 bytes = 4 MB:

```c
heapaddr = xnheap_vmalloc(sysheap_size_arg * 1024);
```
```c
void *xnheap_vmalloc(size_t size)
{
    /*
     * We want memory used in real-time context to be pulled from
     * ZONE_NORMAL, however we don't need it to be physically
     * contiguous.
     *
     * 32bit systems which would need HIGHMEM for running a Cobalt
     * configuration would also be required to support PTE
     * pinning, which not all architectures provide. Moreover,
     * pinning PTEs eagerly for a potentially (very) large amount
     * of memory may quickly degrade performance.
     *
     * If using a different kernel/user memory split cannot be the
     * answer for those configs, it's likely that basing such
     * software on a 32bit system had to be wrong in the first
     * place anyway.
     */
    return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
```

Per the __vmalloc() documentation, GFP_KERNEL is the typical flag for kernel-internal allocations: the caller requires ZONE_NORMAL or a lower zone for direct access and may direct-reclaim.
So this allocates a virtually contiguous (though not necessarily physically contiguous) region from the kernel's vmalloc area, 4 MB by default. Then xenomai's own heap is initialized over it:

```c
xnheap_init(&cobalt_heap, heapaddr, sysheap_size_arg * 1024)
```
```c
int xnheap_init(struct xnheap *heap, void *membase, size_t size)
{
    int n, nrpages;
    spl_t s;

    secondary_mode_only();

    if (size > XNHEAP_MAX_HEAPSZ || !PAGE_ALIGNED(size))
        return -EINVAL;

    /* Reset bucket page lists, all empty. */
    for (n = 0; n < XNHEAP_MAX_BUCKETS; n++)
        heap->buckets[n] = -1U;

    xnlock_init(&heap->lock);

    nrpages = size >> XNHEAP_PAGE_SHIFT;
    heap->pagemap = kzalloc(sizeof(struct xnheap_pgentry) * nrpages,
                            GFP_KERNEL);
    if (heap->pagemap == NULL)
        return -ENOMEM;

    heap->membase = membase;
    heap->usable_size = size;
    heap->used_size = 0;

    /*
     * The free page pool is maintained as a set of ranges of
     * contiguous pages indexed by address and size in rbtrees.
     * Initially, we have a single range in those trees covering
     * the whole memory we have been given for the heap. Over
     * time, that range will be split then possibly re-merged back
     * as allocations and deallocations take place.
     */
    heap->size_tree = RB_ROOT;
    heap->addr_tree = RB_ROOT;
    release_page_range(heap, membase, size);

    /* Default name, override with xnheap_set_name() */
    ksformat(heap->name, sizeof(heap->name), "(%p)", heap);

    xnlock_get_irqsave(&nklock, s);
    list_add_tail(&heap->next, &heapq);
    nrheaps++;
    xnvfile_touch_tag(&vfile_tag);
    xnlock_put_irqrestore(&nklock, s);

    return 0;
}
```
The xnheap definition:
```c
struct xnheap {
    void *membase;
    struct rb_root addr_tree;
    struct rb_root size_tree;
    struct xnheap_pgentry *pagemap;
    size_t usable_size;
    size_t used_size;
    u32 buckets[XNHEAP_MAX_BUCKETS];
    char name[XNOBJECT_NAME_LEN];
    DECLARE_XNLOCK(lock);
    struct list_head next;
};

struct xnheap cobalt_heap;
```
Much like the kernel's own paging, xenomai splits the memory it obtained from Linux into pages, and the per-page bookkeeping array is itself allocated from Linux:

```c
nrpages = size >> XNHEAP_PAGE_SHIFT;
heap->pagemap = kzalloc(sizeof(struct xnheap_pgentry) * nrpages,
                        GFP_KERNEL);
```
The pages are then handed over to the heap's free pool for management; I'll write about the allocator separately as well. After the heap, sys_init() performs scheduler initialization (xnsched_init_all), registry initialization (xnregistry_init) and, unless the core starts in stopped mode, grabs the per-CPU hardware timer (xntimer_grab_hardware). Each of these probably needs its own write-up.
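Before moving on, a quick sketch of how the rest of the core presumably draws from cobalt_heap once it is set up, via the heap's xnheap_alloc()/xnheap_free() entry points (the size and the consumer are made up for illustration):

```c
/* Illustrative only: allocate and release a block from the Cobalt
 * system heap initialized by sys_init(). */
static int example_use_sysheap(void)
{
    void *obj = xnheap_alloc(&cobalt_heap, 256);

    if (obj == NULL)
        return -ENOMEM; /* system heap exhausted */

    /* ... use the 256-byte block for some core object ... */

    xnheap_free(&cobalt_heap, obj);
    return 0;
}
```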
## The rest
The remaining pieces:

```c
mach_late_setup()
rtdm_init()
cobalt_init()
rtdm_fd_init()
```
I can't crack these yet. I'm off to study memory allocation and interrupts first.