还是不会从blog导入文章,只好复制粘贴了,原文在my blog:http://blog.chinaunix.net/u3/94771/showart_1945422.html 基于NETLINK的内核与用户空间共享内存的实现 author:bripengandre Email:[email protected] 一、前言前些日子,开发中用到了netlink来实现内核与用户空间共享内存,写点笔记与大家分享。因为我对这块也不了解,写出来的东西一定存在很多错误,请大家批评指正~ 内核与用户空间共享内存的关键是,用户空间必须得知共享内存的起始地址,这就要求内核空间应该有一种通信机制来通知用户空间。已经有Godbach版主等人 用proc文件系统实现了(可以google '共享内存 内核 用户空间'),很显然任何内核空间与用户空间的通信方法都可资利用。本文主要讲基于NETLINK机制的实现。 二、NETLINK简介 netlink在linux的内核与用户空间通信中用得很多(但具体例子我举不出,因为我不清楚~~请google之),其最大优势是接口与网络编程中的socket相似,且内核要主动发信息给用户空间很方便。 但通过实践,我发现netlink通信机制的最大弊病在于其在各内核版本中接口变化太大,让人难以适从(可从后文列出的源码中的kernel_receive的声明窥一斑)。 既然涉及到内核与用户空间两个空间,就应该在两个空间各有一套接口。用户空间的接口很简单,与一般的socket接口相似,内核空间则稍先复杂,但简单 的应用只需简单地了解即可:首先也是建立描述符,建立描述符时会注册一个回调函数(源码中的kernel_receive即是),然后当用户空间有消息发 过来时,我们的函数将被调用,显然在这个函数里我们可做相应的处理;当内核要主动发消息给用户进程时,直接调用一个类send函数即可 (netlink_unicast系列函数)。当然这个过程中,有很多结构体变量需要填充。具体用法请google,我差不多忘光了~。 三、基于netlink的共享内存 这里的共享内存是指内核与用户空间,而不是通常的用户进程间的。 大概流程如下。 内核:__get_free__pages分配连续的物理内存页(貌似返回的其实是虚拟地址)-->SetPageReserved每一页(每一 页都需这个操作,参见源码)-->如果用户空间通过netlink要求获取共享内存的起始物理地址,将__get_free__pages返回的地 址__pa下发给用户空间。 用户空间:open "/dev/shm"(一个读写物理内存的设备,具体请google"linux读写物理内存")-->发netlink消息给内核,得到共享内存的起始物理地址-->mmap上步得到的物理地址。 四、源码说明 正如二中提到的,netlink接口在各版本中接口变化很大,本人懒惰及时间紧,只实验了比较新的内核2.6.25,源码如需移植到老版本上,需要一些改动,敬请原谅。 另外,由于本源码是从一个比较大的程序里抠出来的,所以命名什么的可能有点怪异~ 源码包括shm_k.c(内核模块)和用户空间程序(shm_u.c)两部分。shm_k.c模块的工作是:分配8KB内存供共享,并把前几十个字节置 为“hello, use share memory with netlink"字样。shm_u.c工作是:读取共享内存的前几十个字节,将内容输出在stdout上。 特别说明: 该程序只适用于2.6.25左右的新版本! 用__get_free_pages分配连续内存时,宜先用get_order获取页数,然后需将各页都SetPageReserved下,同样地,释放内存时,需要对每一页调用ClearPageReserved。 我成功用该程序分配了4MB共享内存,运行还比较稳定。 因为linux内核的默认设置,一般情况下用get_free_pages只能分配到4MB内存左右,如需增大,可能需改相应的参数并重新编译内核。 五、内核源码 1、common.h(内核与用户空间都用到的头文件) #ifndef _COMMON_H_ #define _COMMON_H_ /* protocol type */ #define SHM_NETLINK 30 /* message type */ #define SHM_GET_SHM_INFO 1 /* you can add othe message type here */ #define SHM_WITH_NETLINK "hello, use share memory with netlink" typedef struct _nlk_msg { union _data { struct _shm_info { uint32_t mem_addr; uint32_t mem_size; }shm_info; /* you can add other content here */ }data; }nlk_msg_t; #endif /* _COMMON_H_ */ 2、shm_k.c(内核模块) #include #include #include #include #include #include #include #include #include "common.h" #define SHM_TEST_DEBUG #ifdef SHM_TEST_DEBUG #define SHM_DBG(args...) printk(KERN_DEBUG "SHM_TEST: " args) #else #define SHM_DBG(args...) #endif #define SHM_ERR(args...) printk(KERN_ERR "SHM_TEST: " args) static struct _glb_para { struct _shm_para { uint32_t mem_addr; /* memory starting address */ uint32_t mem_size; /* memory size */ uint32_t page_cnt; /* memory page count*/ uint16_t order; uint8_t mem_init_flag; /* 0, init failed; 1, init successful */ }shm_para; struct sock *nlfd; /* netlink descriptor */ uint32_t pid; /* user-space process's pid */ rwlock_t lock; }glb_para; static void init_glb_para(void); static int init_netlink(void); static void kernel_receive(struct sk_buff* __skb); static int nlk_get_mem_addr(struct nlmsghdr *pnhdr); static void clean_netlink(void); static int init_shm(void); static void clean_shm(void); static int __init init_shm_test(void); static void clean_shm_test(void); static void init_glb_para(void) { memset(&glb_para, 0, sizeof(glb_para)); } static int init_netlink(void) { rwlock_init(&glb_para.lock); SHM_DBG("linux version:%08x/n", LINUX_VERSION_CODE); #if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,1 ) glb_para.nlfd = netlink_kernel_create(SHM_NETLINK, kernel_receive); #elif(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) glb_para.nlfd = netlink_kernel_create(SHM_NETLINK, 0, kernel_receive, THIS_MODULE)); #else glb_para.nlfd = netlink_kernel_create(&init_net, SHM_NETLINK, 0, kernel_receive, NULL, THIS_MODULE); #endif if(glb_para.nlfd == NULL) { SHM_ERR("init_netlink::netlink_kernel_create error/n" ; return (-1); } return (0); } static void kernel_receive(struct sk_buff* __skb) { struct sk_buff *skb; struct nlmsghdr *nlh = NULL; int invalid; SHM_DBG("begin kernel_receive/n" ; skb = skb_get(__skb); invalid = 0; if(skb->len >= sizeof(struct nlmsghdr)) { nlh = (struct nlmsghdr *)skb->data; if((nlh->nlmsg_len >= sizeof(struct nlmsghdr)) && (skb->len >= nlh->nlmsg_len)) { switch(nlh->nlmsg_type) { case SHM_GET_SHM_INFO: SHM_DBG("receiv TA_GET_SHM_INFO/n" ; nlk_get_mem_addr(nlh); break; default: break; } } } kfree_skb(skb); } static int nlk_get_mem_addr(struct nlmsghdr *pnhdr) { int ret, size; unsigned char *old_tail; struct sk_buff *skb; struct nlmsghdr *nlh; struct _nlk_msg *p; glb_para.pid = pnhdr->nlmsg_pid; /* get the user-space process's pid */ size = NLMSG_SPACE(sizeof(struct _nlk_msg)); /* compute the needed memory size */ if( (skb = alloc_skb(size, GFP_ATOMIC)) == NULL) /* allocate memory */ { SHM_DBG("nlk_hello_test::alloc_skb error./n" ; return (-1); } old_tail = skb->tail; nlh = NLMSG_PUT(skb, 0, 0, SHM_GET_SHM_INFO, size-sizeof(struct nlmsghdr)); /* put netlink message structure into memory */ p = NLMSG_DATA(nlh); /* get netlink message body pointer */ p->data.shm_info.mem_addr = __pa(glb_para.shm_para.mem_addr); /* __pa:convert virtual address to physical address, which needed by /dev/mem */ p->data.shm_info.mem_size = glb_para.shm_para.mem_size; nlh->nlmsg_len = skb->tail - old_tail; NETLINK_CB(skb).pid = 0; /* from kernel */ NETLINK_CB(skb).dst_group = 0; read_lock_bh(&glb_para.lock); ret = netlink_unicast(glb_para.nlfd, skb, glb_para.pid, MSG_DONTWAIT); /* send message to user-space process */ read_unlock_bh(&glb_para.lock); SHM_DBG("nlk_get_mem_addr ok./n" ; return (ret); nlmsg_failure: SHM_DBG("nlmsg_failure/n" ; if(skb) { kfree_skb(skb); } return (-1); } static void clean_netlink(void) { if(glb_para.nlfd != NULL) { sock_release(glb_para.nlfd->sk_socket); } } static int init_shm(void) { int i; char *p; uint32_t page_addr; glb_para.shm_para.order = get_order(1024* ; /* allocate 8kB */ glb_para.shm_para.mem_addr = __get_free_pages(GFP_KERNEL, glb_para.shm_para.order); if(glb_para.shm_para.mem_addr == 0) { SHM_ERR("init_mem_pool::__get_free_pages error./n" ; glb_para.shm_para.mem_init_flag = 0; return (-1); } else { glb_para.shm_para.page_cnt = (1< glb_para.shm_para.mem_init_flag = 1; page_addr = glb_para.shm_para.mem_addr; SHM_DBG("size=%08x, page_cnt=%d/n", glb_para.shm_para.mem_size, glb_para.shm_para.page_cnt); for(i = 0; i < glb_para.shm_para.page_cnt; i++) { SetPageReserved(virt_to_page(page_addr)); /* reserved for used */ page_addr += PAGE_SIZE; } p = (char *)glb_para.shm_para.mem_addr; strcpy(p, SHM_WITH_NETLINK); /* write */ SHM_DBG("__get_free_pages ok./n" ; } return (0); } static void clean_shm(void) { int i; uint32_t page_addr; if(glb_para.shm_para.mem_init_flag == 1) { page_addr = glb_para.shm_para.mem_addr; for(i = 0; i < glb_para.shm_para.page_cnt; i++) { ClearPageReserved(virt_to_page(page_addr)); page_addr += PAGE_SIZE; } free_pages(glb_para.shm_para.mem_addr, glb_para.shm_para.order); } } static int __init init_shm_test(void) { init_glb_para(); if(init_netlink() < 0) { SHM_ERR("init_shm_test::init_netlink error./n" ; return (-1); } SHM_DBG("init_netlink ok./n" ; if(init_shm() < 0) { SHM_ERR("init_shm_test::init_mem_pool error./n"); clean_shm_test(); return (-1); } SHM_DBG("init_mem_pool ok./n"); return (0); } static void clean_shm_test(void) { clean_shm(); clean_netlink(); SHM_DBG("ta_exit ok./n"); } module_init(init_shm_test); module_exit(clean_shm_test); MODULE_LICENSE("GPL"); MODULE_AUTHOR("bripengandre ( [email protected] )"); MODULE_DESCRIPTION("Memory Share between user-space and kernel-space with netlink."); 3、shm_u.c(用户进程) #include #include #include #include #include #include #include #include #include #include #include #include "common.h" /* netlink */ #define MAX_SEND_BUF_SIZE 2500 #define MAX_RECV_BUF_SIZE 2500 #define SHM_TEST_DEBUG #ifdef SHM_TEST_DEBUG #define SHM_DBG(args...) fprintf(stderr, "SHM_TEST: " args) #else #define SHM_DBG(args...) #endif #define SHM_ERR(args...) fprintf(stderr, "SHM_TEST: " args) struct _glb_para { struct _shm_para { uint32_t mem_addr; uint32_t mem_size; }shm_para; int nlk_fd; char send_buf[MAX_SEND_BUF_SIZE]; char recv_buf[MAX_RECV_BUF_SIZE]; }glb_para; static void init_glb_para(void); static int create_nlk_connect(void); static int nlk_get_shm_info(void); static int init_mem_pool(void); int main(int argc ,char *argv[]) { char *p; init_glb_para(); if(create_nlk_connect() < 0) { SHM_ERR("main::create_nlk_connect error./n"); return (1); } if(nlk_get_shm_info() < 0) { SHM_ERR("main::nlk_get_shm_info error./n"); return (1); } init_mem_pool(); /* printf the first 30 bytes */ p = (char *)glb_para.shm_para.mem_addr; p[strlen(SHM_WITH_NETLINK)] = '/0'; printf("the first 30 bytes of shm are: %s/n", p); return (0); } static void init_glb_para(void) { memset(&glb_para, 0, sizeof(glb_para)); } static int create_nlk_connect(void) { int sockfd; struct sockaddr_nl local; sockfd = socket(PF_NETLINK, SOCK_RAW, SHM_NETLINK); if(sockfd < 0) { SHM_ERR("create_nlk_connect::socket error:%s/n", strerror(errno)); return (-1); } memset(&local, 0, sizeof(local)); local.nl_family = AF_NETLINK; local.nl_pid = getpid(); local.nl_groups = 0; if(bind(sockfd, (struct sockaddr*)&local, sizeof(local)) != 0) { SHM_ERR("create_nlk_connect::bind error: %s/n", strerror(errno)); return -1; } glb_para.nlk_fd = sockfd; return (sockfd); } static int nlk_get_shm_info(void) { struct nlmsghdr *nlh; struct _nlk_msg *p; struct sockaddr_nl kpeer; int recv_len, kpeerlen; memset(&kpeer, 0, sizeof(kpeer)); kpeer.nl_family = AF_NETLINK; kpeer.nl_pid = 0; kpeer.nl_groups = 0; memset(glb_para.send_buf, 0, sizeof(glb_para.send_buf)); nlh = (struct nlmsghdr *)glb_para.send_buf; nlh->nlmsg_len = NLMSG_SPACE(0); nlh->nlmsg_flags = 0; nlh->nlmsg_type = SHM_GET_SHM_INFO; nlh->nlmsg_pid = getpid(); sendto(glb_para.nlk_fd, nlh, nlh->nlmsg_len, 0, (struct sockaddr*)&kpeer, sizeof(kpeer)); memset(glb_para.send_buf, 0, sizeof(glb_para.send_buf)); kpeerlen = sizeof(struct sockaddr_nl); recv_len = recvfrom(glb_para.nlk_fd, glb_para.recv_buf, sizeof(glb_para.recv_buf), 0, (struct sockaddr*)&kpeer, &kpeerlen); p = NLMSG_DATA((struct nlmsghdr *) glb_para.recv_buf); SHM_DBG("%d, errno=%d.%s, %08x, %08x/n", recv_len, errno, strerror(errno), p->data.shm_info.mem_addr, p->data.shm_info.mem_size); glb_para.shm_para.mem_addr = p->data.shm_info.mem_addr; glb_para.shm_para.mem_size = p->data.shm_info.mem_size; return (0); } static int init_mem_pool(void) { int map_fd; void *map_addr; map_fd = open("/dev/mem", O_RDWR); if(map_fd < 0) { SHM_ERR("init_mem_pool: pen %s error: %s/n", "/dev/mem", strerror(errno)); return (-1); } map_addr = mmap(0, glb_para.shm_para.mem_size, PROT_READ|PROT_WRITE, MAP_SHARED, map_fd, glb_para.shm_para.mem_addr); if(map_addr == NULL) { SHM_ERR("init_mem_pool::mmap error: %s/n", strerror(errno)); return (-1); } glb_para.shm_para.mem_addr = (uint32_t)map_addr; return (0); } 4、Makefile #PREFIX = powerpc-e300c3-linux-gnu- CC ?= $(PREFIX)gcc KERNELDIR ?= /lib/modules/`uname -r`/build all: modules app obj-m:= shm_k.o module-objs := shm_k.c modules: make -C $(KERNELDIR) M=`pwd` modules app: shm_u.o $(CC) -o shm_u shm_u.c clean: rm -rf *.o Module.symvers modules.order shm_u shm_k.ko shm_k.mod.c .tmp_versions .shm_k.* |