如果没啥思路的时候,就写写小代码吧。先理论,后实践。再看测试结果。理论全靠抄,代码自己敲。好像还挺押韵。实验计划如下:
第一篇,写一个通用框架,做到拿来就能用。
第二篇,实现mmap功能,内核中的read_buf和write_buf都映射到用户空间,然后呢,他们两个互相赋值,然后内核中通过定时器来改他们值,看他们会不会死掉,目的就是为了让他们死掉。
第三篇,实现mmap功能,内核中read_buf和write_buf使用DMA互相复制数据,然后呢,多开几个线程再写数据,争取写死它,然后再救活它。
并不是所有的DMA缓冲区都是驱动申请的,如果是驱动申请的,用一致性DMA缓冲区自然最方便,这直接考虑了Cache一致性问题。但是,在许多情况下,缓冲区来自内核的较上层(如网卡驱动中的网络报文、块设备驱动中要写入设备的数据等),上层很可能用普通的kmalloc()、__get_free_pages()等方法申请,这时候就要使用流式DMA映射。流式DMA缓冲区使用的一般步骤如下。
1)进行流式DMA映射。
2)执行DMA操作。
3)进行流式DMA去映射。
流式DMA映射操作在本质上大多就是进行Cache的使无效或清除操作,以解决Cache一致性问题。相对于一致性DMA映射而言,流式DMA映射的接口较为复杂。对于单个已经分配的缓冲区而言,使用dma_map_single()可实现流式DMA映射,该函数原型为:
dma_addr_t dma_map_single(struct device *dev, void *buffer, size_t size,
enum dma_data_direction direction);
如果映射成功,返回的是总线地址。注意该函数失败时并不返回NULL,而是返回一个特殊的错误地址,应使用dma_mapping_error()来检查映射是否失败。第4个参数为DMA的方向,可能的值包括DMA_TO_DEVICE、DMA_FROM_DEVICE、DMA_BIDIRECTIONAL和DMA_NONE。dma_map_single()的反函数为dma_unmap_single(),原型是:
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction);
通常情况下,设备驱动不应该访问unmap的流式DMA缓冲区,如果一定要这么做,可先使用如下函数获得DMA缓冲区的拥有权:
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t bus_addr,
size_t size, enum dma_data_direction direction);
在驱动访问完DMA缓冲区后,应该将其所有权返还给设备,这可通过如下函数完成:
void dma_sync_single_for_device(struct device *dev, dma_addr_t bus_addr,
size_t size, enum dma_data_direction direction);
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/dma-mapping.h>
/*
 * Trace helper: prints an info line tagged with the source file, function
 * and line number, so the shell logs below can be matched back to the code.
 * NOTE(review): printk() is called without a KERN_* level prefix, so the
 * message goes out at the kernel's default loglevel — consider KERN_INFO
 * or pr_info() in production code.
 */
#define DEBUG_CSS(format,...)\
printk("info:%s:%s:%d: "format"\n",\
__FILE__,__func__,__LINE__,\
##__VA_ARGS__)
/* Size in bytes of each DMA buffer: one 1280x800 frame at 1 byte/pixel. */
#define CSS_DMA_IMAGE_SIZE (1280*800)
/*
 * Per-device state for the css_dma experiment.
 * The file_operations table is embedded (not pointed to) on purpose:
 * _to_css_dev_() below recovers the owning device from file->f_op
 * via container_of(), avoiding any private_data bookkeeping.
 * NOTE(review): identifiers starting with an underscore followed by more
 * underscores/uppercase flirt with the implementation-reserved namespace;
 * kept as-is because callers/tests in the article depend on these names.
 */
struct _css_dev_{
struct file_operations _css_fops; /* embedded fops; anchor for container_of */
struct miscdevice misc;           /* misc framework registration handle */
void *read_buf;                   /* kernel virtual address of read buffer */
void *write_buf;                  /* kernel virtual address of write buffer */
dma_addr_t read_phys;             /* bus address of read_buf (unused yet) */
dma_addr_t write_phys;            /* bus address of write_buf (unused yet) */
struct spinlock slock;            /* guards buffer pointers */
struct mutex open_lock;           /* reserved for open-time serialization */
char name[10];                    /* human-readable tag used in traces */
};
/* Map a struct file back to its _css_dev_ via the embedded fops table. */
#define _to_css_dev_(file) (struct _css_dev_ *)container_of(file->f_op,struct _css_dev_,_css_fops)
/* open(): trace-only stub — logs the device tag and always succeeds. */
static int _css_open(struct inode *inode, struct file *file)
{
	struct _css_dev_ *dev = _to_css_dev_(file);

	DEBUG_CSS("css_dev->name = %s", dev->name);
	return 0;
}
/* read(): trace-only stub — returns 0 (EOF) so `cat` terminates at once. */
static ssize_t _css_read(struct file *file, char __user *ubuf, size_t size, loff_t *ppos)
{
	struct _css_dev_ *dev = _to_css_dev_(file);

	DEBUG_CSS("css_dev->name = %s", dev->name);
	return 0;
}
/*
 * mmap(): placeholder for this first article — maps nothing and reports
 * success. The actual remap of read_buf/write_buf into user space is the
 * subject of the next experiment (kmalloc'd buffers here are not page
 * aligned, so they cannot be remapped yet).
 */
static int _css_mmap (struct file *file, struct vm_area_struct *vma)
{
return 0;
}
/* write(): trace-only stub — claims everything was consumed so `echo` exits. */
static ssize_t _css_write(struct file *file, const char __user *ubuf, size_t size, loff_t *ppos)
{
	struct _css_dev_ *dev = _to_css_dev_(file);

	DEBUG_CSS("css_dev->name = %s", dev->name);
	return size;
}
/* release(): trace-only stub invoked on the last close of the device. */
static int _css_release (struct inode *inode, struct file *file)
{
	struct _css_dev_ *dev = _to_css_dev_(file);

	DEBUG_CSS("css_dev->name = %s", dev->name);
	return 0;
}
/*
 * The single device instance. Statically initialized; the remaining
 * fields (misc.fops link, locks, buffers) are filled in by css_init().
 * misc.minor = MISC_DYNAMIC_MINOR lets the misc core pick a free minor
 * (53 in the session log below); the node appears as /dev/css_dma.
 */
static struct _css_dev_ _global_css_dev = {
.name = "lkmao",
._css_fops = {
.owner = THIS_MODULE,
.mmap = _css_mmap,
.open = _css_open,
.release = _css_release,
.read = _css_read,
.write = _css_write,
},
.misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "css_dma",
}
};
/*
 * Module init: register the misc device, then allocate the two
 * device-managed DMA-capable buffers.
 *
 * Fixes over the original:
 *  - devm_kmalloc(GFP_KERNEL) may sleep and therefore must NOT be called
 *    under spin_lock_irqsave(); the lock served no purpose during init
 *    (nothing else can race before registration completes its setup),
 *    so the allocations run unlocked.
 *  - On allocation failure the misc device is now deregistered instead of
 *    being leaked; devm_kmalloc memory itself is released automatically
 *    with the device.
 *  - The real misc_register() error code is propagated instead of -EINVAL.
 *  - devm_kmalloc() returns NULL on failure (never an ERR_PTR), so the
 *    bogus IS_ERR() check is gone, as is the useless cast of
 *    &_global_css_dev.
 */
static int __init css_init(void)
{
	struct _css_dev_ *css_dev = &_global_css_dev;
	int ret;

	css_dev->misc.fops = &css_dev->_css_fops;
	mutex_init(&css_dev->open_lock);
	spin_lock_init(&css_dev->slock);

	ret = misc_register(&css_dev->misc);
	if (ret != 0) {
		DEBUG_CSS("misc_register error");
		return ret;
	}

	/* May sleep — must run without any spinlock held. */
	css_dev->read_buf = devm_kmalloc(css_dev->misc.this_device,
					 CSS_DMA_IMAGE_SIZE, GFP_DMA | GFP_KERNEL);
	if (css_dev->read_buf == NULL) {
		DEBUG_CSS("devm_kmalloc error");
		ret = -ENOMEM;
		goto err_deregister;
	}

	css_dev->write_buf = devm_kmalloc(css_dev->misc.this_device,
					  CSS_DMA_IMAGE_SIZE, GFP_DMA | GFP_KERNEL);
	if (css_dev->write_buf == NULL) {
		DEBUG_CSS("devm_kmalloc error");
		ret = -ENOMEM;
		goto err_deregister;
	}

	DEBUG_CSS("css_dev->read_buf = %p,css_dev->write_buf = %p",
		css_dev->read_buf, css_dev->write_buf);
	DEBUG_CSS("init ok");
	return 0;

err_deregister:
	misc_deregister(&css_dev->misc);
	return ret;
}
/*
 * Module exit: deregister the misc device. The devm_kmalloc'd buffers
 * are device-managed and are freed automatically at deregistration.
 */
static void __exit css_exit(void)
{
	misc_deregister(&_global_css_dev.misc);
	DEBUG_CSS("exit ok");
}
/* Module entry/exit hooks and license (GPL required for the symbols used). */
module_init(css_init);
module_exit(css_exit);
MODULE_LICENSE("GPL");
insmod模块,得到94c00010和94d00010两个地址,这两个地址都不是页对齐的。mmap时要求内存必须是页对齐的,下一篇实验会分配页对齐的内存块。
root@ATK-IMX6U:~# insmod csi_single.ko
[21059.466174] info:/big/csi_driver/css_dma/csi_single.c:css_init:105: css_dev->read_buf = 94c00010,css_dev->write_buf = 94d00010
[21059.484515] info:/big/csi_driver/css_dma/csi_single.c:css_init:108: init ok
root@ATK-IMX6U:~#
在/dev目录下看到设备,主设备号是10,次设备号是53。名字叫css_dma
root@ATK-IMX6U:~# ls -ls /dev/css_dma
0 crw------- 1 root root 10, 53 Oct 13 18:07 /dev/css_dma
使用lsmod,看到新的模块csi_single。
root@ATK-IMX6U:~# lsmod
Module Size Used by
csi_single 2346 0
root@ATK-IMX6U:~#
/proc/misc文件,该文件中都是当前已经注册的misc设备,我们的css_dma次设备号是53。
root@ATK-IMX6U:/proc# cat /proc/misc
53 css_dma
56 ubi_ctrl
57 pxp_device
58 memory_bandwidth
59 network_throughput
60 network_latency
61 cpu_dma_latency
62 mxc_asrc
130 watchdog
237 loop-control
183 hw_random
134 apm_bios
229 fuse
235 autofs
63 rfkill
root@ATK-IMX6U:/proc#
读/dev/css_dma设备,看到open,read,release三个函数被依次调用了。
root@ATK-IMX6U:~# cat /dev/css_dma
[21376.955683] info:/big/csi_driver/css_dma/csi_single.c:_css_open:35: css_dev->name = lkmao
[21376.963997] info:/big/csi_driver/css_dma/csi_single.c:_css_read:41: css_dev->name = lkmao
[21376.973368] info:/big/csi_driver/css_dma/csi_single.c:_css_release:58: css_dev->name = lkmao
root@ATK-IMX6U:~#
写设备/dev/css_dma设备,看到open,write,release三个函数被依次调用了。
root@ATK-IMX6U:~# echo "hello" > /dev/css_dma
[ 34.225581] info:/big/csi_driver/css_dma/csi_single.c:_css_open:35: css_dev->name = lkmao
[ 34.233885] info:/big/csi_driver/css_dma/csi_single.c:_css_write:51: css_dev->name = lkmao
[ 34.243082] info:/big/csi_driver/css_dma/csi_single.c:_css_release:58: css_dev->name = lkmao
root@ATK-IMX6U:~#
思考一下,如果我在测试代码里,read函数返回size,write返回0,cat和echo测试都会产生什么效果?