DMA (Direct Memory Access) is a mechanism, widely used in embedded Linux, that lets peripherals and main memory exchange data directly, without involving the CPU; when a transfer finishes, the DMA controller raises an interrupt to notify the CPU.
The typical use case is moving large blocks of data or transferring data frequently: CPU-driven copying would then consume a significant share of processing time, so DMA is used instead.
The biggest problem with DMA on cached systems is that cache contents and memory contents can become inconsistent. Architectures fall into two classes:
Coherent architectures (arm_coherent_dma_ops): the processor provides a hardware mechanism known as bus snooping or cache snooping;
Non-coherent architectures (arm_dma_ops): no extra hardware support for coherency management exists, so software has to handle it explicitly.
The DMA engine framework defines a DMA controller interface on top of the real DMA hardware. A DMA client (slave) goes through the following steps, sketched in the code below:
1. Allocate a DMA client channel: dma_request_chan();
2. Set client-device and controller specific parameters: dmaengine_slave_config();
3. Get a transaction descriptor: dmaengine_prep_slave_sg();
4. Submit the transaction: dmaengine_submit();
5. Issue the pending requests and wait for the callback notification: dma_async_issue_pending().
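A minimal sketch of those five steps for a hypothetical memory-to-device client; the channel name "tx", the FIFO address and the already-mapped dma_buf are all assumptions, not values from this article:

#include <linux/dmaengine.h>

static int demo_slave_tx(struct device *dev, dma_addr_t dma_buf, size_t len)
{
	struct dma_slave_config cfg = { 0 };
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	/* 1. Allocate a channel; "tx" is a hypothetical DT channel name. */
	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* 2. Describe the peripheral side of the transfer. */
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = 0x40001000;	/* hypothetical device FIFO address */
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dmaengine_slave_config(chan, &cfg);

	/* 3. Get a descriptor; prep_slave_single() is the one-buffer
	 * variant of dmaengine_prep_slave_sg(). */
	desc = dmaengine_prep_slave_single(chan, dma_buf, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		goto err;

	/* 4. Put the transaction on the pending queue. */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		goto err;

	/* 5. Start the queue; completion is reported via desc->callback. */
	dma_async_issue_pending(chan);
	return 0;
err:
	dma_release_channel(chan);
	return -EINVAL;
}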
Drivers work with virtual addresses (ioremap() maps a physical address range into kernel virtual space), but the memory a DMA engine accesses must be physically contiguous. It should therefore be allocated with kmalloc() (up to roughly 128 KB) or __get_free_pages() (up to roughly 8 MB), never with vmalloc().
A related concept is the Contiguous Memory Allocator (CMA), developed to allocate large, physically contiguous memory blocks; a hedged device-tree example of reserving a CMA pool follows.
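For illustration only, this is roughly how a CMA pool is reserved in a board device tree using the standard shared-dma-pool binding; the node name, size and property values here are placeholders, not values from this article:

/ {
	reserved-memory {
		#address-cells = <1>;
		#size-cells = <1>;
		ranges;

		/* default CMA pool, 64 MiB; hypothetical size */
		linux,cma {
			compatible = "shared-dma-pool";
			reusable;
			size = <0x4000000>;
			linux,cma-default;
		};
	};
};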
DMA mapping: the combination of allocating a DMA buffer and generating the bus address the device uses to access it. Buffers are usually mapped when the driver initializes and unmapped when it shuts down.
The mapping API comes in two flavors (a coherent-mapping sketch follows this list):
dma_alloc_coherent() //coherent DMA mapping: allocates the buffer and maps it in one step
dma_map_single() //streaming DMA mapping: maps a buffer that was allocated elsewhere
dma_free_coherent() //unmaps and frees a coherent DMA region
dma_unmap_single() //tears down a streaming mapping; call it once the DMA is finished, e.g. from the interrupt that reports completion
dma_map_single() calls dma_map_single_attrs(), which in turn calls arm_dma_map_page() to make sure that any data held in the CPU cache is appropriately discarded or written back.
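A minimal sketch of the coherent case, assuming a hypothetical platform device pdev and an arbitrary one-page buffer:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int coherent_demo(struct platform_device *pdev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* CPU virtual address and device (bus) address come back as a
	 * pair; the mapping stays coherent for its whole lifetime. */
	cpu_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
				      &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the device, touch cpu_addr from the CPU;
	 * no explicit cache maintenance is needed in between ... */

	dma_free_coherent(&pdev->dev, PAGE_SIZE, cpu_addr, dma_handle);
	return 0;
}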
Rules for streaming DMA mappings (a sketch that obeys them follows this list):
a buffer may only be used in the direction it was mapped for;
a mapped buffer belongs to the device, not to the CPU;
a buffer used to send data to the device must already contain that data before it is mapped;
unmapping a DMA buffer while the DMA is still active will cause severe system instability.
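A minimal streaming-mapping sketch under those rules, assuming a hypothetical struct device *dev; the buffer is filled before mapping and only touched by the CPU again after unmapping:

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

static int streaming_demo(struct device *dev)
{
	dma_addr_t dma;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	memset(buf, 0x55, PAGE_SIZE);	/* fill BEFORE mapping (rule 3) */

	/* From here on the buffer belongs to the device, and only in
	 * the DMA_TO_DEVICE direction (rules 1 and 2). */
	dma = dma_map_single(dev, buf, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		kfree(buf);
		return -EIO;
	}

	/* ... start the transfer and wait until it has fully completed ... */

	/* Unmap only after the DMA has stopped (rule 4). */
	dma_unmap_single(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
	kfree(buf);
	return 0;
}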
The kernel module below demonstrates this. The driver allocates two buffers, wbuf and rbuf, receives characters from user space, stores them in wbuf, and then sets up a memory-to-memory DMA transaction that copies wbuf into rbuf.
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
/* private structure */
//Private structure holding the per-device state.
struct dma_private
{
struct miscdevice dma_misc_device; //the misc (char) device
struct device *dev;
char *wbuf; //source buffer
char *rbuf; //destination buffer
struct dma_chan *dma_m2m_chan; //DMA channel bound to this device
struct completion dma_m2m_ok; //a common kernel pattern: start some activity outside the current thread, then wait for it; the worker signals completion with complete()
};
/* set the buffer size */
#define SDMA_BUF_SIZE (1024*63)
/* callback notification handling */
//Callback that reports completion of the DMA transaction.
static void dma_m2m_callback(void *data)
{
struct dma_private *dma_priv = data;
dev_info(dma_priv->dev, "%s finished DMA transaction\n", __func__);
complete(&dma_priv->dma_m2m_ok); //wake up the thread waiting in sdma_write()
if (*(dma_priv->rbuf) != *(dma_priv->wbuf)) {
dev_err(dma_priv->dev, "buffer copy failed!\n");
return; //the callback returns void; no error code can be propagated here
}
dev_info(dma_priv->dev, "buffer copy passed!\n");
dev_info(dma_priv->dev, "wbuf is %s\n", dma_priv->wbuf);
dev_info(dma_priv->dev, "rbuf is %s\n", dma_priv->rbuf);
}
//Talk to user space: copy_from_user() fetches the characters written to the char device and stores them in the wbuf buffer.
static ssize_t sdma_write(struct file * file, const char __user * buf,
size_t count, loff_t * offset)
{
struct dma_async_tx_descriptor *dma_m2m_desc;
struct dma_device *dma_dev;
struct dma_private *dma_priv;
dma_cookie_t cookie;
dma_addr_t dma_src;
dma_addr_t dma_dst;
/* retrieve the private structure */
dma_priv = container_of(file->private_data,
struct dma_private, dma_misc_device);
dma_dev = dma_priv->dma_m2m_chan->device;
if (count > SDMA_BUF_SIZE) //guard against overflowing the 63 KB buffer
return -EINVAL;
if(copy_from_user(dma_priv->wbuf, buf, count)){
return -EFAULT;
}
dev_info(dma_priv->dev, "The wbuf string is %s\n", dma_priv->wbuf);
dma_src = dma_map_single(dma_priv->dev, dma_priv->wbuf,
SDMA_BUF_SIZE, DMA_TO_DEVICE); //get the bus addresses dma_src and dma_dst for the buffers whose virtual addresses were retrieved with container_of() above
dev_info(dma_priv->dev, "dma_src map obtained");
dma_dst = dma_map_single(dma_priv->dev, dma_priv->rbuf,
SDMA_BUF_SIZE, DMA_FROM_DEVICE); //the destination is written by the DMA engine, so map it FROM the device
dev_info(dma_priv->dev, "dma_dst map obtained");
dma_m2m_desc = dma_dev->device_prep_dma_memcpy(dma_priv->dma_m2m_chan,
dma_dst,
dma_src,
SDMA_BUF_SIZE,
DMA_CTRL_ACK | DMA_PREP_INTERRUPT); //get the transaction descriptor; it must then be submitted with dmaengine_submit()
dev_info(dma_priv->dev, "successful descriptor obtained");
dma_m2m_desc->callback = dma_m2m_callback; //register the completion callback
dma_m2m_desc->callback_param = dma_priv;
init_completion(&dma_priv->dma_m2m_ok);
cookie = dmaengine_submit(dma_m2m_desc);
if (dma_submit_error(cookie)){
dev_err(dma_priv->dev, "Failed to submit DMA\n");
return -EINVAL;
};
dma_async_issue_pending(dma_priv->dma_m2m_chan);
wait_for_completion(&dma_priv->dma_m2m_ok);
dma_async_is_tx_complete(dma_priv->dma_m2m_chan, cookie, NULL, NULL);
dev_info(dma_priv->dev, "The rbuf string is %s\n", dma_priv->rbuf);
dma_unmap_single(dma_priv->dev, dma_src,
SDMA_BUF_SIZE, DMA_TO_DEVICE);
dma_unmap_single(dma_priv->dev, dma_dst,
SDMA_BUF_SIZE, DMA_FROM_DEVICE);
return count;
}
struct file_operations dma_fops = {
.write = sdma_write,
};
static int my_probe(struct platform_device *pdev)
{
int retval;
struct dma_private *dma_device;
dma_cap_mask_t dma_m2m_mask;
dev_info(&pdev->dev, "platform_probe enter\n");
dma_device = devm_kzalloc(&pdev->dev, sizeof(struct dma_private), GFP_KERNEL);
dma_device->dma_misc_device.minor = MISC_DYNAMIC_MINOR;
dma_device->dma_misc_device.name = "sdma_test";
dma_device->dma_misc_device.fops = &dma_fops;
dma_device->dev = &pdev->dev;
dma_device->wbuf = devm_kzalloc(&pdev->dev, SDMA_BUF_SIZE, GFP_KERNEL);
if(!dma_device->wbuf) {
dev_err(&pdev->dev, "error allocating wbuf !!\n");
return -ENOMEM;
}
dma_device->rbuf = devm_kzalloc(&pdev->dev, SDMA_BUF_SIZE, GFP_KERNEL);
if(!dma_device->rbuf) {
dev_err(&pdev->dev, "error allocating rbuf !!\n");
return -ENOMEM;
}
dma_cap_zero(dma_m2m_mask);
dma_cap_set(DMA_MEMCPY, dma_m2m_mask);
dma_device->dma_m2m_chan = dma_request_channel(dma_m2m_mask, 0, NULL);
if (!dma_device->dma_m2m_chan) {
dev_err(&pdev->dev, "Error opening the SDMA memory to memory channel\n");
return -EINVAL;
}
retval = misc_register(&dma_device->dma_misc_device);
if (retval) return retval;
platform_set_drvdata(pdev, dma_device);
dev_info(&pdev->dev, "platform_probe exit\n");
return 0;
}
static int my_remove(struct platform_device *pdev)
{
struct dma_private *dma_device = platform_get_drvdata(pdev);
dev_info(&pdev->dev, "platform_remove enter\n");
misc_deregister(&dma_device->dma_misc_device);
dma_release_channel(dma_device->dma_m2m_chan);
dev_info(&pdev->dev, "platform_remove exit\n");
return 0;
}
static const struct of_device_id my_of_ids[] = {
{ .compatible = "arrow,sdma_m2m"},
{},
};
MODULE_DEVICE_TABLE(of, my_of_ids);
static struct platform_driver my_platform_driver = {
.probe = my_probe,
.remove = my_remove,
.driver = {
.name = "sdma_m2m",
.of_match_table = my_of_ids,
.owner = THIS_MODULE,
}
};
static int __init demo_init(void)
{
int ret_val;
pr_info("demo_init enter\n");
ret_val = platform_driver_register(&my_platform_driver);
if (ret_val !=0)
{
pr_err("platform value returned %d\n", ret_val);
return ret_val;
}
pr_info("demo_init exit\n");
return 0;
}
static void __exit demo_exit(void)
{
pr_info("demo_exit enter\n");
platform_driver_unregister(&my_platform_driver);
pr_info("demo_exit exit\n");
}
module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("");
MODULE_DESCRIPTION("This is a SDMA memory to memory driver");
Device tree node:
sdma_m2m {
compatible = "arrow,sdma_m2m";
};
insmod sdma_imx_m2m.ko
echo abcdefg > /dev/sdma_test # write a string into wbuf
rmmod sdma_imx_m2m.ko
There are different ways to send the contents of several buffers over DMA: one mapping at a time, or all at once using scatter/gather. The sketch below shows the scatterlist mechanics; the full driver follows.
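A minimal sketch of building and mapping a scatterlist, assuming a hypothetical struct device *dev and three kmalloc'ed buffers of len bytes each:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int sg_demo(struct device *dev, void *buf0, void *buf1,
		   void *buf2, size_t len)
{
	struct scatterlist sgl[3];
	int nents;

	sg_init_table(sgl, 3);
	sg_set_buf(&sgl[0], buf0, len);
	sg_set_buf(&sgl[1], buf1, len);
	sg_set_buf(&sgl[2], buf2, len);

	/* One call maps all three buffers; nents may come back smaller
	 * than 3 if an IOMMU coalesced adjacent entries. */
	nents = dma_map_sg(dev, sgl, 3, DMA_TO_DEVICE);
	if (!nents)
		return -EIO;

	/* ... pass sgl/nents to a device_prep_dma_sg()-style call ... */

	dma_unmap_sg(dev, sgl, 3, DMA_TO_DEVICE);
	return 0;
}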
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/platform_data/dma-imx.h>
//Scatterlist structures and buffer pointer variables.
static dma_addr_t dma_dst;
static dma_addr_t dma_src;
static char *dma_dst_coherent;
static char *dma_src_coherent;
static unsigned int *wbuf, *wbuf2, *wbuf3;
static unsigned int *rbuf, *rbuf2, *rbuf3;
static struct dma_chan *dma_m2m_chan;
static struct completion dma_m2m_ok;
static struct scatterlist sg3[1],sg4[1];
static struct scatterlist sg[3],sg2[3];
#define SDMA_BUF_SIZE (63*1024)
static bool dma_m2m_filter(struct dma_chan *chan, void *param)
{
if (!imx_dma_is_general_purpose(chan))
return false;
chan->private = param;
return true;
}
static void dma_sg_callback(void *data)
{
pr_info("%s\n finished SG DMA transaction\n",__func__);
complete(&dma_m2m_ok);
}
static void dma_m2m_callback(void *data)
{
pr_info("%s\n finished DMA coherent transaction\n" ,__func__);
complete(&dma_m2m_ok);
}
//Queue the DMA operations, issue the pending requests, then set up and wait for the callback notifications.
static ssize_t sdma_write(struct file * filp, const char __user * buf,
size_t count, loff_t * offset)
{
unsigned int *index1, *index2, *index3, i;
struct dma_async_tx_descriptor *dma_m2m_desc;
struct dma_device *dma_dev;
dma_dev = dma_m2m_chan->device;
pr_info("sdma_write is called.\n");
index1 = wbuf;
index2 = wbuf2;
index3 = wbuf3;
for (i=0; i<SDMA_BUF_SIZE/4; i++) {
*(index1 + i) = 0x12345678;
}
for (i=0; i<SDMA_BUF_SIZE/4; i++) {
*(index2 + i) = 0x87654321;
}
for (i=0; i<SDMA_BUF_SIZE/4; i++) {
*(index3 + i) = 0xabcde012;
}
init_completion(&dma_m2m_ok);
if (count > SDMA_BUF_SIZE) //guard against overflowing the coherent buffer
return -EINVAL;
if(copy_from_user(dma_src_coherent, buf, count)){
return -EFAULT;
}
pr_info ("The string is %s\n", dma_src_coherent);
sg_init_table(sg, 3);
sg_set_buf(&sg[0], wbuf, SDMA_BUF_SIZE);
sg_set_buf(&sg[1], wbuf2, SDMA_BUF_SIZE);
sg_set_buf(&sg[2], wbuf3, SDMA_BUF_SIZE);
dma_map_sg(dma_dev->dev, sg, 3, DMA_TO_DEVICE);
sg_init_table(sg2, 3);
sg_set_buf(&sg2[0], rbuf, SDMA_BUF_SIZE);
sg_set_buf(&sg2[1], rbuf2, SDMA_BUF_SIZE);
sg_set_buf(&sg2[2], rbuf3, SDMA_BUF_SIZE);
dma_map_sg(dma_dev->dev, sg2, 3, DMA_FROM_DEVICE);
sg_init_table(sg3, 1);
sg_set_buf(sg3, dma_src_coherent, SDMA_BUF_SIZE);
dma_map_sg(dma_dev->dev, sg3, 1, DMA_TO_DEVICE);
sg_init_table(sg4, 1);
sg_set_buf(sg4, dma_dst_coherent, SDMA_BUF_SIZE);
dma_map_sg(dma_dev->dev, sg4, 1, DMA_FROM_DEVICE);
dma_m2m_desc = dma_dev->device_prep_dma_sg(dma_m2m_chan, sg2, 3, sg, 3, 0);
dma_m2m_desc->callback = dma_sg_callback;
dmaengine_submit(dma_m2m_desc);
dma_async_issue_pending(dma_m2m_chan);
wait_for_completion(&dma_m2m_ok);
dma_unmap_sg(dma_dev->dev, sg, 3, DMA_TO_DEVICE);
dma_unmap_sg(dma_dev->dev, sg2, 3, DMA_FROM_DEVICE);
for (i=0; i<SDMA_BUF_SIZE/4; i++) {
if (*(rbuf+i) != *(wbuf+i)) {
pr_info("buffer 1 copy failed!\n");
return -EINVAL;
}
}
pr_info("buffer 1 copy passed!\n");
for (i=0; i<SDMA_BUF_SIZE/4; i++) {
if (*(rbuf2+i) != *(wbuf2+i)) {
pr_info("buffer 2 copy failed!\n");
return -EINVAL;
}
}
pr_info("buffer 2 copy passed!\n");
for (i=0; i<SDMA_BUF_SIZE/4; i++) {
if (*(rbuf3+i) != *(wbuf3+i)) {
pr_info("buffer 3 copy failed!\n");
return -EINVAL;
}
}
pr_info("buffer 3 copy passed!\n");
reinit_completion(&dma_m2m_ok);
dma_m2m_desc = dma_dev->device_prep_dma_sg(dma_m2m_chan, sg4, 1, sg3, 1, 0);
dma_m2m_desc->callback = dma_m2m_callback;
dmaengine_submit(dma_m2m_desc);
dma_async_issue_pending(dma_m2m_chan);
wait_for_completion(&dma_m2m_ok);
dma_unmap_sg(dma_dev->dev, sg3, 1, DMA_TO_DEVICE);
dma_unmap_sg(dma_dev->dev, sg4, 1, DMA_FROM_DEVICE);
if (*(dma_src_coherent) != *(dma_dst_coherent)) {
pr_info("buffer copy failed!\n");
return -EINVAL;
}
pr_info("buffer coherent sg copy passed!\n");
pr_info("dma_src_coherent is %s\n", dma_src_coherent);
pr_info("dma_dst_coherent is %s\n", dma_dst_coherent);
return count;
}
struct file_operations dma_fops = {
.write = sdma_write,
};
static struct miscdevice dma_miscdevice = {
.minor = MISC_DYNAMIC_MINOR,
.name = "sdma_test",
.fops = &dma_fops,
};
static int my_probe(struct platform_device *pdev)
{
int retval;
dma_cap_mask_t dma_m2m_mask;
struct imx_dma_data m2m_dma_data = {0};
struct dma_slave_config dma_m2m_config = {0};
pr_info("platform_probe enter\n");
retval = misc_register(&dma_miscdevice);
if (retval) return retval;
pr_info("mydev: got minor %i\n",dma_miscdevice.minor);
wbuf = devm_kzalloc(&pdev->dev, SDMA_BUF_SIZE, GFP_KERNEL); //allocate the six buffers
if(!wbuf) {
pr_info("error wbuf !!!!!!!!!!!\n");
return -ENOMEM;
}
wbuf2 = devm_kzalloc(&pdev->dev, SDMA_BUF_SIZE, GFP_KERNEL);
if(!wbuf2) {
pr_info("error wbuf !!!!!!!!!!!\n");
return -ENOMEM;
}
wbuf3 = devm_kzalloc(&pdev->dev, SDMA_BUF_SIZE, GFP_KERNEL);
if(!wbuf3) {
pr_info("error wbuf2 !!!!!!!!!!!\n");
return -ENOMEM;
}
rbuf = devm_kzalloc(&pdev->dev, SDMA_BUF_SIZE, GFP_KERNEL);
if(!rbuf) {
pr_info("error rbuf !!!!!!!!!!!\n");
return -ENOMEM;
}
rbuf2 = devm_kzalloc(&pdev->dev, SDMA_BUF_SIZE, GFP_KERNEL);
if(!rbuf2) {
pr_info("error rbuf2 !!!!!!!!!!!\n");
return -ENOMEM;
}
rbuf3 = devm_kzalloc(&pdev->dev, SDMA_BUF_SIZE, GFP_KERNEL);
if(!rbuf3) {
pr_info("error rbuf2 !!!!!!!!!!!\n");
return -ENOMEM;
}
dma_dst_coherent = dma_alloc_coherent(&pdev->dev, SDMA_BUF_SIZE,
&dma_dst, GFP_DMA);
if (dma_dst_coherent == NULL) {
pr_err("dma_alloc_coherent failed\n");
return -ENOMEM;
}
dma_src_coherent = dma_alloc_coherent(&pdev->dev, SDMA_BUF_SIZE,
&dma_src, GFP_DMA);
if (dma_src_coherent == NULL) {
dma_free_coherent(&pdev->dev, SDMA_BUF_SIZE,
dma_dst_coherent, dma_dst);
pr_err("dma_alloc_coherent failed\n");
return -ENOMEM;
}
dma_cap_zero(dma_m2m_mask);
dma_cap_set(DMA_MEMCPY, dma_m2m_mask);
m2m_dma_data.peripheral_type = IMX_DMATYPE_MEMORY;
m2m_dma_data.priority = DMA_PRIO_HIGH;
dma_m2m_chan = dma_request_channel(dma_m2m_mask, dma_m2m_filter, &m2m_dma_data);
if (!dma_m2m_chan) {
pr_err("Error opening the SDMA memory to memory channel\n");
return -EINVAL;
}
dma_m2m_config.direction = DMA_MEM_TO_MEM;
dma_m2m_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dmaengine_slave_config(dma_m2m_chan, &dma_m2m_config);
return 0;
}
static int my_remove(struct platform_device *pdev)
{
misc_deregister(&dma_miscdevice);
dma_release_channel(dma_m2m_chan);
dma_free_coherent(&pdev->dev, SDMA_BUF_SIZE,
dma_dst_coherent, dma_dst);
dma_free_coherent(&pdev->dev, SDMA_BUF_SIZE,
dma_src_coherent, dma_src);
pr_info("platform_remove exit\n");
return 0;
}
static const struct of_device_id my_of_ids[] = {
{ .compatible = "arrow,sdma_m2m"},
{},
};
MODULE_DEVICE_TABLE(of, my_of_ids);
static struct platform_driver my_platform_driver = {
.probe = my_probe,
.remove = my_remove,
.driver = {
.name = "sdma_m2m",
.of_match_table = my_of_ids,
.owner = THIS_MODULE,
}
};
static int __init demo_init(void)
{
int ret_val;
ret_val = platform_driver_register(&my_platform_driver);
if (ret_val !=0)
{
pr_err("platform value returned %d\n", ret_val);
return ret_val;
}
return 0;
}
static void __exit demo_exit(void)
{
platform_driver_unregister(&my_platform_driver);
}
module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("");
MODULE_DESCRIPTION("This is a SDMA scatter/gather memory to memory driver");
For large buffers, shuffling data with copy_to_user() and copy_from_user() is inefficient and throws away the main advantage of DMA, which is moving data without the CPU. Linux therefore offers a kernel/user interface for this: user-space DMA means giving a user application access to the buffers involved in DMA transfers and letting it control those transfers.
A user process can map that memory explicitly with the mmap() system call: the driver's mmap() file operation maps device-driver memory into the user process address space, wiring user virtual addresses to the underlying physical pages (a hedged sketch of one way to do this follows).
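Besides remap_pfn_range(), which the driver below uses, coherent buffers can also be exposed with the dma_mmap_coherent() helper, which builds the user mapping in one call. A minimal sketch, assuming a hypothetical demo_priv structure filled in by an earlier dma_alloc_coherent():

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical private data; cpu_addr/dma_handle are assumed to come
 * from a dma_alloc_coherent() of at least the mmap'ed size. */
struct demo_priv {
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_handle;
};

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct demo_priv *priv = file->private_data;
	size_t size = vma->vm_end - vma->vm_start;

	/* Maps the whole coherent buffer into the calling process. */
	return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
				 priv->dma_handle, size);
}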
The driver below implements three file_operations callbacks:
the sdma_open() function;
the sdma_ioctl() function;
the sdma_mmap() function.
Device tree node:
sdma_m2m {
compatible = "arrow,sdma_m2m";
};
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/mm.h>
struct dma_private
{
struct miscdevice dma_misc_device;
struct device *dev;
char *wbuf;
char *rbuf;
struct dma_chan *dma_m2m_chan;
struct completion dma_m2m_ok;
dma_addr_t dma_src;
dma_addr_t dma_dst;
};
#define SDMA_BUF_SIZE (1024*63)
static void dma_m2m_callback(void *data)
{
struct dma_private *dma_priv = data;
dev_info(dma_priv->dev, "%s\n finished DMA transaction" ,__func__);
complete(&dma_priv->dma_m2m_ok);
}
static int sdma_open(struct inode * inode, struct file * file)
{
struct dma_private *dma_priv;
dma_priv = container_of(file->private_data,
struct dma_private, dma_misc_device);
dma_priv->wbuf = kzalloc(SDMA_BUF_SIZE, GFP_DMA);
if(!dma_priv->wbuf) {
dev_err(dma_priv->dev, "error allocating wbuf !!\n");
return -ENOMEM;
}
dma_priv->rbuf = kzalloc(SDMA_BUF_SIZE, GFP_DMA);
if(!dma_priv->rbuf) {
dev_err(dma_priv->dev, "error allocating rbuf !!\n");
kfree(dma_priv->wbuf); //do not leak wbuf on the error path
return -ENOMEM;
}
dma_priv->dma_src = dma_map_single(dma_priv->dev, dma_priv->wbuf,
SDMA_BUF_SIZE, DMA_TO_DEVICE); //map wbuf up front so that sdma_mmap() already has a bus address to expose to user space
return 0;
}
static long sdma_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct dma_async_tx_descriptor *dma_m2m_desc;
struct dma_device *dma_dev;
struct dma_private *dma_priv;
dma_cookie_t cookie;
dma_priv = container_of(file->private_data,
struct dma_private, dma_misc_device);
dma_dev = dma_priv->dma_m2m_chan->device;
dma_sync_single_for_device(dma_priv->dev, dma_priv->dma_src,
SDMA_BUF_SIZE, DMA_TO_DEVICE); //wbuf was already mapped in sdma_open(); just flush the writes the user made through the mmap'ed region
dma_priv->dma_dst = dma_map_single(dma_priv->dev, dma_priv->rbuf,
SDMA_BUF_SIZE, DMA_FROM_DEVICE);
dma_m2m_desc = dma_dev->device_prep_dma_memcpy(dma_priv->dma_m2m_chan,
dma_priv->dma_dst,
dma_priv->dma_src,
SDMA_BUF_SIZE,
DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
dev_info(dma_priv->dev, "successful descriptor obtained");
dma_m2m_desc->callback = dma_m2m_callback;
dma_m2m_desc->callback_param = dma_priv;
init_completion(&dma_priv->dma_m2m_ok);
cookie = dmaengine_submit(dma_m2m_desc);
if (dma_submit_error(cookie)){
dev_err(dma_priv->dev, "Failed to submit DMA\n");
return -EINVAL;
};
dma_async_issue_pending(dma_priv->dma_m2m_chan);
wait_for_completion(&dma_priv->dma_m2m_ok);
dma_async_is_tx_complete(dma_priv->dma_m2m_chan, cookie, NULL, NULL);
dma_unmap_single(dma_priv->dev, dma_priv->dma_src,
SDMA_BUF_SIZE, DMA_TO_DEVICE);
dma_unmap_single(dma_priv->dev, dma_priv->dma_dst,
SDMA_BUF_SIZE, DMA_FROM_DEVICE);
if (*(dma_priv->rbuf) != *(dma_priv->wbuf)) {
dev_err(dma_priv->dev, "buffer copy failed!\n");
return -EINVAL;
}
dev_info(dma_priv->dev, "buffer copy passed!\n");
dev_info(dma_priv->dev, "wbuf is %s\n", dma_priv->wbuf);
dev_info(dma_priv->dev, "rbuf is %s\n", dma_priv->rbuf);
kfree(dma_priv->wbuf);
kfree(dma_priv->rbuf);
return 0;
}
static int sdma_mmap(struct file *file, struct vm_area_struct *vma) {
struct dma_private *dma_priv;
dma_priv = container_of(file->private_data,
struct dma_private, dma_misc_device);
if(remap_pfn_range(vma, vma->vm_start, dma_priv->dma_src >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
struct file_operations dma_fops = {
.owner = THIS_MODULE,
.open = sdma_open,
.unlocked_ioctl = sdma_ioctl,
.mmap = sdma_mmap,
};
static int my_probe(struct platform_device *pdev)
{
int retval;
struct dma_private *dma_device;
dma_cap_mask_t dma_m2m_mask;
dev_info(&pdev->dev, "platform_probe enter\n");
dma_device = devm_kzalloc(&pdev->dev, sizeof(struct dma_private), GFP_KERNEL);
dma_device->dma_misc_device.minor = MISC_DYNAMIC_MINOR;
dma_device->dma_misc_device.name = "sdma_test";
dma_device->dma_misc_device.fops = &dma_fops;
dma_device->dev = &pdev->dev;
dma_cap_zero(dma_m2m_mask);
dma_cap_set(DMA_MEMCPY, dma_m2m_mask);
dma_device->dma_m2m_chan = dma_request_channel(dma_m2m_mask, 0, NULL);
if (!dma_device->dma_m2m_chan) {
dev_err(&pdev->dev, "Error opening the SDMA memory to memory channel\n");
return -EINVAL;
}
retval = misc_register(&dma_device->dma_misc_device);
if (retval) return retval;
platform_set_drvdata(pdev, dma_device);
dev_info(&pdev->dev, "platform_probe exit\n");
return 0;
}
static int my_remove(struct platform_device *pdev)
{
struct dma_private *dma_device = platform_get_drvdata(pdev);
dev_info(&pdev->dev, "platform_remove enter\n");
misc_deregister(&dma_device->dma_misc_device);
dma_release_channel(dma_device->dma_m2m_chan);
dev_info(&pdev->dev, "platform_remove exit\n");
return 0;
}
static const struct of_device_id my_of_ids[] = {
{ .compatible = "arrow,sdma_m2m"},
{},
};
MODULE_DEVICE_TABLE(of, my_of_ids);
static struct platform_driver my_platform_driver = {
.probe = my_probe,
.remove = my_remove,
.driver = {
.name = "sdma_m2m",
.of_match_table = my_of_ids,
.owner = THIS_MODULE,
}
};
static int __init demo_init(void)
{
int ret_val;
pr_info("demo_init enter\n");
ret_val = platform_driver_register(&my_platform_driver);
if (ret_val !=0)
{
pr_err("platform value returned %d\n", ret_val);
return ret_val;
}
pr_info("demo_init exit\n");
return 0;
}
static void __exit demo_exit(void)
{
pr_info("demo_exit enter\n");
platform_driver_unregister(&my_platform_driver);
pr_info("demo_exit exit\n");
}
module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(" ");
MODULE_DESCRIPTION("This is a SDMA mmap memory to memory driver");
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#define SDMA_BUF_SIZE (1024*63)
int main(void)
{
char *virtaddr;
char phrase[128];
int my_dev = open("/dev/sdma_test", O_RDWR);
if (my_dev < 0) {
perror("Fail to open device file: /dev/sdma_test.");
} else {
printf("Enter phrase :\n");
scanf("%[^\n]%*c", phrase);
virtaddr = (char *)mmap(0, SDMA_BUF_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, my_dev, 0);
if (virtaddr == MAP_FAILED) {
perror("mmap failed");
close(my_dev);
return 1;
}
strcpy(virtaddr, phrase);
ioctl(my_dev, 0); /* the driver ignores the command number */
close(my_dev);
}
return 0;
}
insmod sdma_imx_mmap.ko # load the module
./sdma # map the DMA buffer into user space and trigger the copy
rmmod sdma_imx_mmap.ko # unload the module
Thanks for reading, and good luck!
-by aiziyou