我的之前两篇文章有介绍到上位机软件的逻辑该如何控制,驱动代码的框架是怎样的,驱动的整体逻辑在linux系统中是如何实现的,感兴趣的小伙伴可以去考古。
Xilinx XDMA 上位机应用程序控制逻辑
Xilinx XDMA驱动代码分析及用法
XDMA 传输的核心部分代码是cdev_sgdma.c ,利用DMA进行数据传输,传输方式为sgdma的传输方式,
Scatter-Gather DMA ,分散/集中映射是流式 DMA 映射的一个特例。它将几个缓冲区集中到一起进行一次映射,并在一个 DMA 操作中传送所有数据。这些分散的缓冲区由分散表结构scatterlist来描述,多个分散的缓冲区的分散表结构组成缓冲区的struct scatterlist数组。
[DMA技术和及其SG模式](https://blog.csdn.net/chinamaoge/article/details/104606865)
cdev_sgdma.c 是xdma 驱动传输的主要接口,驱动正确安装后会生成xdma0_c2h_0 xdma0_h2c_0 的读写设备。
上位机程序通过文件 io 的 read\write 调用驱动的 read\write,以触发 sgdma 进行数据传输,传输完成会产生中断。至于中断是如何使用的,感兴趣的朋友可以看一下之前的文章。
char_sgdma_read 、char_sgdma_write 两个接口调用同一个接口进行数据传输,该接口以一个 bool 变量区分数据是读还是写。
函数原型:
static ssize_t char_sgdma_read_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos, bool write);
/*
 * char_sgdma_read_write() - common backend for the SGDMA character device
 * read()/write() file operations.
 *
 * @file:  open file; file->private_data holds the struct xdma_cdev.
 * @buf:   user-space buffer (cast to non-const internally for the read path).
 * @count: number of bytes to transfer.
 * @pos:   device (endpoint) address to transfer from/to.
 * @write: true for H2C (write), false for C2H (read).
 *
 * Returns bytes transferred from xdma_xfer_submit(), or a negative errno.
 * NOTE(review): *pos is read but never advanced here - presumably the
 * caller (or user space via pread/pwrite) manages the offset; confirm.
 */
static ssize_t char_sgdma_read_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos, bool write)
{
int rv;
ssize_t res = 0;
/* private_data was set to the xdma_cdev when the device was opened */
struct xdma_cdev *xcdev = (struct xdma_cdev *)file->private_data;
struct xdma_dev *xdev;
struct xdma_engine *engine;
struct xdma_io_cb cb;
/* sanity-check the cdev (third arg 1: an engine must be attached) */
rv = xcdev_check(__func__, xcdev, 1);
if (rv < 0)
return rv;
xdev = xcdev->xdev;
engine = xcdev->engine;
dbg_tfr("file 0x%p, priv 0x%p, buf 0x%p,%llu, pos %llu, W %d, %s.\n",
file, file->private_data, buf, (u64)count, (u64)*pos, write,
engine->name);
/* the engine direction must match the requested transfer direction */
if ((write && engine->dir != DMA_TO_DEVICE) ||
(!write && engine->dir != DMA_FROM_DEVICE)) {
pr_err("r/w mismatch. W %d, dir %d.\n",
write, engine->dir);
return -EINVAL;
}
/* reject buffers that violate the engine's alignment requirements */
rv = check_transfer_align(engine, buf, count, *pos, 1);
if (rv) {
pr_info("Invalid transfer alignment detected\n");
return rv;
}
/* describe the user buffer in an I/O control block for mapping */
memset(&cb, 0, sizeof(struct xdma_io_cb));
cb.buf = (char __user *)buf;	/* const cast: C2H writes into buf */
cb.len = count;
cb.ep_addr = (u64)*pos;
cb.write = write;
/* pin the user pages and build the scatter-gather list */
rv = char_sgdma_map_user_buf_to_sgl(&cb, write);
if (rv < 0)
return rv;
/* submit and wait; per-direction timeouts are in seconds, API takes ms */
res = xdma_xfer_submit(xdev, engine->channel, write, *pos, &cb.sgt,
0, write ? h2c_timeout * 1000 :
c2h_timeout * 1000);
/* always unpin/unmap, whether the transfer succeeded or not */
char_sgdma_unmap_user_buf(&cb, write);
return res;
}
这个接口的主要的逻辑就是:
check_transfer_align
,主要是检查 AXI 总线的 non-incremental addressing mode
char_sgdma_map_user_buf_to_sgl
xdma_xfer_submit
char_sgdma_unmap_user_buf
我们先看char_sgdma_map_user_buf_to_sgl()
/*
 * char_sgdma_map_user_buf_to_sgl() - pin a user buffer and describe it
 * with a scatter-gather list for an SGDMA transfer.
 *
 * @cb:    I/O control block; cb->buf/cb->len describe the user buffer,
 *         cb->sgt/cb->pages/cb->pages_nr are filled in on success.
 * @write: transfer direction, passed through to the unmap helper on error.
 *
 * Returns 0 on success or a negative errno; on failure everything that
 * was allocated/pinned has been released via char_sgdma_unmap_user_buf().
 */
static int char_sgdma_map_user_buf_to_sgl(struct xdma_io_cb *cb, bool write)
{
	struct sg_table *sgt = &cb->sgt;
	unsigned long len = cb->len;
	void __user *buf = cb->buf;
	struct scatterlist *sg;	/* scatterlist entries for the sgdma transfer */
	/* number of pages spanned by [buf, buf + len), including partials */
	unsigned int pages_nr = (((unsigned long)buf + len + PAGE_SIZE - 1) -
				 ((unsigned long)buf & PAGE_MASK))
				>> PAGE_SHIFT;
	int i;
	int rv;

	if (pages_nr == 0)
		return -EINVAL;

	/*
	 * Allocate one scatterlist entry per page: a single entry cannot
	 * describe a virtually contiguous buffer whose backing pages are
	 * physically discontiguous.
	 */
	if (sg_alloc_table(sgt, pages_nr, GFP_KERNEL)) {
		pr_err("sgl OOM.\n");
		return -ENOMEM;
	}

	cb->pages = kcalloc(pages_nr, sizeof(struct page *), GFP_KERNEL);
	if (!cb->pages) {
		pr_err("pages OOM.\n");
		rv = -ENOMEM;
		goto err_out;
	}

	/*
	 * Pin the current process's pages; the fast path does not take the
	 * mm semaphore.  Pages are always pinned writable (1) so that C2H
	 * transfers can store into them.
	 */
	rv = get_user_pages_fast((unsigned long)buf, pages_nr, 1/* write */,
				 cb->pages);
	/* No pages were pinned */
	if (rv < 0) {
		pr_err("unable to pin down %u user pages, %d.\n",
		       pages_nr, rv);
		goto err_out;	/* cb->pages_nr is 0: nothing to unpin */
	}
	/* Less pages pinned than wanted */
	if (rv != pages_nr) {
		pr_err("unable to pin down all %u user pages, %d.\n",
		       pages_nr, rv);
		cb->pages_nr = rv;	/* release only what was pinned */
		rv = -EFAULT;
		goto err_out;
	}

	/* a duplicate page would be unpinned twice on cleanup - reject it */
	for (i = 1; i < pages_nr; i++) {
		if (cb->pages[i - 1] == cb->pages[i]) {
			pr_err("duplicate pages, %d, %d.\n",
			       i - 1, i);
			rv = -EFAULT;
			cb->pages_nr = pages_nr;
			goto err_out;
		}
	}

	/*
	 * Build the scatterlist: write dirty data-cache lines back to memory
	 * so the device sees current data, then point each entry at the
	 * page's in-page slice (offset + length).
	 */
	sg = sgt->sgl;
	for (i = 0; i < pages_nr; i++, sg = sg_next(sg)) {
		unsigned int offset = offset_in_page(buf);
		unsigned int nbytes =
			min_t(unsigned int, PAGE_SIZE - offset, len);

		flush_dcache_page(cb->pages[i]);
		sg_set_page(sg, cb->pages[i], nbytes, offset);
		buf += nbytes;
		len -= nbytes;
	}

	if (len) {
		/*
		 * Bug fix: this path used to "return -EINVAL" directly,
		 * leaking the sg_table, the pages array and every pinned
		 * page.  Record pages_nr and route through err_out so the
		 * unmap helper releases everything.
		 */
		pr_err("Invalid user buffer length. Cannot map to sgl\n");
		cb->pages_nr = pages_nr;
		rv = -EINVAL;
		goto err_out;
	}
	cb->pages_nr = pages_nr;
	return 0;

err_out:
	char_sgdma_unmap_user_buf(cb, write);

	return rv;
}
这是将用户层的数据进行映射,接下来就是数据的传输
xdma_xfer_submit
:
xdma_xfer_submit 会调用 transfer_queue 进行数据传输,它将数据 Add 到 DMA 的传输队列中,进行数据传输。
/*
 * transfer_queue() - queue a DMA transfer on an engine; start the engine
 * if it is currently idle.
 *
 * @engine:   DMA engine that will perform the transfer.
 * @transfer: transfer (descriptor list) to submit.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -EBUSY if the device
 * or engine is shutting down.  Takes and releases engine->lock.
 */
static int transfer_queue(struct xdma_engine *engine,
struct xdma_transfer *transfer)
{
int rv = 0;
struct xdma_transfer *transfer_started;
struct xdma_dev *xdev;
unsigned long flags;
/* validate all arguments before touching any engine state */
if (!engine) {
pr_err("dma engine NULL\n");
return -EINVAL;
}
if (!engine->xdev) {
pr_err("Invalid xdev\n");
return -EINVAL;
}
if (!transfer) {
pr_err("%s Invalid DMA transfer\n", engine->name);
return -EINVAL;
}
/* a transfer with no descriptors has nothing to do */
if (transfer->desc_num == 0) {
pr_err("%s void descriptors in the transfer list\n",
engine->name);
return -EINVAL;
}
dbg_tfr("%s (transfer=0x%p).\n", __func__, transfer);
xdev = engine->xdev;
/* refuse new work if the whole device has gone offline */
if (xdma_device_flag_check(xdev, XDEV_FLAG_OFFLINE)) {
pr_info("dev 0x%p offline, transfer 0x%p not queued.\n", xdev,
transfer);
return -EBUSY;
}
/* lock the engine state */
spin_lock_irqsave(&engine->lock, flags);
/* remember the submitting CPU (get_cpu/put_cpu: no migration inside) */
engine->prev_cpu = get_cpu();
put_cpu();
/* engine is being shutdown; do not accept new transfers */
if (engine->shutdown & ENGINE_SHUTDOWN_REQUEST) {
pr_info("engine %s offline, transfer 0x%p not queued.\n",
engine->name, transfer);
rv = -EBUSY;
goto shutdown;
}
/* mark the transfer as submitted */
transfer->state = TRANSFER_STATE_SUBMITTED;
/* add transfer to the tail of the engine transfer queue */
list_add_tail(&transfer->entry, &engine->transfer_list);
/* engine is idle? */
if (!engine->running) {
/* start engine */
dbg_tfr("%s(): starting %s engine.\n", __func__, engine->name);
transfer_started = engine_start(engine);
if (!transfer_started) {
pr_err("Failed to start dma engine\n");
/*
 * NOTE(review): rv is still 0 here, so the caller sees
 * success even though the engine failed to start (the
 * transfer stays queued) - confirm this is intended.
 */
goto shutdown;
}
dbg_tfr("transfer=0x%p started %s engine with transfer 0x%p.\n",
transfer, engine->name, transfer_started);
} else {
/* engine already running: the ISR will pick the transfer up */
dbg_tfr("transfer=0x%p queued, with %s engine running.\n",
transfer, engine->name);
}
shutdown:
/* unlock the engine state */
dbg_tfr("engine->running = %d\n", engine->running);
spin_unlock_irqrestore(&engine->lock, flags);
return rv;
}
/* transfer_queue() - Queue a DMA transfer on the engine
 *
 * @engine DMA engine doing the transfer
 * @transfer DMA transfer submitted to the engine
 *
 * Returns 0 on success, -EINVAL on bad arguments, -EBUSY if the device
 * or engine is shutting down.
 *
 * Takes and releases the engine spinlock
 */
static int transfer_queue(struct xdma_engine *engine,
struct xdma_transfer *transfer)
{
int rv = 0;
struct xdma_transfer *transfer_started;
struct xdma_dev *xdev;
unsigned long flags;
/* validate all arguments before touching any engine state */
if (!engine) {
pr_err("dma engine NULL\n");
return -EINVAL;
}
if (!engine->xdev) {
pr_err("Invalid xdev\n");
return -EINVAL;
}
if (!transfer) {
pr_err("%s Invalid DMA transfer\n", engine->name);
return -EINVAL;
}
/* a transfer with no descriptors has nothing to do */
if (transfer->desc_num == 0) {
pr_err("%s void descriptors in the transfer list\n",
engine->name);
return -EINVAL;
}
dbg_tfr("%s (transfer=0x%p).\n", __func__, transfer);
xdev = engine->xdev;
/* refuse new work if the whole device has gone offline */
if (xdma_device_flag_check(xdev, XDEV_FLAG_OFFLINE)) {
pr_info("dev 0x%p offline, transfer 0x%p not queued.\n", xdev,
transfer);
return -EBUSY;
}
/* lock the engine state */
spin_lock_irqsave(&engine->lock, flags);
/* remember the submitting CPU (get_cpu/put_cpu: no migration inside) */
engine->prev_cpu = get_cpu();
put_cpu();
/* engine is being shutdown; do not accept new transfers */
if (engine->shutdown & ENGINE_SHUTDOWN_REQUEST) {
pr_info("engine %s offline, transfer 0x%p not queued.\n",
engine->name, transfer);
rv = -EBUSY;
goto shutdown;
}
/* mark the transfer as submitted */
transfer->state = TRANSFER_STATE_SUBMITTED;
/* add transfer to the tail of the engine transfer queue */
list_add_tail(&transfer->entry, &engine->transfer_list);
/* engine is idle? */
if (!engine->running) {
/* start engine */
dbg_tfr("%s(): starting %s engine.\n", __func__, engine->name);
transfer_started = engine_start(engine);
if (!transfer_started) {
pr_err("Failed to start dma engine\n");
/*
 * NOTE(review): rv is still 0 here, so the caller sees
 * success even though the engine failed to start (the
 * transfer stays queued) - confirm this is intended.
 */
goto shutdown;
}
dbg_tfr("transfer=0x%p started %s engine with transfer 0x%p.\n",
transfer, engine->name, transfer_started);
} else {
/* engine already running: the ISR will pick the transfer up */
dbg_tfr("transfer=0x%p queued, with %s engine running.\n",
transfer, engine->name);
}
shutdown:
/* unlock the engine state */
dbg_tfr("engine->running = %d\n", engine->running);
spin_unlock_irqrestore(&engine->lock, flags);
return rv;
}
接下来就是unmap, 这里的代码很简单。
以上就是传输一次数据所调用的接口,与基本逻辑。