/* Per-channel bookkeeping for the legacy ISA DMA channels. */
struct dma_chan {
	int lock;               /* non-zero while the channel is claimed */
	const char *device_id;  /* name passed by the claiming driver (see request_dma) */
};

/*
 * Channel allocation table. Channel 4 is pre-marked busy: on PC
 * hardware it cascades the slave DMA controller into the master and
 * is never available to drivers.
 */
static struct dma_chan dma_chan_busy[MAX_DMA_CHANNELS] = {
	[4] = { 1, "cascade" },
};
/*
 * Map 1 MB of physical memory starting at the 31 MB mark into kernel
 * virtual address space so the CPU can reach it as a DMA buffer.
 * NOTE(review): presumably this region was reserved at boot (e.g. by
 * booting with mem=31M) so the page allocator never hands it out —
 * confirm against the surrounding text.
 */
dmabuf = ioremap( 0x1F00000 /* 31M */, 0x100000 /* 1M */);
/*
 * Legacy ISA DMA channel allocator interface: request_dma() claims
 * @channel on behalf of the driver named @name; free_dma() releases it.
 * NOTE(review): return convention (0 on success, non-zero on failure)
 * is assumed from kernel practice — confirm in kernel/dma.c.
 */
int request_dma(unsigned int channel, const char *name);
void free_dma(unsigned int channel);
/*
 * pci_alloc_consistent - PCI-flavoured wrapper around dma_alloc_coherent().
 * Allocates a coherent DMA buffer of @size bytes for @hwdev (or for no
 * particular device when @hwdev is NULL), storing the bus address in
 * *dma_handle. Always allocates atomically (GFP_ATOMIC).
 */
static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
		dma_addr_t *dma_handle)
{
	struct device *dev = NULL;

	if (hwdev != NULL)
		dev = &hwdev->dev;
	return dma_alloc_coherent(dev, size, dma_handle, GFP_ATOMIC);
}
/*
 * Describes a device-private coherent DMA memory region
 * (hung off dev->dma_mem and consumed by dma_alloc_coherent()).
 */
struct dma_coherent_mem {
	void *virt_base;       /* kernel virtual base of the region */
	u32 device_base;       /* bus/device address of the same region */
	int size;              /* region size — in pages, per the bitmap use below; confirm */
	int flags;             /* e.g. DMA_MEMORY_EXCLUSIVE */
	unsigned long *bitmap; /* allocation bitmap, one bit per page */
};
/*
 * dma_alloc_coherent - allocate a coherent (consistent) DMA buffer.
 * Returns the zeroed buffer's kernel virtual address and stores the
 * matching bus address in *dma_handle; returns NULL on failure.
 * Prefers the device's private coherent region (dev->dma_mem) when one
 * exists, otherwise falls back to the normal page allocator.
 */
void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, int gfp)
{
	void *ret;
	/* If this is a device with a private DMA region, mem = dev->dma_mem */
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);	/* convert size into a page order */
	/* the zone is chosen below, so strip caller-supplied zone modifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
	if (mem) {
		/* device's own DMA region: find 2^order free pages in its bitmap */
		int page = bitmap_find_free_region(mem->bitmap, mem->size, order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		/* region exhausted; fall through to the page allocator unless forbidden */
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}
	/* not a device-private DMA mapping: use the page allocator */
	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;	/* device cannot address full 32-bit space */
	/* allocate free pages */
	ret = (void *)__get_free_pages(gfp, order);
	if (ret != NULL) {
		memset(ret, 0, size);	/* zero the buffer */
		*dma_handle = virt_to_phys(ret);	/* bus address == physical address here */
	}
	return ret;
}
/*
 * pci_map_single - PCI-flavoured wrapper around dma_map_single().
 * Maps @ptr/@size for streaming DMA in @direction (a PCI_DMA_* value,
 * forwarded as the generic enum) and returns the bus address.
 */
static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
		size_t size, int direction)
{
	/*
	 * Fix: the cast previously named the misspelled type
	 * "enum ma_data_direction"; the real enum is dma_data_direction
	 * (matching the identical cast in pci_map_sg).
	 */
	return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size,
			(enum dma_data_direction)direction);
}
/*
 * dma_map_single - map one contiguous buffer for streaming DMA
 * (i386 flavour). With no IOMMU the bus address is simply the physical
 * address, so the "mapping" reduces to an address translation plus a
 * write-buffer flush. @dev is unused; @direction only feeds the sanity
 * check.
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/*
	 * Some data may still be sitting in the processor's write
	 * buffers/caches, so it must be flushed explicitly before the
	 * device reads the memory.
	 */
	flush_write_buffers();
	return virt_to_phys(ptr);	/* virtual -> physical (== bus) address */
}
/*
 * One entry of a scatter/gather list: describes a single physically
 * contiguous chunk of an otherwise scattered buffer.
 */
struct scatterlist {
	struct page *page;       /* page holding this chunk */
	unsigned int offset;     /* chunk's offset within that page */
	dma_addr_t dma_address;  /* buffer bus address used in the scatter/gather operation */
	unsigned int length;     /* length of this buffer chunk */
};
#define sg_dma_address(sg) ((sg)->dma_address) //从该分散表项中返回总线地址 #define sg_dma_len(sg) ((sg)->length) //返回该缓冲区的长度
/*
 * pci_map_sg - PCI-flavoured wrapper: map a scatter/gather list for
 * streaming DMA by forwarding to the generic dma_map_sg() with the
 * pci_dev's embedded struct device.
 */
static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
		int nents, int direction)
{
	return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents,
			(enum dma_data_direction)direction);
}

/* include/asm-i386/dma-mapping.h */
/*
 * dma_map_sg - map @nents scatterlist entries for streaming DMA.
 * On i386 (no IOMMU) each entry's bus address is just page physical
 * address + offset. Returns the number of entries mapped (== nents).
 */
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction direction)
{
	int i;
	BUG_ON(direction == DMA_NONE);
	for (i = 0; i < nents; i++ ) {
		BUG_ON(!sg[i].page);
		/* translate page + page offset into a physical (bus) address */
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
	/*
	 * Some data may still be sitting in the processor's write
	 * buffers/caches, so flush explicitly before the device looks
	 * at the buffers.
	 */
	flush_write_buffers();
	return nents;
}
/*
 * A dma_pool: carves pages of coherent DMA memory into small
 * fixed-size blocks for a particular device.
 */
struct dma_pool {	/* the pool */
	struct list_head page_list;	/* pages backing the pool */
	spinlock_t lock;
	size_t blocks_per_page;		/* number of blocks carved from each page */
	size_t size;			/* size of one coherent block in the pool */
	struct device *dev;		/* device that will do the DMA */
	size_t allocation;		/* boundary blocks never cross; a multiple of size */
	char name [32];			/* pool name */
	wait_queue_head_t waitq;	/* allocators sleeping for a free block */
	struct list_head pools;		/* link in dev->dma_pools */
};
/*
 * dma_pool_create - create a pool of coherent DMA blocks.
 * @name:       pool name (copied, truncated to fit name[32])
 * @dev:        device that will perform DMA, or NULL
 * @size:       size in bytes of one block
 * @align:      block alignment (power of two); 0 means no alignment (1)
 * @allocation: boundary blocks must not cross; 0 picks block size or
 *              PAGE_SIZE, whichever is larger
 *
 * Returns the new pool, or NULL on bad parameters or allocation failure.
 */
struct dma_pool *dma_pool_create (const char *name, struct device *dev,
		size_t size, size_t align, size_t allocation)
{
	struct dma_pool *retval;

	if (align == 0)
		align = 1;
	if (size == 0)
		return NULL;
	else if (size < align)
		size = align;
	else if ((size % align) != 0) {
		/*
		 * Round size up to the next multiple of align.
		 * Fix: the original added "align + 1" before masking, which
		 * still produces an aligned size but over-allocates a whole
		 * align unit whenever size == k*align - 1; the standard
		 * round-up adds align - 1.
		 */
		size += align - 1;
		size &= ~(align - 1);
	}

	/*
	 * If a block is larger than a page the allocation unit is one
	 * block, otherwise it is one page.
	 */
	if (allocation == 0) {
		if (PAGE_SIZE < size)	/* page smaller than one coherent block */
			allocation = size;
		else
			allocation = PAGE_SIZE;	/* page-sized allocation unit */
		// FIXME: round up for less fragmentation
	} else if (allocation < size)
		return NULL;

	/* allocate the dma_pool descriptor itself */
	if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
		return retval;

	strlcpy (retval->name, name, sizeof retval->name);
	retval->dev = dev;

	/* initialise the descriptor */
	INIT_LIST_HEAD (&retval->page_list);	/* page list starts empty */
	spin_lock_init (&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head (&retval->waitq);	/* allocators may sleep here */

	if (dev) {
		down (&pools_lock);
		if (list_empty (&dev->dma_pools))
			/* first pool for this device: create the sysfs attribute file */
			device_create_file (dev, &dev_attr_pools);
		/* note: not currently insisting "name" be unique */
		list_add (&retval->pools, &dev->dma_pools);	/* attach the pool to dev */
		up (&pools_lock);
	} else
		INIT_LIST_HEAD (&retval->pools);

	return retval;
}
/*
 * dma_pool_alloc - take one block from the pool.
 * Scans the pool's existing pages for a free block (a set bit in the
 * page bitmap). If none is found a fresh page is allocated and mapped;
 * if even that fails and mem_flags allows __GFP_WAIT, the caller sleeps
 * briefly and retries. Returns the block's kernel virtual address and
 * stores its bus address in *handle, or returns NULL on failure.
 */
void *dma_pool_alloc (struct dma_pool *pool, int mem_flags, dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	int map, block;
	size_t offset;
	void *retval;

restart:
	spin_lock_irqsave (&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		int i;
		/* only cachable accesses here ... */
		/*
		 * Walk this page's bitmap one word at a time; each word
		 * covers BITS_PER_LONG blocks.
		 */
		for (map = 0, i = 0; i < pool->blocks_per_page;/* blocks per page */
				i += BITS_PER_LONG, map++) {	/* BITS_PER_LONG is 32 on i386 */
			if (page->bitmap [map] == 0)
				continue;	/* no free block in this word */
			block = ffz (~ page->bitmap [map]);	/* first set (= free) bit */
			if ((i + block) < pool->blocks_per_page) {
				clear_bit (block, &page->bitmap [map]);	/* mark it allocated */
				/* byte offset of the block from the page base */
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	/*
	 * No free block anywhere: allocate a new dma_page, link it on
	 * pool->page_list and give it a coherent mapping (one pool page).
	 * SLAB_ATOMIC: the attempt must not sleep inside the allocator.
	 */
	if (!(page = pool_alloc_page (pool, SLAB_ATOMIC))) {
		if (mem_flags & __GFP_WAIT) {
			/* caller may sleep: wait for memory to be freed, then retry */
			DECLARE_WAITQUEUE (wait, current);

			current->state = TASK_INTERRUPTIBLE;
			add_wait_queue (&pool->waitq, &wait);
			spin_unlock_irqrestore (&pool->lock, flags);

			schedule_timeout (POOL_TIMEOUT_JIFFIES);

			remove_wait_queue (&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit (0, &page->bitmap [0]);	/* take the first block of the new page */
	offset = 0;
ready:
	page->in_use++;
	retval = offset + page->vaddr;	/* kernel virtual address of the block */
	*handle = offset + page->dma;	/* matching bus (DMA) address */
#ifdef CONFIG_DEBUG_SLAB
	memset (retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return retval;
}
/*
 * dad_transfer - start a streaming-DMA transfer on the "dad" sample
 * device. @write selects the direction; @buffer/@count describe the
 * data. The buffer is mapped for DMA, the controller registers are
 * programmed, and the transfer is kicked off; completion arrives via
 * dad_interrupt() below.
 */
int dad_transfer(struct dad_dev *dev, int write, void *buffer, size_t count)
{
	dma_addr_t bus_addr;
	unsigned long flags;

	/* Map the buffer for DMA */
	dev->dma_dir = (write ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
	dev->dma_size = count;
	/* streaming mapping: translate the buffer's virtual address to a bus address */
	bus_addr = pci_map_single(dev->pci_dev, buffer, count, dev->dma_dir);
	dev->dma_addr = bus_addr;	/* bus address the device will DMA to/from */

	/*
	 * Program the DMA controller registers to set up the device.
	 * NOTE(review): kernel writeb/writel take (value, address); these
	 * calls pass the register field first — illustrative pseudo-code,
	 * verify the accessor signature before reusing.
	 */
	writeb(dev->registers.command, DAD_CMD_DISABLEDMA);
	/* select the transfer direction: read or write */
	writeb(dev->registers.command, write ? DAD_CMD_WR : DAD_CMD_RD);
	writel(dev->registers.addr, cpu_to_le32(bus_addr));	/* buffer bus address */
	writel(dev->registers.len, cpu_to_le32(count));	/* number of bytes to move */

	/* enable the controller: the DMA transfer starts now */
	writeb(dev->registers.command, DAD_CMD_ENABLEDMA);
	return 0;
}

/*
 * dad_interrupt - interrupt handler, called when the DMA transfer
 * completes. It tears down the streaming mapping so kernel code may
 * safely touch the buffer again.
 */
void dad_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct dad_dev *dev = (struct dad_dev *) dev_id;

	/* Make sure it's really our device interrupting */

	/* Unmap the DMA buffer */
	pci_unmap_single(dev->pci_dev, dev->dma_addr, dev->dma_size, dev->dma_dir);

	/* Only now is it safe to access the buffer, copy to user, etc. */
	...
}

/*
 * dad_open - open the device; this is the point where the IRQ and the
 * DMA channel should be requested.
 * NOTE(review): my_device is declared as a pointer yet accessed with
 * '.', and 'error' is never declared — pseudo-code from the original
 * text, not compilable as-is.
 */
int dad_open (struct inode *inode, struct file *filp)
{
	struct dad_device *my_device;

	/* SA_INTERRUPT: fast interrupt handler, IRQ line not shared */
	if ( (error = request_irq(my_device.irq, dad_interrupt, SA_INTERRUPT, "dad", NULL)) )
		return error; /* or implement blocking open */
	if ( (error = request_dma(my_device.dma, "dad")) ) {
		free_irq(my_device.irq, NULL);
		return error; /* or implement blocking open */
	}
	return 0;
}

/* The close routine matching open: release the DMA channel and the IRQ. */
void dad_close (struct inode *inode, struct file *filp)
{
	struct dad_device *my_device;

	free_dma(my_device.dma);
	free_irq(my_device.irq, NULL);
	……
}

/*
 * dad_dma_prepare - initialise the legacy DMA controller: program its
 * registers in preparation for a transfer. The controller is disabled
 * while being reprogrammed and re-enabled at the end.
 */
int dad_dma_prepare(int channel, int mode, unsigned int buf, unsigned int count)
{
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(channel);
	clear_dma_ff(channel);
	set_dma_mode(channel, mode);
	set_dma_addr(channel, virt_to_bus(buf));
	set_dma_count(channel, count);
	enable_dma(channel);
	release_dma_lock(flags);

	return 0;
}

/* dad_dma_isdone - check whether the DMA transfer finished successfully. */
int dad_dma_isdone(int channel)
{
	int residue;
	unsigned long flags = claim_dma_lock ();
	residue = get_dma_residue(channel);
	release_dma_lock(flags);
	return (residue == 0);	/* done when no bytes remain */
}