linux下DMA驱动

 /**
   * __dma_request_channel - try to allocate an exclusive channel
   * @mask: capabilities that the channel must satisfy
   * @fn: optional callback to disposition available channels
   * @fn_param: opaque parameter to pass to dma_filter_fn
   *
   * Returns a channel satisfying @mask, or NULL when none is available.
   * NOTE(review): declaration only — the function body is not shown in
   * this excerpt.
   */
  struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)

  /*
   * dma_release_channel - release a channel previously obtained through
   * __dma_request_channel(), making it available to other clients.
   * NOTE(review): declaration only — body not shown in this excerpt.
   */
  void dma_release_channel(struct dma_chan *chan)

 /**
   * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
   * @chan: DMA channel to offload copy to
   * @dest: destination address (virtual)
   * @src: source address (virtual)
   * @len: length
   *
   * Both @dest and @src must be mappable to a bus address according to the
   * DMA mapping API rules for streaming mappings.
   * Both @dest and @src must stay memory resident (kernel memory or locked
   * user space pages).
   *
   * Returns a cookie identifying the submitted transaction; its status can
   * later be polled with dma_async_is_tx_complete().
   * NOTE(review): declaration only — body not shown in this excerpt.
   */
  dma_cookie_t
  dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                          void *src, size_t len)

/**
  * dma_async_issue_pending - flush pending transactions to HW
  * @chan: target DMA channel
  *      
  * This allows drivers to push copies to HW in batches,
  * reducing MMIO writes where possible.
  */
 static inline void dma_async_issue_pending(struct dma_chan *chan)
 {               
         chan->device->device_issue_pending(chan);
 }                                       
                                         
 #define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)                       

/**
  * dma_async_is_tx_complete - poll for transaction completion
  * @chan: DMA channel
  * @cookie: transaction identifier to check status of
  * @last: returns last completed cookie, can be NULL
  * @used: returns last issued cookie, can be NULL
  *
  * If @last and @used are passed in, upon return they reflect the driver
  * internal state and can be used with dma_async_is_complete() to check
  * the status of multiple cookies without re-checking hardware state.
  */
 static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
         dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
 {
         struct dma_tx_state tx_state;
         enum dma_status ret;

         /* Ask the driver for the status of @cookie; it also fills in
          * its last-completed / last-issued cookies. */
         ret = chan->device->device_tx_status(chan, cookie, &tx_state);

         if (last != NULL)
                 *last = tx_state.last;
         if (used != NULL)
                 *used = tx_state.used;

         return ret;
 }

 #define dma_async_memcpy_complete(chan, cookie, last, used)\
         dma_async_is_tx_complete(chan, cookie, last, used)
/**
  * struct dma_chan - devices supply DMA channels, clients use them
  * @device: ptr to the dma device who supplies this channel, always !%NULL
  * @cookie: last cookie value returned to client
  * @chan_id: channel ID for sysfs
  * @dev: class device for sysfs
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client_count: how many clients are using this channel
  * @table_count: number of appearances in the mem-to-mem allocation table
  * @private: private data for certain client-channel associations
  */
 struct dma_chan {
         struct dma_device *device;
         dma_cookie_t cookie;
 
         /* sysfs */
         int chan_id;
         struct dma_chan_dev *dev;
 
         struct list_head device_node;
         struct dma_chan_percpu __percpu *local;
         int client_count;
         int table_count;
         void *private;
 };
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
  * ---dma generic offload fields---
  * @cookie: tracking cookie for this transaction, set to -EBUSY if
  *      this tx is sitting on a dependency list
  * @flags: flags to augment operation preparation, control completion, and
  *      communicate status
  * @phys: physical address of the descriptor
  * @chan: target channel for this operation
  * @tx_submit: set the prepared descriptor(s) to be executed by the engine
  * @callback: routine to call after this operation is complete
  * @callback_param: general parameter to pass to the callback routine
  * ---async_tx api specific fields---
  * @next: at completion submit this descriptor
  * @parent: pointer to the next level up in the dependency chain
  * @lock: protect the parent and next pointers
  */
 struct dma_async_tx_descriptor {
         dma_cookie_t cookie;
         enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
         dma_addr_t phys;
         struct dma_chan *chan;
         dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
         dma_async_tx_callback callback;
         void *callback_param;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
         /* Dependency-chain fields: only compiled in when channel
          * switching between dependent operations is enabled. */
         struct dma_async_tx_descriptor *next;
         struct dma_async_tx_descriptor *parent;
         spinlock_t lock;
 #endif
 };


最近因工作需要，要修改一些uboot相关的东西：需要在内核起来之前将数据从flash搬运到指定的内存空间去。这次主要的实现方法是通过DMA搬运，也正好借此机会熟悉了一把没有框架的裸机DMA驱动。

DMA有通道的概念，每个DMA通道有不同的优先级，通道0的优先级最高。传输方向分为内存到内存、内存到外设和外设到内存三种，这些类型都是通过在相应的寄存器位写入相应的值来配置的。

另外注意不要把DMA请求和DMA通道弄混：同一个DMA通道上可能对应多个不同的DMA请求。

主要分为三步：1. DMA初始化：清除所有通道的错误位和中断位，并使能DMA。2. 向DMA寄存器写入目的地址、源地址和传输大小；如果大小超过了阈值，就需要用到LLI链表，将要传输的数据分块，每块的地址和大小分别写入，块与块之间通过LLI这个结构进行连接以保证数据的连续性。3. DMA通道的终止：清除相应通道的错误位和中断位，并关闭该通道的使能位。

/*
 * Scatter/gather linked-list item (LLI) for the PL08x DMA controller,
 * used when a transfer is larger than the per-block size limit.
 * Each field's value is written into the channel's own private
 * registers (translated from the original Chinese comment).
 *
 * NOTE(review): the addresses programmed here are bus addresses, per
 * the surrounding text — not necessarily equal to physical addresses.
 * NOTE(review): on a 64-bit build the `struct dma_lli *` member would
 * widen the struct beyond a 4 x 32-bit register image — confirm this
 * is only ever built for the 32-bit boot environment.
 */
typedef struct dma_lli{
     uint32_t   srcaddr;              /* source bus address */
     uint32_t   destaddr;             /* destination bus address */
     struct dma_lli *lliaddr;         /* next LLI in the chain; presumably 0 ends the chain — verify against the controller spec */
     uint32_t   control;              /* per-block transfer control word (size etc.) */

 }dma_lli_t;

注意DMA中所写的源地址和目的地址都是指总线地址，它和物理地址还是有一点区别的：总线地址是从外设角度看内存的地址，而物理地址是从CPU角度看内存的地址。

因为是在boot阶段写的DMA驱动,所以并没有linux下那一套DMA接口,那一部分的内容等以后接触到了再去了解,不过这也对以后有框架的DMA驱动打下了基础。
因为DMA控制器使用的是pl08x系列,同时在网上也看到一篇比较好的关于linux中pl08xDMA驱动的文章。
http://www.crifan.com/files/doc/docbook/dma_pl08x_analysis/release/html/dma_pl08x_analysis.html

你可能感兴趣的:(linux驱动)