Zynq MPSoC 官方Linux DMA驱动调试

Zynq MPSoC Linux官方DMA驱动调试

前言

Zynq平台下DMA驱动主要有官方在用户层控制的和某大神写的axi_dma驱动,今天主要用官方的进行测试。
环境
petalinux 19.1
vivado 19.1

开始

首先搭建逻辑,注意这里DMA用64地址线,不然4GB以上的DDR访问不到,然后输入输出就挂到FIFO上就行。
Zynq MPSoC 官方Linux DMA驱动调试_第1张图片
然后FIFO这样挂上去就行
Zynq MPSoC 官方Linux DMA驱动调试_第2张图片
然后编译,这是一个漫长的过程,主要是我有个自己的IP以及VCU,所以特别慢。

Linux驱动

1、首先下载驱动,地址如下:添加链接描述
这个也是我参考的
2、然后根据他的说明进行编译,注意这里编译完了之后,启动的时候会出现一个问题,驱动挂载失败,然后报错,这个是petalinux_19.1的问题,然后官方给的解决方案是打补丁,链接如下:添加链接描述
3、然后呢,还有个问题就是DMA传输一遍后time_out,这里的解决方案也比较简单,如下:

/* Close the file and there's nothing to do for it
 */
/* Release (close) handler for the proxy-channel device node.
 *
 * The stock Xilinx dma-proxy driver called device_terminate_all() here;
 * terminating the channel on close made every subsequent DMA transfer
 * time out, so the call stays disabled (see the write-up above).
 *
 * The original kept the locals used only by the disabled call
 * (pchannel_p, dma_device); they are removed to avoid
 * -Wunused-variable warnings.
 */
static int release(struct inode *ino, struct file *file)
{
	/* Intentionally NOT stopping channel activity on close:
	 *   struct dma_proxy_channel *pchannel_p = file->private_data;
	 *   pchannel_p->channel_p->device->device_terminate_all(pchannel_p->channel_p);
	 * Commenting this out is the fix for the one-shot timeout problem.
	 */
	return 0;
}

4、测试,测试也用官方的就行

自定义自己的驱动

1、这里就是等待DMA读取完成,然后我们用来读取数据进行编码,如果超过5s,自行跳出

/* Blocking read from the DMA reserved-memory window.
 *
 * Sleeps (up to 5 s) until sync_callback() sets dma_rx_finish_flag,
 * then copies the received data into the user buffer and advances *ppos.
 *
 * Returns the number of bytes copied, 0 at end of window, or a
 * negative errno (-ENXIO past the window, -EINVAL on timeout,
 * -ERESTARTSYS if interrupted, -EFAULT on a bad user buffer).
 */
static ssize_t read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
{
	struct dma_proxy_channel *pchannel_p = (struct dma_proxy_channel *)file->private_data;
	unsigned int cnt = size;
	unsigned int p = *ppos;
	u64 phy_addr = pchannel_p->dma_handle;
	u64 off = phy_addr - pchannel_p->reva_mem_start; /* physical addr offset */
	char *data = pchannel_p->reva_mem_vaddr + off;
	long wait_ret;
	unsigned long not_copied;

	/* Clamp the request to the reserved memory window. */
	if (p >= MEM_SIZE)
		return cnt ? -ENXIO : 0;
	if (cnt > MEM_SIZE - p)
		cnt = MEM_SIZE - p;

	#define WAIT_TIMEOUT_DURATION (HZ * 5)
	wait_ret = wait_event_interruptible_timeout(pchannel_p->queue,
						    pchannel_p->dma_rx_finish_flag,
						    WAIT_TIMEOUT_DURATION);
	/* BUGFIX: original wrote "if (err = 0)" — an assignment, so the
	 * timeout branch was dead code and the copy always proceeded. */
	if (wait_ret == 0) {
		printk("[%s] read time out\r\n", DRIVER_NAME);
		return -EINVAL; /* kept for compatibility; -ETIMEDOUT would be more precise */
	}
	if (wait_ret < 0)
		return wait_ret; /* interrupted by a signal (-ERESTARTSYS) */

	pchannel_p->dma_rx_finish_flag = 0;
	/* copy_to_user() returns the number of bytes it could NOT copy
	 * (never negative), so the original "ret_val < 0" check could
	 * never trigger and partial copies were reported as success. */
	not_copied = copy_to_user(buf, data + p, cnt);
	if (not_copied) {
		printk("[%s] err copy data to user\r\n", DRIVER_NAME);
		return -EFAULT;
	}
	*ppos += cnt;

	return cnt;
}

2、然后在DMA回调函数那里修改:

/* DMA completion callback registered with the slave channel.
 *
 * Marks the RX transfer finished and wakes the reader blocked in
 * read() on the channel wait queue.
 */
static void sync_callback(void *pchannel_p)
{
	struct dma_proxy_channel *chan = pchannel_p;

	chan->dma_rx_finish_flag = 1; /* tell read() the transfer finished */
	/* read() sleeps on chan->queue waiting for this flag; without an
	 * explicit wake-up it could only return via timeout or a signal.
	 * NOTE(review): confirm no other code path wakes this queue. */
	wake_up_interruptible(&chan->queue);
	complete(&chan->cmp);
}

3、测试
就直接改的官方测试程序,然后我这个用fifo测试成功了,后面改到了自己的IP上,然后效果如下

root@RS-IPU:/mnt/bin# ./dma_test 1 1
DMA proxy test
[   26.797779] start ip
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
DMA proxy test complete

其实上面的结果就是一幅图像的ROI图,有1代表该区域为ROI,哈哈,成功了,开森,就是下面这个图了
Zynq MPSoC 官方Linux DMA驱动调试_第3张图片

END

最后,我这里开源一下我封装好的驱动,用C++写的,因为在上层,所以比较方便调试:

#ifndef _DMA_BSP_H_
#define _DMA_BSP_H_

// NOTE(review): the include targets were lost when this post was exported
// (the text inside the angle brackets was stripped). Restore them from the
// original project before building.
#include 
#include 
#include 
#include 
#include 
#include 
#include 

// Size of the shared DMA buffer: five 8-bit 4K (3840x2160) frames.
#define DMA_ALLOC_SIZE (3840 * 2160 * 5)

// ioctl command codes understood by the dma-proxy driver.
// ("TRANSFRER" typo kept as-is: the kernel side uses the same spelling.)
#define IO_TRANSFRER_TEST 0
#define IO_START_IP		  1
#define IO_SET_IMG_PARAM  2

// Payload type carried in usr_param.type. ("VEDIO" typo kept as-is.)
enum {
	TYPE_VEDIO = 0x01,	// video stream
	TYPE_IMG = 0x11		// still image
};

// Encoder/image parameters handed to the driver via IO_SET_IMG_PARAM.
struct usr_param {
	unsigned char type;	// TYPE_VEDIO or TYPE_IMG
	unsigned char fps;	// frames per second
	unsigned char Qp;	// encoder quantization parameter
	unsigned char resv;	// reserved / padding
	unsigned int width;	// frame width in pixels
	unsigned int height;	// frame height in pixels
};

// Memory layout shared (via mmap) with the kernel dma-proxy driver:
// one large transfer buffer plus per-transfer status and length.
struct dma_proxy_channel_interface {
	unsigned char buffer[DMA_ALLOC_SIZE];
	enum proxy_status { PROXY_NO_ERROR = 0, PROXY_BUSY = 1, PROXY_TIMEOUT = 2, PROXY_ERROR = 3 } status;
	unsigned int length;
};


// Thin C++ wrapper around one dma-proxy character device: opens the node,
// mmaps the shared channel interface, and exposes helpers for staging
// data and triggering transfers via ioctl.
class dma_ctrl
{
private:
    struct dma_proxy_channel_interface* dma_proxy_interface_p;	// mmap'ed shared region
    struct usr_param dev_ctrl_param;	// cached device parameters
    int dev_fd;	// device file descriptor
public:
    dma_ctrl(char* dev_name);
    ~dma_ctrl();
public:    
    // Copy `size` bytes from `buff` into the shared buffer at offset `off`.
    void dma_set_transmit_data(unsigned char* buff, unsigned int size, unsigned int off);
    // Copy `size` bytes from the shared buffer at offset `off` into `buff`.
    void dma_get_transmit_data(unsigned char* buff, unsigned int size, unsigned int off);
    // Push encoder/image parameters to the driver (IO_SET_IMG_PARAM).
    int dev_set_param(struct usr_param* param);
    // Set the transfer length for the next DMA operation.
    void dma_set_size(unsigned int size);
    // Trigger a transfer (IO_START_IP).
    int dma_start_transmit(void);
    // Trigger the loopback/self-test transfer (IO_TRANSFRER_TEST).
    int dma_test_transmit(void);
};
#endif

主程序:

#include "dma_bsp.h"
#include 

/* Open the dma-proxy device node and map its shared channel interface.
 * Terminates the process on failure (missing device or rejected mmap).
 */
dma_ctrl::dma_ctrl(char* dev_name)
{
    dev_fd = open(dev_name, O_RDWR);
    // BUGFIX: was "dev_fd < 1" — fd 0 is a valid descriptor; open()
    // signals failure with -1.
    if (dev_fd < 0) {
        printf("Unable to open device file, name: %s\r\n", dev_name);
        exit(EXIT_FAILURE);
    }

    dma_proxy_interface_p = (struct dma_proxy_channel_interface *)mmap(NULL, sizeof(struct dma_proxy_channel_interface),
                                    PROT_READ | PROT_WRITE, MAP_SHARED, dev_fd, 0);
    if (dma_proxy_interface_p == MAP_FAILED) {
        printf("Unable to mmap device file, name: %s\r\n", dev_name);
        close(dev_fd); // release the descriptor before bailing out
        exit(EXIT_FAILURE);
    }
}

/* Unmap the shared channel interface and close the device.
 * BUGFIX: the original never called munmap() (mapping leaked per object)
 * and tested "if (dev_fd)", which skips the valid fd 0 and happily
 * closes an invalid fd of -1.
 */
dma_ctrl::~dma_ctrl()
{
    if (dma_proxy_interface_p && dma_proxy_interface_p != MAP_FAILED) {
        munmap(dma_proxy_interface_p, sizeof(struct dma_proxy_channel_interface));
        dma_proxy_interface_p = NULL;
    }
    if (dev_fd >= 0) {
        close(dev_fd);
        dev_fd = -1;
    }
}

void dma_ctrl::dma_set_transmit_data(unsigned char* buff, unsigned int size, unsigned int off)
{
    if(!dma_proxy_interface_p) {
        printf("err init mmap\r\n");
    }
    memcpy(dma_proxy_interface_p->buffer + off, buff, size);
}

/* Record the length of the next DMA transfer in the shared interface.
 * Guard added for consistency with the data-copy helpers: the original
 * dereferenced dma_proxy_interface_p unchecked.
 */
void dma_ctrl::dma_set_size(unsigned int size)
{
    if (!dma_proxy_interface_p) {
        printf("err init mmap\r\n");
        return;
    }
    dma_proxy_interface_p->length = size;
}

void dma_ctrl::dma_get_transmit_data(unsigned char* buff, unsigned int size, unsigned int off)
{
    if(!dma_proxy_interface_p) {
        printf("err init mmap\r\n");
    }
    memcpy(buff, dma_proxy_interface_p->buffer + off, size);
    //dma_proxy_interface_p->length = size;
}

/* Kick off a DMA transfer on this channel (IO_START_IP).
 * Returns the raw ioctl result (0 on success, -1 on failure).
 */
int dma_ctrl::dma_start_transmit()
{
    const int rc = ioctl(dev_fd, IO_START_IP, NULL);
    return rc;
}

/* Push encoder/image parameters to the driver (IO_SET_IMG_PARAM).
 * Returns the ioctl result, or -1 when `param` is NULL.
 * BUGFIX: the original did "return NULL;" — NULL is 0, so a NULL
 * parameter was reported to the caller as success.
 */
int dma_ctrl::dev_set_param(struct usr_param* param)
{
    if (!param)
        return -1;
    return ioctl(dev_fd, IO_SET_IMG_PARAM, param);
}

/* Run the driver's loopback/self-test transfer (IO_TRANSFRER_TEST).
 * Returns the raw ioctl result (0 on success, -1 on failure).
 */
int dma_ctrl::dma_test_transmit()
{
    const int rc = ioctl(dev_fd, IO_TRANSFRER_TEST, NULL);
    return rc;
}

测试用例:

/* Smoke test for the four DMA proxy channels: stage a gray-ramp pattern,
 * run the loopback transfer on the TX channels, then read back and print
 * the first 128 bytes from the image RX channel.
 *
 * BUGFIXES: the middle and inner fill loops both declared `int j`
 * (the inner loop shadowed the outer one), and malloc()'s result was
 * used without a NULL check.
 */
void safe_main_thread(int argc, char* argv[])
{
    dma_ctrl rs_ipu_img_rx_ctrl(RS_IPU_IMG_RX_DEV);
    dma_ctrl rs_ipu_img_tx_ctrl(RS_IPU_IMG_TX_DEV);
    dma_ctrl rs_ipu_bitstream_rx_ctrl(RS_IPU_BIT_RX_DEV);
    dma_ctrl rs_ipu_bitstream_tx_ctrl(RS_IPU_BIT_TX_DEV);

    unsigned char rx_data[128];
#define  TEST_SIZE (DMA_ALLOC_SIZE * 4 / 5)
    unsigned char* malloc_data = (unsigned char*)malloc(TEST_SIZE);
    if (!malloc_data) {
        printf("malloc failed\r\n");
        return;
    }
    int index = 0;
    unsigned char gray = 0;
    // Fill 5 frames of 3840x2160 with a repeating 0..254 gray ramp.
    // (255 is skipped by the ">= 255" reset — NOTE(review): confirm intended.)
    for (int frame = 0; frame < 5; frame++) {
        for (int row = 0; row < 2160; row++) {     // was "j", shadowed below
            for (int col = 0; col < 3840; col++) { // was also "j"
                malloc_data[index++] = gray++;
                if (gray >= 255)
                {
                    gray = 0;
                }
            }
        }
    }
    std::cout << "ipu_test" <<endl;
    rs_ipu_img_tx_ctrl.dma_set_transmit_data(malloc_data, 128, 0);
    rs_ipu_img_tx_ctrl.dma_set_size(128);
    rs_ipu_img_tx_ctrl.dma_test_transmit();
    std::cout << "ipu_test endl" <<endl;
    rs_ipu_bitstream_tx_ctrl.dma_set_transmit_data(malloc_data, 128, 0);
    rs_ipu_bitstream_tx_ctrl.dma_set_size(128);
    rs_ipu_bitstream_tx_ctrl.dma_test_transmit();
    std::cout << "bitstream_test endl" <<endl;
    rs_ipu_img_rx_ctrl.dma_set_size(128);
    rs_ipu_img_rx_ctrl.dma_test_transmit();
    rs_ipu_img_rx_ctrl.dma_get_transmit_data(rx_data, 128, 0);
    std::cout << "rx data : " << endl;
    for (int i = 0; i < 128; i++) {
        std::cout << (int)(rx_data[i]) << " ";
    }
    std::cout << endl;
    free(malloc_data);
}

tx过程就是先设置传输的data,然后设置大小,最后dma_test_transmit()
rx过程先设置大小,然后transmit,然后get_data

你可能感兴趣的:(zynq)