Linux Serial Driver (4) - write in Detail

0. Preface

        After the initialization covered in the previous three parts, the write path below simply uses that configuration directly.

1. Implementation of the user-space write operation

tty_write
	-->ld = tty_ldisc_ref_wait(tty);
		-->wait_event(tty_ldisc_wait, (ld = tty_ldisc_try(tty)) != NULL);         //sleep on the tty_ldisc_wait queue until the condition holds, so the key function to look at next is tty_ldisc_try(tty)
			-->tty_ldisc_try(tty)
				-->ld = tty->ldisc
	-->do_tty_write(ld->ops->write, tty, file, buf, count);
		-->n_tty_write		/* here ld->ops is tty_ldisc_N_TTY, so n_tty_write is called and handed the user buffer */
			-->add_wait_queue(&tty->write_wait, &wait);
			-->tty->ops->write(tty, b, nr);				//i.e. uart_write
				-->uart_start(tty);
					-->__uart_start(tty);
						-->port->ops->start_tx(port);   //i.e. imx_start_tx
							-->schedule_delayed_work(&sport->tsk_dma_tx, 0);   //queues dma_tx_work
								-->sg_set_buf(sgl, xmit->buf + xmit->tail, UART_XMIT_SIZE - xmit->tail);  //point the scatterlist at the circular buffer
								-->dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);      //streaming DMA mapping
								-->desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);   //build the DMA descriptor from the scatterlist (the circular-buffer addresses)
								-->desc->callback = dma_tx_callback;  //invoked once the DMA engine finishes this transfer
								-->dmaengine_submit(desc);           //queue the descriptor on the dmaengine driver's transfer list
								-->dma_async_issue_pending(chan);    //start the transfer on this DMA channel
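        The whole chain above is triggered from user space by an ordinary write() on the tty device node. Below is a minimal user-space sketch of that trigger; /dev/ttymxc0 is an assumption (a typical i.MX UART node name), adjust it to your board:

/* Minimal user-space trigger for the call chain above.
 * /dev/ttymxc0 is an assumed i.MX UART device node. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "hello uart\n";
	int fd = open("/dev/ttymxc0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* This write() enters the kernel through tty_write() and, with DMA
	 * enabled, ends up queuing a TX DMA transfer as traced above. */
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");

	close(fd);
	return 0;
}
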
static ssize_t tty_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct tty_struct *tty = file_tty(file);
 	struct tty_ldisc *ld;
	ssize_t ret;

	ld = tty_ldisc_ref_wait(tty);    /* ---> */
	if (!ld->ops->write)
		ret = -EIO;
	else
		ret = do_tty_write(ld->ops->write, tty, file, buf, count);/* calls n_tty_write and hands it the user buffer ---> */
	tty_ldisc_deref(ld);
	return ret;
}

struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
{
	struct tty_ldisc *ld;
	/* wait_event is a macro: block until (ld = tty_ldisc_try(tty)) != NULL */
	wait_event(tty_ldisc_wait, (ld = tty_ldisc_try(tty)) != NULL);
	return ld;
}

static struct tty_ldisc *tty_ldisc_try(struct tty_struct *tty)
{
	struct tty_ldisc *ld = NULL;	/* (locking trimmed in this excerpt) */

	if (test_bit(TTY_LDISC, &tty->flags) && tty->ldisc) {
		ld = tty->ldisc;  /* take the line discipline stored in tty; it was installed in analysis path 2-1 of "Linux Serial Driver (3) - open in Detail" */
		atomic_inc(&ld->users);
	}
	return ld;
}

static inline ssize_t do_tty_write(
	ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
	struct tty_struct *tty,	struct file *file, const char __user *buf, size_t count)
{
	ssize_t ret, written = 0;
	unsigned int chunk;
	size_t size;

	chunk = 2048;	/* the real code may raise this to 64 KiB for TTY_NO_WRITE_SPLIT */
	/* Do the write .. */
	for (;;) {
		size = count > chunk ? chunk : count;
		ret = -EFAULT;
		if (copy_from_user(tty->write_buf, buf, size))	/* copy the data the application wants to send into tty->write_buf */
			break;
		ret = write(tty, file, tty->write_buf, size);	/* the ld->ops->write callback, i.e. n_tty_write() */
		if (ret <= 0)
			break;
		written += ret;
		buf += ret;
		count -= ret;
		if (!count)
			break;
	}
	return written ? written : ret;
}

static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
			   const unsigned char *buf, size_t nr)
{
	const unsigned char *b = buf;
	DECLARE_WAITQUEUE(wait, current);	/* initialize a wait-queue entry for the current task */
	int c;
	ssize_t retval = 0;

	add_wait_queue(&tty->write_wait, &wait);	/* put the current process on the write_wait queue */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (O_OPOST(tty) && !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {	/* OPOST: output processing enabled */
			/* ... cooked-output path elided ... */
		} else {
			struct n_tty_data *ldata = tty->disc_data;	/* raw output: the data reaches the serial port untouched and unfiltered */

			while (nr > 0) {
				c = tty->ops->write(tty, b, nr);	/* tty->ops is uart_ops (see the earlier parts), so this calls uart_write() ---> */
				if (c < 0) {
					retval = c;
					goto break_out;
				}
				if (!c)
					break;
				b += c;
				nr -= c;	/* nr is what is still unwritten; once a DMA transfer completes we are woken here and push the rest into the circular buffer */
			}
		}
		if (!nr)
			break;
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}
		schedule();	/* sleep until woken via tty->write_wait */
	}
break_out:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tty->write_wait, &wait);
	return (b - buf) ? b - buf : retval;
}

static int uart_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *port;
	struct circ_buf *circ;
	unsigned long flags;
	int c, ret = 0;

	port = state->uart_port;
	circ = &state->xmit;

	if (!circ->buf)		/* nothing to do if the circular buffer was never allocated */
		return 0;

	while (1) {
		c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);		/* contiguous free space left in the circular buffer */
		if (count < c)
			c = count;		/* write everything if it fits in the free space, otherwise only the part that fits */
		if (c <= 0)
			break;
		memcpy(circ->buf + circ->head, buf, c);		/* copy the user data into the uart_state.xmit circular buffer */
		circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
		buf += c;
		count -= c;
		ret += c;			/* number of bytes written into the circular buffer so far */
	}

	uart_start(tty);		/*--->*/
	return ret;
}
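
        The index arithmetic above relies on UART_XMIT_SIZE being a power of two: CIRC_SPACE_TO_END() only reports the free space that is contiguous up to the end of the buffer, which is why uart_write() copies in a loop. A small stand-alone sketch of that arithmetic follows; the macro body mirrors the one in <linux/circ_buf.h>, while XMIT_SIZE and the head/tail values are made up for illustration:

/* User-space sketch of the circular-buffer math used by uart_write(). */
#include <stdio.h>

#define XMIT_SIZE 4096	/* must be a power of two, like UART_XMIT_SIZE */

/* contiguous free space from head up to the end of the buffer */
#define CIRC_SPACE_TO_END(head, tail, size) \
	({ int end = (size) - 1 - (head); \
	   int n = (end + (tail)) & ((size) - 1); \
	   n <= end ? n : end + 1; })

int main(void)
{
	int head = 4000, tail = 100;	/* writer near the end, reader near the start */

	/* Only 96 bytes fit before head wraps; the remaining free space (up to
	 * tail) is reachable only with a second copy, hence the loop above. */
	printf("space to end = %d\n", CIRC_SPACE_TO_END(head, tail, XMIT_SIZE));
	return 0;
}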

static void uart_start(struct tty_struct *tty)
{
	__uart_start(tty);		/*--->*/
}

static void __uart_start(struct tty_struct *tty)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *port = state->uart_port;

	if (port->ops->wake_peer)			/* not defined for this driver */
		port->ops->wake_peer(port);

	if (!uart_circ_empty(&state->xmit) && state->xmit.buf &&
	    !tty->stopped && !tty->hw_stopped)
		port->ops->start_tx(port);		/* i.e. imx_start_tx, hooked up in serial_imx_probe, see "Linux Serial Driver (2) - Line Discipline" ---> */
}

static void imx_start_tx(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long temp;

	/* Clear any pending ORE flag before enabling interrupt */
	temp = readl(sport->port.membase + USR2);
	writel(temp | USR2_ORE, sport->port.membase + USR2);
	/* ... many register read/write operations trimmed here ... */
	if (sport->dma_is_enabled) {
		schedule_delayed_work(&sport->tsk_dma_tx, 0);  /* looking back at imx_startup in "Linux Serial Driver (3) - open in Detail", sport->tsk_dma_tx is bound to dma_tx_work() ---> */
		return;
	}

	if (readl(sport->port.membase + uts_reg(sport)) & UTS_TXEMPTY)
		imx_transmit_buffer(sport);
}

static void dma_tx_work(struct work_struct *w)
{
	struct delayed_work *delay_work = to_delayed_work(w);
	struct imx_port *sport = container_of(delay_work, struct imx_port, tsk_dma_tx);
	struct circ_buf *xmit = &sport->port.state->xmit;
	struct scatterlist *sgl = sport->tx_sgl;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan	*chan = sport->dma_chan_tx;
	struct device *dev = sport->port.dev;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&sport->port.lock, flags);
	sport->tx_bytes = uart_circ_chars_pending(xmit);

	if (sport->tx_bytes > 0) {
		if (xmit->tail > xmit->head && xmit->head > 0) {
			sport->dma_tx_nents = 2;
			sg_init_table(sgl, 2);
			sg_set_buf(sgl, xmit->buf + xmit->tail,
					UART_XMIT_SIZE - xmit->tail);   //point the first sg entry at the circular buffer, from tail up to the end
			sg_set_buf(sgl + 1, xmit->buf, xmit->head);
		} else {
			sport->dma_tx_nents = 1;
			sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
		}
		spin_unlock_irqrestore(&sport->port.lock, flags);

		ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);  //streaming DMA mapping
		if (ret == 0) {
			dev_err(dev, "DMA mapping error for TX.\n");
			goto err_out;
		}
		desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
						DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);  //build the DMA descriptor: the circular-buffer addresses from sgl go into the DMA configuration
		if (!desc) {
			dev_err(dev, "We cannot prepare for the TX slave dma!\n");
			goto err_out;
		}
		desc->callback = dma_tx_callback;   //invoked once the DMA engine finishes this transfer
		desc->callback_param = sport;

		dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
				uart_circ_chars_pending(xmit));
		/* fire it */
		sport->dma_is_txing = 1;
		dmaengine_submit(desc);   //queue the descriptor on the dmaengine driver's transfer list
		dma_async_issue_pending(chan);  //start the transfer on this DMA channel
		return;
	}

	spin_unlock_irqrestore(&sport->port.lock, flags);
	return;

err_out:
	/* error handling for the failed mapping/preparation is trimmed in this excerpt */
	return;
}
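
        The callback registered above is what keeps the transmit loop going. It is not part of this excerpt, so the following is only a simplified sketch of its usual shape in this driver family (unmap the scatterlist, advance the tail, wake the writer, reschedule the work), not the verbatim source:

/* Simplified sketch of the TX-complete callback; the real driver also takes
 * the port lock and keeps extra state. Field names follow the excerpt above. */
static void dma_tx_callback(void *data)
{
	struct imx_port *sport = data;
	struct scatterlist *sgl = sport->tx_sgl;
	struct circ_buf *xmit = &sport->port.state->xmit;

	dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);

	/* The bytes handed to the engine are now on the wire: free them in the
	 * circular buffer by advancing tail. */
	xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
	sport->port.icount.tx += sport->tx_bytes;
	sport->dma_is_txing = 0;

	/* Wake the writer sleeping in n_tty_write() on tty->write_wait. */
	uart_write_wakeup(&sport->port);

	/* If more data accumulated while the DMA was running, go around the
	 * dma_tx_work() <=> dma_tx_callback() loop again. */
	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
		schedule_delayed_work(&sport->tsk_dma_tx, 0);
}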

2. Summary

        1) As long as the circular buffer is not empty, the DMA keeps moving data out of it: the DMA completion callback schedules dma_tx_work again, so until the circular buffer has been drained the code keeps looping between dma_tx_work <=> dma_tx_callback (see the callback sketch above).

        2) write can block, because the user data cannot always be copied into the circular buffer in one go (the circular buffer is finite!). If not all of the user data fits, the process sleeps in n_tty_write() and is woken once the circular buffer has room again; that wake-up is done in the DMA completion callback. A non-blocking variant is sketched below.
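
        If blocking is not acceptable, the file descriptor can be opened with O_NONBLOCK: n_tty_write() then returns whatever fit into the circular buffer, or -EAGAIN if nothing did, instead of sleeping. A user-space sketch of that variant, again assuming the /dev/ttymxc0 node name:

/* Non-blocking write loop: handle short writes and EAGAIN instead of
 * sleeping inside the kernel. /dev/ttymxc0 is an assumed device node. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[8192];
	size_t off = 0;
	int fd = open("/dev/ttymxc0", O_RDWR | O_NOCTTY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(buf, 'A', sizeof(buf));

	while (off < sizeof(buf)) {
		ssize_t n = write(fd, buf + off, sizeof(buf) - off);
		if (n > 0) {
			off += n;		/* short write: the ring accepted only part */
		} else if (n < 0 && errno == EAGAIN) {
			usleep(1000);		/* ring full: back off and retry */
		} else if (n < 0) {
			perror("write");
			break;
		}
	}
	close(fd);
	return 0;
}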
