Reposted from: http://blog.csdn.net/sirzjp/article/details/6134489
The previous post analyzed the open operation; now we turn to the read operation, tty_read. tty_read simply calls the line discipline's read method, which copies data from tty->read_buf to user space. tty->read_buf is a circular buffer: read_head is the index at which newly received characters are stored, read_tail is the index of the next character to hand to the reader, and read_cnt is the number of characters that have been received but not yet read. Where that data comes from is analyzed further below; first, the code of tty_read:
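Before diving into the kernel source, here is a minimal, non-kernel C sketch of that circular-buffer convention (the index names mirror the n_tty fields, and N_TTY_BUF_SIZE is assumed to be its usual power-of-two value so that masking works as a modulo):
/* Illustration only, not kernel code:
 * head - index where the next received character is stored (receive side)
 * tail - index of the next character handed to the reader (read side)
 * cnt  - number of characters received but not yet read
 */
#define N_TTY_BUF_SIZE 4096 /* assumed power-of-two size */
struct ring {
    unsigned char buf[N_TTY_BUF_SIZE];
    int head, tail, cnt;
};
static void ring_put(struct ring *r, unsigned char ch) /* receive path */
{
    r->buf[r->head] = ch;
    r->head = (r->head + 1) & (N_TTY_BUF_SIZE - 1);
    r->cnt++;
}
static int ring_get(struct ring *r) /* read path */
{
    unsigned char ch;
    if (!r->cnt)
        return -1; /* nothing buffered */
    ch = r->buf[r->tail];
    r->tail = (r->tail + 1) & (N_TTY_BUF_SIZE - 1);
    r->cnt--;
    return ch;
}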
/**
* tty_read - read method for tty device files
* @file: pointer to tty file
* @buf: user buffer
* @count: size of user buffer
* @ppos: unused
*
* Perform the read system call function on this terminal device. Checks
* for hung up devices before calling the line discipline method.
*
* Locking:
* Locks the line discipline internally while needed. Multiple
* read calls may be outstanding in parallel.
*/
static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
int i;
struct tty_struct *tty;
struct inode *inode;
struct tty_ldisc *ld;
tty = (struct tty_struct *)file->private_data; /* the tty structure stored in private_data by tty_open */
inode = file->f_path.dentry->d_inode;
if (tty_paranoia_check(tty, inode, "tty_read")) /* sanity check on the tty and its magic number */
return -EIO;
if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
return -EIO;
/* We want to wait for the line discipline to sort out in this
situation */
ld = tty_ldisc_ref_wait(tty); /* wait until the tty is bound to a line discipline (TTY_LDISC set) */
if (ld->ops->read)
i = (ld->ops->read)(tty, file, buf, count);
else
i = -EIO;
tty_ldisc_deref(ld);
if (i > 0)
inode->i_atime = current_fs_time(inode->i_sb);
return i;
}
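For orientation, the function above is where a plain user-space read() on a terminal device ends up. A minimal user-space sketch (the device node and buffer size are only illustrative):
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
int main(void)
{
    char buf[64];
    ssize_t n;
    int fd = open("/dev/ttyS0", O_RDONLY | O_NOCTTY); /* illustrative node */
    if (fd < 0)
        return 1;
    n = read(fd, buf, sizeof(buf)); /* ends up in tty_read -> ld->ops->read */
    if (n > 0)
        printf("read %zd bytes\n", n);
    close(fd);
    return 0;
}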
The details of the read are implemented in the line discipline. For the default line discipline the read method is read_chan; here is its source:
/**
* read_chan - read function for tty
* @tty: tty device
* @file: file object
* @buf: userspace buffer pointer
* @nr: size of I/O
*
* Perform reads for the line discipline. We are guaranteed that the
* line discipline will not be closed under us but we may get multiple
* parallel readers and must handle this ourselves. We may also get
* a hangup. Always called in user context, may sleep.
*
* This code must be sure never to sleep through a hangup.
*/
static ssize_t read_chan(struct tty_struct *tty, struct file *file,
unsigned char __user *buf, size_t nr)
{
unsigned char __user *b = buf;
DECLARE_WAITQUEUE(wait, current); /* declare a wait-queue entry; every read adds itself to tty->read_wait */
int c;
int minimum, time;
ssize_t retval = 0;
ssize_t size;
long timeout;
unsigned long flags;
int packet;
do_it_again:
if (!tty->read_buf) {
printk(KERN_ERR "n_tty_read_chan: read_buf == NULL?!?/n");
return -EIO;
}
c = job_control(tty, file); /* job-control handling when the tty is the process's controlling terminal rather than the console */
if (c < 0)
return c;
minimum = time = 0;
timeout = MAX_SCHEDULE_TIMEOUT;
//A tty processes input either in raw (non-canonical) mode or in canonical mode; tty->icanon records which.
//In raw mode, c_cc[VTIME] and c_cc[VMIN] determine the timeout and the minimum amount of data before the reading process is woken.
if (!tty->icanon) {
time = (HZ / 10) * TIME_CHAR(tty);
minimum = MIN_CHAR(tty);
if (minimum) {
if (time)
tty->minimum_to_wake = 1;
else if (!waitqueue_active(&tty->read_wait) ||
(tty->minimum_to_wake > minimum))
tty->minimum_to_wake = minimum;
} else {
timeout = 0;
if (time) {
timeout = time;
time = 0;
}
tty->minimum_to_wake = minimum = 1;
}
}
//tty->atomic_read_lock serializes concurrent readers
/*
* Internal serialization of reads.
*/
if (file->f_flags & O_NONBLOCK) {
if (!mutex_trylock(&tty->atomic_read_lock))
return -EAGAIN;
} else {
if (mutex_lock_interruptible(&tty->atomic_read_lock))
return -ERESTARTSYS;
}
//A pty pair can be switched to packet mode via ioctl; a non-zero tty->link->ctrl_status means link control information is pending.
packet = tty->packet;
add_wait_queue(&tty->read_wait, &wait); /* add the reading process to the read wait queue */
//Nothing can be assumed about the state when the process is woken up, so the wait conditions are re-checked on every loop iteration.
while (nr) {
/* First test for status change. */
if (packet && tty->link->ctrl_status) {
unsigned char cs;
if (b != buf)
break;
spin_lock_irqsave(&tty->link->ctrl_lock, flags);
cs = tty->link->ctrl_status;
tty->link->ctrl_status = 0;
spin_unlock_irqrestore(&tty->link->ctrl_lock, flags);
if (tty_put_user(tty, cs, b++)) {
retval = -EFAULT;
b--;
break;
}
nr--;
break;
}
/* This statement must be first before checking for input
so that any interrupt will set the state back to
TASK_RUNNING. */
set_current_state(TASK_INTERRUPTIBLE);
if (((minimum - (b - buf)) < tty->minimum_to_wake) &&
((minimum - (b - buf)) >= 1))
tty->minimum_to_wake = (minimum - (b - buf));
//Check whether data is available to read (the test differs between raw and canonical mode). If not, bail out when one of the exit conditions holds; otherwise go back to sleep on the wait queue.
if (!input_available_p(tty, 0)) {
if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { /* the other end of the pty pair has been closed */
retval = -EIO;
break;
}
if (tty_hung_up_p(file))
break;
if (!timeout)
break;
if (file->f_flags & O_NONBLOCK) { /* non-blocking mode */
retval = -EAGAIN;
break;
}
if (signal_pending(current)) { /* TIF_SIGPENDING: the process has a signal to handle */
retval = -ERESTARTSYS;
break;
}
/* FIXME: does n_tty_set_room need locking ? */
n_tty_set_room(tty); /* update the available receive room */
timeout = schedule_timeout(timeout);
continue;
}
//Data is available: put the process back into the runnable state.
__set_current_state(TASK_RUNNING);
/* Deal with packet mode. */
if (packet && b == buf) { /* pty packet mode */
if (tty_put_user(tty, TIOCPKT_DATA, b++)) {
retval = -EFAULT;
b--;
break;
}
nr--;
}
//Canonical-mode read: the buffered data has been cooked, and readers are only woken once a complete line has accumulated.
if (tty->icanon) {
/* N.B. avoid overrun if nr == 0 */
while (nr && tty->read_cnt) {
int eol;
//a set bit in tty->read_flags at position read_tail marks the end of a line
eol = test_and_clear_bit(tty->read_tail,
tty->read_flags);
c = tty->read_buf[tty->read_tail];
spin_lock_irqsave(&tty->read_lock, flags);
tty->read_tail = ((tty->read_tail+1) & /* circular-buffer wrap-around */
(N_TTY_BUF_SIZE-1));
tty->read_cnt--;
if (eol) {
/* this test should be redundant:
* we shouldn't be reading data if
* canon_data is 0
*/
if (--tty->canon_data < 0)
tty->canon_data = 0;
}
spin_unlock_irqrestore(&tty->read_lock, flags);
if (!eol || (c != __DISABLED_CHAR)) { /* __DISABLED_CHAR is '\0' */
if (tty_put_user(tty, c, b++)) {
retval = -EFAULT;
b--;
break;
}
nr--;
}
if (eol) {
tty_audit_push(tty);
break;
}
}
if (retval)
break;
} else { /* Raw mode: copy the data out in bulk; the call is made twice to handle wrap-around of the circular buffer. */
int uncopied;
/* The copy function takes the read lock and handles
locking internally for this case */
uncopied = copy_from_read_buf(tty, &b, &nr);
uncopied += copy_from_read_buf(tty, &b, &nr);
if (uncopied) {
retval = -EFAULT;
break;
}
}
/* If there is enough space in the read buffer now, let the
* low-level driver know. We use n_tty_chars_in_buffer() to
* check the buffer, as it now knows about canonical mode.
* Otherwise, if the driver is throttled and the line is
* longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
* we won't get any more characters.
*/
//Once the amount of unread data drops below the threshold, call tty->ops->unthrottle().
if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
n_tty_set_room(tty);
check_unthrottle(tty);
}
if (b - buf >= minimum)
break;
if (time)
timeout = time;
}
mutex_unlock(&tty->atomic_read_lock);
remove_wait_queue(&tty->read_wait, &wait);
if (!waitqueue_active(&tty->read_wait))
tty->minimum_to_wake = minimum;
__set_current_state(TASK_RUNNING);
size = b - buf;
if (size) {
retval = size;
if (nr)
clear_bit(TTY_PUSH, &tty->flags);
} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
goto do_it_again;
n_tty_set_room(tty);
return retval;
}
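The MIN/TIME handling at the top of read_chan corresponds to the termios c_cc[VMIN] and c_cc[VTIME] settings. As a user-space sketch, the following switches an already-open tty descriptor to non-canonical mode so that the reader is woken as soon as one byte arrives, with up to 0.5 s allowed between subsequent bytes:
#include <termios.h>
#include <unistd.h>
/* VMIN = 1: wake the reader once a single byte is available.
 * VTIME = 5: allow up to 5 * 0.1 s between bytes after that. */
static int set_raw_min_time(int fd)
{
    struct termios tio;
    if (tcgetattr(fd, &tio) < 0)
        return -1;
    tio.c_lflag &= ~(ICANON | ECHO); /* leave canonical mode */
    tio.c_cc[VMIN] = 1;
    tio.c_cc[VTIME] = 5;
    return tcsetattr(fd, TCSANOW, &tio);
}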
The above covered how user space reads data from a tty: the data comes out of tty->read_buf. So where does the data in tty->read_buf come from? That is the question for this part. struct tty_struct contains a struct tty_bufhead buf member. When, say, a serial port raises an interrupt with incoming data, the driver stores it in tty->buf; at a suitable moment a function such as tty_flip_buffer_push moves the data from tty->buf into tty->read_buf, which is where the read path gets it. The source:
First look at two data structures. struct tty_buffer is a single buffer used to stage data received at interrupt time:
struct tty_buffer {
struct tty_buffer *next;
char *char_buf_ptr; /* the data itself */
unsigned char *flag_buf_ptr; /* per-character flag bytes */
int used; /* amount of data stored */
int size; /* size of the buffer */
int commit; /* amount of data committed */
int read; /* amount of data already consumed */
/* Data points here */
unsigned long data[0];
};
struct tty_bufhead { /* manages the staging buffers inside tty_struct */
struct delayed_work work; /* work item used to defer the flush */
spinlock_t lock;
struct tty_buffer *head; /* Queue head */
struct tty_buffer *tail; /* Active buffer */
struct tty_buffer *free; /* Free queue head */
int memory_used; /* Buffer space used excluding
free queue */
};
In an interrupt handler the received data is normally placed into the tty_struct staging buffers with tty_insert_flip_char; tty->buf.head points to the head of the staging-buffer list and tty->buf.tail to the buffer currently being filled:
static inline int tty_insert_flip_char(struct tty_struct *tty,
unsigned char ch, char flag)
{
struct tty_buffer *tb = tty->buf.tail;
if (tb && tb->used < tb->size) { /* the active buffer exists and still has room */
tb->flag_buf_ptr[tb->used] = flag; /* flag byte for this character */
tb->char_buf_ptr[tb->used++] = ch; /* the received character */
return 1;
}
return tty_insert_flip_string_flags(tty, &ch, &flag, 1); /* slow path: no active buffer, or it is full */
}
//tty_insert_flip_string_flags allocates more staging space when needed, stores the received data and flags there, and returns the number of bytes stored
/**
* tty_insert_flip_string_flags - Add characters to the tty buffer
* @tty: tty structure
* @chars: characters
* @flags: flag bytes
* @size: size
*
* Queue a series of bytes to the tty buffering. For each character
* the flags array indicates the status of the character. Returns the
* number added.
*
* Locking: Called functions may take tty->buf.lock
*/
int tty_insert_flip_string_flags(struct tty_struct *tty,
const unsigned char *chars, const char *flags, size_t size)
{
int copied = 0;
do {
int space = tty_buffer_request_room(tty, size - copied); /* request staging-buffer space */
struct tty_buffer *tb = tty->buf.tail;
/* If there is no space then tb may be NULL */
if (unlikely(space == 0))
break;
memcpy(tb->char_buf_ptr + tb->used, chars, space);
memcpy(tb->flag_buf_ptr + tb->used, flags, space);
tb->used += space;
copied += space;
chars += space;
flags += space;
/* There is a small chance that we need to split the data over
several buffers. If this is the case we must loop */
} while (unlikely(size > copied));
return copied;
}
Once the received data sits in the staging buffers, tty_flip_buffer_push pushes it on towards tty->read_buf, where user space can read it:
/**
* tty_flip_buffer_push - terminal
* @tty: tty to push
*
* Queue a push of the terminal flip buffers to the line discipline. This
* function must not be called from IRQ context if tty->low_latency is set.
*
* In the event of the queue being busy for flipping the work will be
* held off and retried later.
*
* Locking: tty buffer lock. Driver locks in low latency mode.
*/
void tty_flip_buffer_push(struct tty_struct *tty)
{
unsigned long flags;
spin_lock_irqsave(&tty->buf.lock, flags);
if (tty->buf.tail != NULL)
tty->buf.tail->commit = tty->buf.tail->used;
spin_unlock_irqrestore(&tty->buf.lock, flags);
if (tty->low_latency) /* when low_latency is set, flush directly to tty->read_buf; otherwise defer to the workqueue */
flush_to_ldisc(&tty->buf.work.work);
else
schedule_delayed_work(&tty->buf.work, 1);
}
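To show how a driver feeds this machinery, here is a hedged sketch of a receive interrupt handler for a hypothetical UART; the my_uart_* helpers are made up, and only tty_insert_flip_char and tty_flip_buffer_push are real kernel calls:
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
/* Hypothetical UART RX interrupt: drain the FIFO into the tty staging
 * buffers, then schedule the flip to the line discipline. */
static irqreturn_t my_uart_rx_irq(int irq, void *dev_id)
{
    struct tty_struct *tty = dev_id; /* assumed to be passed at request_irq() time */
    while (my_uart_rx_ready()) { /* made-up hardware helper */
        unsigned char ch = my_uart_read_char(); /* made-up hardware helper */
        tty_insert_flip_char(tty, ch, TTY_NORMAL);
    }
    tty_flip_buffer_push(tty); /* hands the data on to flush_to_ldisc */
    return IRQ_HANDLED;
}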
//flush_to_ldisc hands the staged data to the line discipline, i.e. into tty->read_buf
/**
* flush_to_ldisc
* @work: tty structure passed from work queue.
*
* This routine is called out of the software interrupt to flush data
* from the buffer chain to the line discipline.
*
* Locking: holds tty->buf.lock to guard buffer list. Drops the lock
* while invoking the line discipline receive_buf method. The
* receive_buf method is single threaded for each tty instance.
*/
static void flush_to_ldisc(struct work_struct *work)
{
struct tty_struct *tty =
container_of(work, struct tty_struct, buf.work.work);
unsigned long flags;
struct tty_ldisc *disc;
struct tty_buffer *tbuf, *head;
char *char_buf;
unsigned char *flag_buf; /* continue only if a line discipline is attached and set */
disc = tty_ldisc_ref(tty);
if (disc == NULL) /* !TTY_LDISC */
return;
spin_lock_irqsave(&tty->buf.lock, flags);
/* So we know a flush is running */
set_bit(TTY_FLUSHING, &tty->flags); /* mark that a flush is in progress */
head = tty->buf.head;
if (head != NULL) {
tty->buf.head = NULL;
for (;;) {
int count = head->commit - head->read; /* amount of data to hand over */
if (!count) { /* nothing committed here: move on to the next staging buffer */
if (head->next == NULL)
break;
tbuf = head;
head = head->next;
tty_buffer_free(tty, tbuf);
continue;
}
/* Ldisc or user is trying to flush the buffers
we are feeding to the ldisc, stop feeding the
line discipline as we want to empty the queue */
if (test_bit(TTY_FLUSHPENDING, &tty->flags))
break;
if (!tty->receive_room) { /* no room left in tty->read_buf: retry later via the workqueue */
schedule_delayed_work(&tty->buf.work, 1);
break;
}
if (count > tty->receive_room)
count = tty->receive_room;
char_buf = head->char_buf_ptr + head->read;
flag_buf = head->flag_buf_ptr + head->read;
head->read += count;
spin_unlock_irqrestore(&tty->buf.lock, flags);
disc->ops->receive_buf(tty, char_buf,
flag_buf, count); /* the ldisc receive_buf method moves the data from the staging buffer into tty->read_buf */
spin_lock_irqsave(&tty->buf.lock, flags);
}
/* Restore the queue head */
tty->buf.head = head;
}
/* We may have a deferred request to flush the input buffer,
if so pull the chain under the lock and empty the queue */
if (test_bit(TTY_FLUSHPENDING, &tty->flags)) { /* a deferred flush of the input queue was requested */
__tty_buffer_flush(tty);
clear_bit(TTY_FLUSHPENDING, &tty->flags);
wake_up(&tty->read_wait); /* wake readers waiting on read_wait */
}
clear_bit(TTY_FLUSHING, &tty->flags);
spin_unlock_irqrestore(&tty->buf.lock, flags);
tty_ldisc_deref(disc);
}
/**
* n_tty_receive_buf - data receive
* @tty: terminal device
* @cp: buffer
* @fp: flag buffer
* @count: characters
*
* Called by the terminal driver when a block of characters has
* been received. This function must be called from soft contexts
* not from interrupt context. The driver is responsible for making
* calls one at a time and in order (or using flush_to_ldisc)
*/
static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count)
{
const unsigned char *p;
char *f, flags = TTY_NORMAL;
int i;
char buf[64];
unsigned long cpuflags;
if (!tty->read_buf)
return;
if (tty->real_raw) { /* raw path: copy straight into read_buf; the copy is done twice to handle circular-buffer wrap-around */
spin_lock_irqsave(&tty->read_lock, cpuflags);
i = min(N_TTY_BUF_SIZE - tty->read_cnt,
N_TTY_BUF_SIZE - tty->read_head);
i = min(count, i);
memcpy(tty->read_buf + tty->read_head, cp, i);
tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
tty->read_cnt += i;
cp += i;
count -= i;
i = min(N_TTY_BUF_SIZE - tty->read_cnt,
N_TTY_BUF_SIZE - tty->read_head);
i = min(count, i);
memcpy(tty->read_buf + tty->read_head, cp, i);
tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
tty->read_cnt += i;
spin_unlock_irqrestore(&tty->read_lock, cpuflags);
} else { /* the cooked path: characters are processed one by one (left for the reader to explore) */
for (i = count, p = cp, f = fp; i; i--, p++) {
if (f)
flags = *f++;
switch (flags) {
case TTY_NORMAL:
n_tty_receive_char(tty, *p);
break;
case TTY_BREAK:
n_tty_receive_break(tty);
break;
case TTY_PARITY:
case TTY_FRAME:
n_tty_receive_parity_error(tty, *p);
break;
case TTY_OVERRUN:
n_tty_receive_overrun(tty);
break;
default:
printk(KERN_ERR "%s: unknown flag %d/n",
tty_name(tty, buf), flags);
break;
}
}
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
}
n_tty_set_room(tty);
if (!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
if (waitqueue_active(&tty->read_wait))
wake_up_interruptible(&tty->read_wait);
}
/*
* Check the remaining room for the input canonicalization
* mode. We don't want to throttle the driver if we're in
* canonical mode and don't have a newline yet!
*/
if (tty->receive_room < TTY_THRESHOLD_THROTTLE) /* throttle the driver when the remaining receive room falls below the threshold */
tty_throttle(tty);
}
The write path, tty_write, first validates the request and then calls the line discipline's write method, write_chan for the default line discipline. write_chan pushes the data to the device through tty->ops->write, and calls tty->ops->flush_chars afterwards when the driver provides it; tty->ops->write_room is used to check how much buffer space the driver has left.
/**
* tty_write - write method for tty device file
* @file: tty file pointer
* @buf: user data to write
* @count: bytes to write
* @ppos: unused
*
* Write data to a tty device via the line discipline.
*
* Locking:
* Locks the line discipline as required
* Writes to the tty driver are serialized by the atomic_write_lock
* and are then processed in chunks to the device. The line discipline
* write method will not be invoked in parallel for each device
* The line discipline write method is called under the big
* kernel lock for historical reasons. New code should not rely on this.
*/
static ssize_t tty_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct tty_struct *tty;
struct inode *inode = file->f_path.dentry->d_inode;
ssize_t ret;
struct tty_ldisc *ld;
tty = (struct tty_struct *)file->private_data;
if (tty_paranoia_check(tty, inode, "tty_write")) //幻数检查及设备结构相关检测
return =-EIO ;
(!tty || !tty->ops->write ||
(test_bit(TTY_IO_ERROR, &tty->flags)))
return -EIO;
/* Short term debug to catch buggy drivers */
if (tty->ops->write_room == NULL)
printk(KERN_ERR "tty driver %s lacks a write_room method./n",
tty->driver->name);
ld = tty_ldisc_ref_wait(tty);
if (!ld->ops->write)
ret = -EIO;
else
ret = do_tty_write(ld->ops->write, tty, file, buf, count); /* call the line discipline write method, write_chan, analyzed below */
tty_ldisc_deref(ld);
return ret;
}
do_tty_write allocates a temporary buffer for tty->write_buf, copies the user-space data into it, and then calls the line discipline write function, i.e. write_chan.
/*
* Split writes up in sane blocksizes to avoid
* denial-of-service type attacks
*/
static inline ssize_t do_tty_write(
ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
struct tty_struct *tty,
struct file *file,
const char __user *buf,
size_t count)
{
ssize_t ret, written = 0;
unsigned int chunk;
ret = tty_write_lock(tty, file->f_flags & O_NDELAY);
if (ret < 0)
return ret;
/*
* We chunk up writes into a temporary buffer. This
* simplifies low-level drivers immensely, since they
* don't have locking issues and user mode accesses.
*
* But if TTY_NO_WRITE_SPLIT is set, we should use a
* big chunk-size..
*
* The default chunk-size is 2kB, because the NTTY
* layer has problems with bigger chunks. It will
* claim to be able to handle more characters than
* it actually does.
*
* FIXME: This can probably go away now except that 64K chunks
* are too likely to fail unless switched to vmalloc...
*/
chunk = 2048;
if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
chunk = 65536;
if (count < chunk)
chunk = count;
/* write_buf/write_cnt is protected by the atomic_write_lock mutex */
if (tty->write_cnt < chunk) {
unsigned char *buf;
if (chunk < 1024)
chunk = 1024;
buf = kmalloc(chunk, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto out;
}
kfree(tty->write_buf);
tty->write_cnt = chunk;
tty->write_buf = buf;
}
/* Do the write .. */
for (;;) {
size_t size = count;
if (size > chunk)
size = chunk;
ret = -EFAULT;
if (copy_from_user(tty->write_buf, buf, size))
break;
ret = write(tty, file, tty->write_buf, size);
if (ret <= 0)
break;
written += ret;
buf += ret;
count -= ret;
if (!count)
break;
ret = -ERESTARTSYS;
if (signal_pending(current))
break;
cond_resched();
}
if (written) {
struct inode *inode = file->f_path.dentry->d_inode;
inode->i_mtime = current_fs_time(inode->i_sb);
ret = written;
}
out:
tty_write_unlock(tty);
return ret;
}
//write_chan writes the data to the device via tty->ops->write, with tty->ops->flush_chars used on the post-processed path, depending on whether output processing is enabled.
//If the driver runs out of room before everything is written, the writer calls schedule() and sleeps on the write wait queue.
/**
* write_chan - write function for tty
* @tty: tty device
* @file: file object
* @buf: userspace buffer pointer
* @nr: size of I/O
*
* Write function of the terminal device. This is serialized with
* respect to other write callers but not to termios changes, reads
* and other such events. We must be careful with N_TTY as the receive
* code will echo characters, thus calling driver write methods.
*
* This code must be sure never to sleep through a hangup.
*/
static ssize_t write_chan(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
const unsigned char *b = buf;
DECLARE_WAITQUEUE(wait, current);
int c;
ssize_t retval = 0;
/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
//With TOSTOP set, a background process that tries to write to its controlling terminal is sent SIGTTOU; that is what the check below implements.
if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
retval = tty_check_change(tty); /* controlling-terminal / job-control check */
if (retval)
return retval;
}
add_wait_queue(&tty->write_wait, &wait); /* add this writer's wait-queue entry to the write wait queue */
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) {
retval = -EIO;
break;
}
//With OPOST set the output is post-processed by the line discipline.
//TTY_HW_COOK_OUT means the hardware cooks the output itself, so the line discipline only copies the data.
if (O_OPOST(tty) && !(test_bit(TTY_HW_COOK_OUT, &tty->flags))) {
while (nr > 0) {
//opost_block checks the available write room, post-processes the data as required, and writes it to the tty device in one block via tty->ops->write
ssize_t num = opost_block(tty, b, nr);
if (num < 0) {
if (num == -EAGAIN)
break;
retval = num;
goto break_out;
}
b += num;
nr -= num;
if (nr == 0)
break;
c = *b;
if (opost(c, tty) < 0)
break;
b++; nr--;
}
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
} else {
while (nr > 0) {
c = tty->ops->write(tty, b, nr);
if (c < 0) {
retval = c;
goto break_out;
}
if (!c)
break;
b += c;
nr -= c;
}
}
if (!nr)
break;
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
break;
}
schedule();
}
break_out:
__set_current_state(TASK_RUNNING);
remove_wait_queue(&tty->write_wait, &wait);
return (b - buf) ? b - buf : retval;
}
Applications that use non-blocking I/O often call the select() family of functions, which let a process find out whether one or more open files can be read or written without blocking.
For a tty this is implemented by the tty_poll operation below. The implementation is built on the poll_table structure and the poll_wait function;
we do not dissect them here, knowing what they do is enough.
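As a quick user-space illustration (the device node is only an example), a select() on a tty descriptor ends up in tty_poll and then normal_poll:
#include <sys/select.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
int main(void)
{
    int fd = open("/dev/ttyS0", O_RDONLY | O_NONBLOCK | O_NOCTTY);
    fd_set rfds;
    struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
    if (fd < 0)
        return 1;
    FD_ZERO(&rfds);
    FD_SET(fd, &rfds);
    /* The kernel calls tty_poll -> normal_poll to compute readiness. */
    if (select(fd + 1, &rfds, NULL, NULL, &tv) > 0 && FD_ISSET(fd, &rfds))
        printf("data can be read without blocking\n");
    close(fd);
    return 0;
}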
/**
* tty_poll - check tty status
* @filp: file being polled
* @wait: poll wait structures to update
*
* Call the line discipline polling method to obtain the poll
* status of the device.
*
* Locking: locks called line discipline but ldisc poll method
* may be re-entered freely by other callers.
*/
//the real work of tty_poll is done by the line discipline's poll method, normal_poll for the default line discipline
static unsigned int tty_poll(struct file *filp, poll_table *wait)
{
struct tty_struct *tty;
struct tty_ldisc *ld;
int ret = 0;
tty = (struct tty_struct *)filp->private_data;
if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll"))
return 0;
ld = tty_ldisc_ref_wait(tty);
if (ld->ops->poll)
ret = (ld->ops->poll)(tty, filp, wait); /* calls normal_poll */
tty_ldisc_deref(ld);
return ret;
}
A poll method does two things (a minimal driver-side sketch follows this list):
1) call poll_wait on one or more wait queues that can signal a change in poll status; poll_wait adds a wait queue to the poll_table structure,
so that if no I/O can be performed yet the kernel can put the process to sleep on the wait queues of every file descriptor passed to the system call;
2) return a bitmask describing which operations could be performed immediately without blocking.
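A minimal sketch of a character-driver poll method that follows these two steps (the mydev_* names and fields are hypothetical):
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>
struct mydev { /* hypothetical device state */
    wait_queue_head_t read_wait, write_wait;
    int have_data, have_room;
};
static unsigned int mydev_poll(struct file *filp, poll_table *wait)
{
    struct mydev *dev = filp->private_data;
    unsigned int mask = 0;
    /* Step 1: register the wait queues that signal state changes. */
    poll_wait(filp, &dev->read_wait, wait);
    poll_wait(filp, &dev->write_wait, wait);
    /* Step 2: report what can be done right now. */
    if (dev->have_data)
        mask |= POLLIN | POLLRDNORM;
    if (dev->have_room)
        mask |= POLLOUT | POLLWRNORM;
    return mask;
}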
/**
* normal_poll - poll method for N_TTY
* @tty: terminal device
* @file: file accessing it
* @wait: poll table
*
* Called when the line discipline is asked to poll() for data or
* for special events. This code is not serialized with respect to
* other events save open/close.
*
* This code must be sure never to sleep through a hangup.
* Called without the kernel lock held - fine
*/
static unsigned int normal_poll(struct tty_struct *tty, struct file *file,
poll_table *wait)
{
unsigned int mask = 0;
poll_wait(file, &tty->read_wait, wait); /* add a wait queue to the poll_table */
poll_wait(file, &tty->write_wait, wait);
//POLLIN: the device can be read without blocking.
//POLLRDNORM: "normal" data can be read; a readable device returns (POLLIN | POLLRDNORM).
if (input_available_p(tty, TIME_CHAR(tty) ? 0 : MIN_CHAR(tty)))
mask |= POLLIN | POLLRDNORM;
//POLLPRI: high-priority (out-of-band) data, here packet-mode control data, can be read without blocking.
if (tty->packet && tty->link->ctrl_status)
mask |= POLLPRI | POLLIN | POLLRDNORM;
//POLLHUP: when a process reading this device would see end-of-file, the driver must set POLLHUP (hang-up).
if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
mask |= POLLHUP;
if (tty_hung_up_p(file))
mask |= POLLHUP;
//set the minimum number of characters needed to wake a tty reader
if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
if (MIN_CHAR(tty) && !TIME_CHAR(tty))
tty->minimum_to_wake = MIN_CHAR(tty);
else
tty->minimum_to_wake = 1;
}
//POLLOUT: the device can be written without blocking.
//POLLWRNORM: same meaning as POLLOUT (and often literally the same value); a writable device returns (POLLOUT | POLLWRNORM).
if (tty->ops->write && !tty_is_writelocked(tty) &&
tty_chars_in_buffer(tty) < WAKEUP_CHARS &&
tty_write_room(tty) > 0)
mask |= POLLOUT | POLLWRNORM;
return mask;
}
Sockets and terminals usually support asynchronous notification: instead of polling, an application can receive a SIGIO signal when data becomes available. With several data sources, though, the application cannot tell from SIGIO alone which source fired. To use asynchronous notification, the application designates an owner process for the data source with fcntl F_SETOWN and then enables the file's asynchronous mode by setting the FASYNC flag with fcntl F_SETFL.
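A user-space sketch of that setup (the device node is only an example; O_ASYNC is the user-space name of the FASYNC flag):
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
static volatile sig_atomic_t got_sigio;
static void on_sigio(int sig) { got_sigio = 1; }
int main(void)
{
    int fd = open("/dev/ttyS0", O_RDONLY | O_NOCTTY);
    if (fd < 0)
        return 1;
    signal(SIGIO, on_sigio);
    fcntl(fd, F_SETOWN, getpid()); /* step 1: set the owner process */
    fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC); /* step 2: enable async notification */
    pause(); /* SIGIO arrives when data comes in */
    if (got_sigio)
        printf("got SIGIO, data is ready\n");
    close(fd);
    return 0;
}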
A terminal is one kind of tty device, and its asynchronous-notification support is spread across the driver:
1) When F_SETOWN is called, the owner is recorded in the file (filp->f_owner).
2) The FASYNC flag is clear by default when a file is opened; when the flag is changed, the driver's fasync method is called. The fasync method relies on struct fasync_struct and the fasync_helper function, analyzed next:
static DEFINE_RWLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;
/*
* fasync_helper() is used by some character device drivers (mainly mice)
* to set up the fasync queue. It returns negative on error, 0 if it did
* no changes and positive if it added/deleted the entry.
*/
//fasync_helper adds a file to, or removes it from, the list of interested files: on == 0 means remove, non-zero means add
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
struct fasync_struct *fa, **fp;
struct fasync_struct *new = NULL;
int result = 0;
//when adding, allocate a struct fasync_struct from the slab cache up front
if (on) {
new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
if (!new)
return -ENOMEM;
}
write_lock_irq(&fasync_lock);
//Walk the file's async-notification list: if the file is already on it and we are adding, free the object allocated above;
//if we are removing, free the fasync_struct entry that belongs to this file.
for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
if (fa->fa_file == filp) {
if(on) {
fa->fa_fd = fd;
kmem_cache_free(fasync_cache, new);
} else {
*fp = fa->fa_next;
kmem_cache_free(fasync_cache, fa);
result = 1;
}
goto out;
}
}
//the file was not on the list: initialize the new fasync_struct and add it at the head
if (on) {
new->magic = FASYNC_MAGIC;
new->fa_file = filp;
new->fa_fd = fd;
new->fa_next = *fapp;
*fapp = new;
result = 1;
}
out:
write_unlock_irq(&fasync_lock);
return result;
}
//Now look at the fasync method in the tty core, tty_fasync:
static int tty_fasync(int fd, struct file *filp, int on)
{
struct tty_struct *tty;
unsigned long flags;
int retval = 0;
lock_kernel();
tty = (struct tty_struct *)filp->private_data;
if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
goto out;
retval = fasync_helper(fd, filp, on, &tty->fasync); /* add the file to or remove it from the notification list */
if (retval <= 0)
goto out;
if (on) {
enum pid_type type;
struct pid *pid;
if (!waitqueue_active(&tty->read_wait))
tty->minimum_to_wake = 1;
spin_lock_irqsave(&tty->ctrl_lock, flags);
if (tty->pgrp) {
pid = tty->pgrp;
type = PIDTYPE_PGID;
} else {
pid = task_pid(current);
type = PIDTYPE_PID;
}
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
retval = __f_setown(filp, pid, type, 0); /* set the file's owner */
if (retval)
goto out;
} else {
if (!tty->fasync && !waitqueue_active(&tty->read_wait))
tty->minimum_to_wake = N_TTY_BUF_SIZE;
}
retval = 0;
out:
unlock_kernel();
return retval;
}
3) With the above in place, the remaining step is to send SIGIO to the application when data arrives; this is generally spread through the read and write paths. The kernel helper kill_fasync notifies all interested processes when data arrives:
//sig is the signal to send; band is the mode, POLL_IN for reads and POLL_OUT for writes
void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
/* First a quick test without locking: usually
* the list is empty.
*/
if (*fp) {
read_lock(&fasync_lock);
/* reread *fp after obtaining the lock */
__kill_fasync(*fp, sig, band);
read_unlock(&fasync_lock);
}
}
//walk the fasync_struct list and send the signal to every interested process
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
while (fa) {
struct fown_struct * fown;
if (fa->magic != FASYNC_MAGIC) {
printk(KERN_ERR "kill_fasync: bad magic number in "
"fasync_struct!/n");
return;
}
fown = &fa->fa_file->f_owner;
/* Don't send SIGURG to processes which have not set a
queued signum: SIGURG has its own default signalling
mechanism. */
if (!(sig == SIGURG && fown->signum == 0))
send_sigio(fown, fa->fa_fd, band);
fa = fa->fa_next;
}
}
4) Finally, when the file is closed it should be removed from the notification list, i.e. tty_fasync(-1, filp, 0) is called.
Because the asynchronous-notification machinery is scattered across the driver, this section only analyzed the mechanism itself; the remaining code is easy to follow and is not listed in full.
tty_ioctl and tty_compat_ioctl implement the control operations on the device. They are straightforward and are not analyzed here; interested readers can work through them on their own. tty_compat_ioctl is used when a 32-bit user space runs on a 64-bit kernel and converts between the two calling conventions.
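As a quick user-space illustration of the ioctl path, querying the terminal window size goes through tty_ioctl:
#include <sys/ioctl.h>
#include <stdio.h>
#include <unistd.h>
int main(void)
{
    struct winsize ws;
    /* TIOCGWINSZ is one of the many requests dispatched through tty_ioctl. */
    if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0)
        printf("terminal is %d rows x %d cols\n", ws.ws_row, ws.ws_col);
    return 0;
}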
What remains is the final operation: closing a tty device invokes tty_release, which frees the resources allocated earlier, essentially the reverse of tty_open.
/**
* tty_release - vfs callback for close
* @inode: inode of tty
* @filp: file pointer for handle to tty
*
* Called the last time each file handle is closed that references
* this tty. There may however be several such references.
*
* Locking:
* Takes bkl. See release_dev
*/
static int tty_release(struct inode *inode, struct file *filp)
{
lock_kernel();
release_dev(filp);
unlock_kernel();
return 0;
}
/*
* Even releasing the tty structures is a tricky business.. We have
* to be very careful that the structures are all released at the
* same time, as interrupts might otherwise get the wrong pointers.
*
* WSH 09/09/97: rewritten to avoid some nasty race conditions that could
* lead to double frees or releasing memory still in use.
*/
static void release_dev(struct file *filp)
{
struct tty_struct *tty, *o_tty;
int pty_master, tty_closing, o_tty_closing, do_sleep;
int devpts;
int idx;
char buf[64];
tty = (struct tty_struct *)filp->private_data;
if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode,
"release_dev"))
return;
check_tty_count(tty, "release_dev"); /* verify the open count: each open of the tty adds a file to tty->tty_files */
tty_fasync(-1, filp, 0); /* remove this file's struct fasync_struct from the notification list */
idx = tty->index;
pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER); /* true if this device is a pty master */
devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
o_tty = tty->link;
//sanity-check the per-device objects kept by the driver
#ifdef TTY_PARANOIA_CHECK
if (idx < 0 || idx >= tty->driver->num) {
printk(KERN_DEBUG "release_dev: bad idx when trying to "
"free (%s)/n", tty->name);
return;
}
if (!(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
if (tty != tty->driver->ttys[idx]) {
printk(KERN_DEBUG "release_dev: driver.table[%d] not tty "
"for (%s)/n", idx, tty->name);
return;
}
if (tty->termios != tty->driver->termios[idx]) {
printk(KERN_DEBUG "release_dev: driver.termios[%d] not termios "
"for (%s)/n",
idx, tty->name);
return;
}
if (tty->termios_locked != tty->driver->termios_locked[idx]) {
printk(KERN_DEBUG "release_dev: driver.termios_locked[%d] not "
"termios_locked for (%s)/n",
idx, tty->name);
return;
}
}
#endif
#ifdef TTY_DEBUG_HANGUP
printk(KERN_DEBUG "release_dev of %s (tty count=%d)...",
tty_name(tty, buf), tty->count);
#endif
//checks on the peer end of a pty pair
#ifdef TTY_PARANOIA_CHECK
if (tty->driver->other &&
!(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
if (o_tty != tty->driver->other->ttys[idx]) {
printk(KERN_DEBUG "release_dev: other->table[%d] "
"not o_tty for (%s)/n",
idx, tty->name);
return;
}
if (o_tty->termios != tty->driver->other->termios[idx]) {
printk(KERN_DEBUG "release_dev: other->termios[%d] "
"not o_termios for (%s)/n",
idx, tty->name);
return;
}
if (o_tty->termios_locked !=
tty->driver->other->termios_locked[idx]) {
printk(KERN_DEBUG "release_dev: other->termios_locked["
"%d] not o_termios_locked for (%s)/n",
idx, tty->name);
return;
}
if (o_tty->link != tty) {
printk(KERN_DEBUG "release_dev: bad pty pointers/n");
return;
}
}
#endif
if (tty->ops->close)
tty->ops->close(tty, filp); /* let the driver release its per-open resources */
/*
* Sanity check: if tty->count is going to zero, there shouldn't be
* any waiters on tty->read_wait or tty->write_wait. We test the
* wait queues and kick everyone out _before_ actually starting to
* close. This ensures that we won't block while releasing the tty
* structure.
*
* The test for the o_tty closing is necessary, since the master and
* slave sides may close in any order. If the slave side closes out
* first, its count will be one, since the master side holds an open.
* Thus this test wouldn't be triggered at the time the slave closes,
* so we do it now.
*
* Note that it's possible for the tty to be opened again while we're
* flushing out waiters. By recalculating the closing flags before
* each iteration we avoid any problems.
*/
while (1) { /* loop until all pending readers and writers on the tty have been flushed out */
/* Guard against races with tty->count changes elsewhere and
opens on /dev/tty */
mutex_lock(&tty_mutex);
tty_closing = tty->count <= 1; /* will this be the final close? */
o_tty_closing = o_tty &&
(o_tty->count <= (pty_master ? 1 : 0));
do_sleep = 0;
if (tty_closing) { /* before really closing, kick out any waiting readers and writers */
if (waitqueue_active(&tty->read_wait)) {
wake_up(&tty->read_wait);
do_sleep++;
}
if (waitqueue_active(&tty->write_wait)) {
wake_up(&tty->write_wait);
do_sleep++;
}
}
if (o_tty_closing) {
if (waitqueue_active(&o_tty->read_wait)) {
wake_up(&o_tty->read_wait);
do_sleep++;
}
if (waitqueue_active(&o_tty->write_wait)) {
wake_up(&o_tty->write_wait);
do_sleep++;
}
}
if (!do_sleep) /* no more waiters: safe to proceed */
break;
printk(KERN_WARNING "release_dev: %s: read/write wait queue "
"active!/n", tty_name(tty, buf));
mutex_unlock(&tty_mutex);
schedule();
}
/*
* The closing flags are now consistent with the open counts on
* both sides, and we've completed the last operation that could
* block, so it's safe to proceed with closing.
*/
if (pty_master) {
if (--o_tty->count < 0) {
printk(KERN_WARNING "release_dev: bad pty slave count "
"(%d) for %s/n",
o_tty->count, tty_name(o_tty, buf));
o_tty->count = 0;
}
}
if (--tty->count < 0) {
printk(KERN_WARNING "release_dev: bad tty->count (%d) for %s/n",
tty->count, tty_name(tty, buf));
tty->count = 0;
}
/*
* We've decremented tty->count, so we need to remove this file
* descriptor off the tty->tty_files list; this serves two
* purposes:
* - check_tty_count sees the correct number of file descriptors
* associated with this tty.
* - do_tty_hangup no longer sees this file descriptor as
* something that needs to be handled for hangups.
*/
file_kill(filp); /* unlink this file from the tty->tty_files list */
filp->private_data = NULL;
/*
* Perform some housekeeping before deciding whether to return.
*
* Set the TTY_CLOSING flag if this was the last open. In the
* case of a pty we may have to wait around for the other side
* to close, and TTY_CLOSING makes sure we can't be reopened.
*/
if (tty_closing)
set_bit(TTY_CLOSING, &tty->flags);
if (o_tty_closing)
set_bit(TTY_CLOSING, &o_tty->flags);
/*
* If _either_ side is closing, make sure there aren't any
* processes that still think tty or o_tty is their controlling
* tty.
*/
if (tty_closing || o_tty_closing) {
read_lock(&tasklist_lock);
session_clear_tty(tty->session);
if (o_tty)
session_clear_tty(o_tty->session);
read_unlock(&tasklist_lock);
}
mutex_unlock(&tty_mutex);
/* check whether both sides are closing ... */
if (!tty_closing || (o_tty && !o_tty_closing))
return;
#ifdef TTY_DEBUG_HANGUP
printk(KERN_DEBUG "freeing tty structure...");
#endif
/*
* Ask the line discipline code to release its structures
*/
tty_ldisc_release(tty, o_tty);
/*
* The release_tty function takes care of the details of clearing
* the slots and preserving the termios structure.
*/
release_tty(tty, idx);
/* Make this pty number available for reallocation */
if (devpts)
devpts_kill_index(idx);
}
With that we have analyzed the drivers behind /dev/tty, /dev/tty0, /dev/console and the other tty character devices. The tty core also provides a common interface and a common management layer on which other tty drivers register, i.e. a generic layer for implementing tty device drivers. The next part looks at how tty drivers are managed and how the tty core is used to implement one.
The Linux tty core is built on top of the standard character-device layer and provides a set of services that terminal-type drivers use as their interface. As the analysis so far has shown, the tty core controls the data flowing through a tty device and formats it, so an ordinary tty driver does not have to handle the usual file operations or interact with user space directly, and any line discipline can in principle be attached to any tty device. From the methods analyzed above we know the flow: the tty core takes data from user space and hands it to the line discipline; the line discipline passes it on to the tty driver; in the other direction, data the tty device receives from the hardware goes to the line discipline, which passes it back to the tty core. A tty driver's main job is therefore moving data to and from the hardware.
A tty driver is represented in the kernel by struct tty_driver. The tty core keeps a tty_drivers list on which every tty device driver is hung; the get_tty_driver call seen in tty_open walks this list to find the driver that matches a given device number.
tty devices fall into three main classes: consoles, pseudo terminals and serial ports. The kernel already implements the first two for us, so writing a tty device driver in practice usually means writing a serial driver. Loosely put, a tty driver allocates a tty_driver structure, initializes it and hooks it onto the tty_drivers list. Let us look at struct tty_driver and its members.
extern struct list_head tty_drivers;
struct tty_driver {
int magic; /* magic number for this structure */
struct cdev cdev; /* a tty driver is a character-device driver */
struct module *owner;
const char *driver_name; /* name of the driver */
const char *name; /* base name used for the device nodes */
int name_base; /* offset of printed name */
int major; /* major device number */
int minor_start; /* start of minor device number */
int minor_num; /* number of *possible* devices */
int num; /* number of devices allocated */
short type; /* type of tty driver */
short subtype; /* subtype of tty driver */
struct ktermios init_termios; /* initial termios used for each device */
int flags; /* tty driver flags */
int refcount; /* reference count, for loadable tty drivers */
struct proc_dir_entry *proc_entry; /* /proc fs entry */
struct tty_driver *other; /* only used for the PTY driver: the other end of the pair */
/*
* Pointer to the tty data structures
*/
struct tty_struct **ttys; /* the tty_struct of each device owned by this driver */
struct ktermios **termios; /* per-device termios */
struct ktermios **termios_locked;
void *driver_state; /* private driver state */
/*
* Driver methods
*/
const struct tty_operations *ops; /* the driver's operations, where most of its work happens */
struct list_head tty_drivers;
};
The usual flow for implementing a tty device driver is:
1) Call alloc_tty_driver to allocate a tty_driver dynamically; lines is the number of devices the driver supports.
struct tty_driver *alloc_tty_driver(int lines)
{
struct tty_driver *driver;
driver = kzalloc(sizeof(struct tty_driver), GFP_KERNEL);
if (driver) {
driver->magic = TTY_DRIVER_MAGIC;
driver->num = lines;
/* later we'll move allocation of tables here */
}
return driver;
}
2) Initialize the members of tty_driver, most importantly init_termios and ops. init_termios provides a safe set of default values in case the port is used before it has been configured; the meaning of each field can be found in the termios manual page and is not repeated here. ops is the fundamental set of tty driver callbacks and is attached with tty_set_operations; the earlier analysis showed exactly where each of these callbacks gets called.
struct tty_operations {
int (*open)(struct tty_struct * tty, struct file * filp);
void (*close)(struct tty_struct * tty, struct file * filp);
int (*write)(struct tty_struct * tty,
const unsigned char *buf, int count);
int (*put_char)(struct tty_struct *tty, unsigned char ch);
void (*flush_chars)(struct tty_struct *tty);
int (*write_room)(struct tty_struct *tty);
int (*chars_in_buffer)(struct tty_struct *tty);
int (*ioctl)(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg);
long (*compat_ioctl)(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg);
void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
void (*throttle)(struct tty_struct * tty);
void (*unthrottle)(struct tty_struct * tty);
void (*stop)(struct tty_struct *tty);
void (*start)(struct tty_struct *tty);
void (*hangup)(struct tty_struct *tty);
int (*break_ctl)(struct tty_struct *tty, int state);
void (*flush_buffer)(struct tty_struct *tty);
void (*set_ldisc)(struct tty_struct *tty);
void (*wait_until_sent)(struct tty_struct *tty, int timeout);
void (*send_xchar)(struct tty_struct *tty, char ch);
int (*read_proc)(char *page, char **start, off_t off,
int count, int *eof, void *data);
int (*tiocmget)(struct tty_struct *tty, struct file *file);
int (*tiocmset)(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
int (*resize)(struct tty_struct *tty, struct tty_struct *real_tty,
struct winsize *ws);
#ifdef CONFIG_CONSOLE_POLL
int (*poll_init)(struct tty_driver *driver, int line, char *options);
int (*poll_get_char)(struct tty_driver *driver, int line);
void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
#endif
};
3) With tty_driver initialized, call tty_register_driver to register a character device with the kernel and add the tty_driver to the tty_drivers list.
/*
* Called by a tty driver to register itself.
*/
int tty_register_driver(struct tty_driver *driver)
{
int error;
int i;
dev_t dev;
void **p = NULL;
if (driver->flags & TTY_DRIVER_INSTALLED)
return 0;
if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM) && driver->num) {
p = kzalloc(driver->num * 3 * sizeof(void *), GFP_KERNEL);
if (!p)
return -ENOMEM;
}
//the standard character-device number allocation steps
if (!driver->major) {
error = alloc_chrdev_region(&dev, driver->minor_start,
driver->num, driver->name);
if (!error) {
driver->major = MAJOR(dev);
driver->minor_start = MINOR(dev);
}
} else {
dev = MKDEV(driver->major, driver->minor_start);
error = register_chrdev_region(dev, driver->num, driver->name);
}
if (error < 0) {
kfree(p);
return error;
}
//set up the pointer arrays for each device's tty_struct and ktermios
if (p) {
driver->ttys = (struct tty_struct **)p;
driver->termios = (struct ktermios **)(p + driver->num);
driver->termios_locked = (struct ktermios **)
(p + driver->num * 2);
} else {
driver->ttys = NULL;
driver->termios = NULL;
driver->termios_locked = NULL;
}
//register the character device
cdev_init(&driver->cdev, &tty_fops);
driver->cdev.owner = driver->owner;
error = cdev_add(&driver->cdev, dev, driver->num);
if (error) {
unregister_chrdev_region(dev, driver->num);
driver->ttys = NULL;
driver->termios = driver->termios_locked = NULL;
kfree(p);
return error;
}
//add the tty_driver to the tty_drivers list
mutex_lock(&tty_mutex);
list_add(&driver->tty_drivers, &tty_drivers);
mutex_unlock(&tty_mutex);
//unless the driver registers its device nodes dynamically, register every device it controls now
if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) {
for (i = 0; i < driver->num; i++)
tty_register_device(driver, i, NULL);
}
proc_tty_register_driver(driver);
return 0;
}
At this point the driver is registered. When the module is unloaded, tty_unregister_driver performs the reverse work.
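Putting the three steps together, here is a hedged skeleton of a minimal tty driver module; the my_tty_* names, the device count and the loopback write behaviour are made up for illustration, only the tty core calls are real:
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#define MY_TTY_MINORS 1 /* one made-up port */
static struct tty_driver *my_tty_driver;
static int my_tty_open(struct tty_struct *tty, struct file *filp)
{
    return 0; /* nothing to set up in this sketch */
}
static void my_tty_close(struct tty_struct *tty, struct file *filp)
{
}
/* Pretend the hardware accepted everything and loop it back to the reader. */
static int my_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
    int i;
    for (i = 0; i < count; i++)
        tty_insert_flip_char(tty, buf[i], TTY_NORMAL);
    tty_flip_buffer_push(tty);
    return count;
}
static int my_tty_write_room(struct tty_struct *tty)
{
    return 256; /* arbitrary amount of room for the sketch */
}
static const struct tty_operations my_tty_ops = {
    .open = my_tty_open,
    .close = my_tty_close,
    .write = my_tty_write,
    .write_room = my_tty_write_room,
};
static int __init my_tty_init(void)
{
    int ret;
    my_tty_driver = alloc_tty_driver(MY_TTY_MINORS); /* step 1 */
    if (!my_tty_driver)
        return -ENOMEM;
    my_tty_driver->owner = THIS_MODULE; /* step 2 */
    my_tty_driver->driver_name = "my_tty";
    my_tty_driver->name = "mytty";
    my_tty_driver->major = 0; /* 0 = let the core allocate one */
    my_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
    my_tty_driver->subtype = SERIAL_TYPE_NORMAL;
    my_tty_driver->flags = TTY_DRIVER_REAL_RAW;
    my_tty_driver->init_termios = tty_std_termios; /* safe defaults */
    tty_set_operations(my_tty_driver, &my_tty_ops);
    ret = tty_register_driver(my_tty_driver); /* step 3 */
    if (ret)
        put_tty_driver(my_tty_driver);
    return ret;
}
static void __exit my_tty_exit(void)
{
    tty_unregister_driver(my_tty_driver);
    put_tty_driver(my_tty_driver);
}
module_init(my_tty_init);
module_exit(my_tty_exit);
MODULE_LICENSE("GPL");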