1. Starting from the close system call
From user space, the close() system call enters the kernel. Linux system calls are defined through a family of macros; the close entry point lives in fs/open.c.
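For orientation, everything below is triggered by the ordinary POSIX API; a minimal user-space sketch (the device path /dev/ttyS0 is only an example):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* open() allocates a slot in the process's fd table */
        int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* close() traps into the kernel entry shown below */
        if (close(fd) < 0)
                perror("close");
        return 0;
}

The kernel-side definition in fs/open.c: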
SYSCALL_DEFINE1(close, unsigned int, fd)
{
        int retval = __close_fd(current->files, fd);

        /* can't restart close syscall because file table entry was cleared */
        if (unlikely(retval == -ERESTARTSYS ||
                     retval == -ERESTARTNOINTR ||
                     retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK))
                retval = -EINTR;

        return retval;
}
int __close_fd(struct files_struct *files, unsigned fd)
{
        struct file *file;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        if (fd >= fdt->max_fds)
                goto out_unlock;
        file = fdt->fd[fd];
        if (!file)
                goto out_unlock;
        rcu_assign_pointer(fdt->fd[fd], NULL);
        __clear_close_on_exec(fd, fdt);
        __put_unused_fd(files, fd);
        spin_unlock(&files->file_lock);
        return filp_close(file, files);

out_unlock:
        spin_unlock(&files->file_lock);
        return -EBADF;
}
__close_fd() receives two arguments. The first, current->files, points to the current process's files_struct, defined as follows:
/*
 * Open file table structure
 */
struct files_struct {
        /*
         * read mostly part
         */
        atomic_t count;
        struct fdtable __rcu *fdt;
        struct fdtable fdtab;
        /*
         * written part on a separate cache line in SMP
         */
        spinlock_t file_lock ____cacheline_aligned_in_smp;
        int next_fd;
        unsigned long close_on_exec_init[1];
        unsigned long open_fds_init[1];
        struct file __rcu * fd_array[NR_OPEN_DEFAULT];
};
Every file a process opens is managed through this structure. Both fdt and fdtab are of type struct fdtable, which has the member struct file __rcu **fd: the array of struct file pointers for the files this process currently has open.
struct fdtable {
        unsigned int max_fds;
        struct file __rcu **fd;      /* current fd array */
        unsigned long *close_on_exec;
        unsigned long *open_fds;
        struct rcu_head rcu;
};
So these two lines are enough to retrieve the struct file of the file actually being closed:
fdt = files_fdtable(files);
file = fdt->fd[fd];
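Taken together, fd-to-file resolution is essentially an array index. A simplified sketch of the pattern (the helper name is ours; real kernel code does this under files->file_lock or RCU, as __close_fd() above shows):

#include <linux/fdtable.h>

/*
 * Hypothetical helper, for illustration only: the real kernel also
 * uses rcu_dereference() / locking around the fd array.
 */
static struct file *lookup_fd_example(struct files_struct *files, unsigned int fd)
{
        struct fdtable *fdt = files_fdtable(files);

        if (fd >= fdt->max_fds)         /* no such descriptor */
                return NULL;
        return fdt->fd[fd];             /* NULL if the slot is unused */
}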
From there the call chain is filp_close() ---> fput() ---> __fput() ---> file->f_op->release(), which for a tty device lands in the tty layer's release.
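For a tty device, file->f_op->release() resolves to tty_release() through the tty layer's file_operations. To illustrate the general mechanism, this is how any character driver wires up .release (names are hypothetical):

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical driver: .release is what __fput() ends up calling */
static int demo_release(struct inode *inode, struct file *filp)
{
        /* per-handle cleanup goes here */
        return 0;
}

static const struct file_operations demo_fops = {
        .owner   = THIS_MODULE,
        .release = demo_release,
};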
2. From the tty layer to the uart driver's close
/**
* tty_release - vfs callback for close
* @inode: inode of tty
* @filp: file pointer for handle to tty
*
* Called the last time each file handle is closed that references
* this tty. There may however be several such references.
*
* Locking:
* Takes bkl. See tty_release_dev
*
* Even releasing the tty structures is a tricky business.. We have
* to be very careful that the structures are all released at the
* same time, as interrupts might otherwise get the wrong pointers.
*
* WSH 09/09/97: rewritten to avoid some nasty race conditions that could
* lead to double frees or releasing memory still in use.
*/
int tty_release(struct inode *inode, struct file *filp)
{
        struct tty_struct *tty = file_tty(filp);
        .......
        if (tty->ops->close)
                tty->ops->close(tty, filp);
        .......
        return 0;
}
So where is this close actually implemented, and how does it get installed into the tty_struct?
tty_struct is initialized in tty_open(), not when the tty driver is registered. In drivers/tty/tty_io.c:
tty_open() ---> tty_init_dev() ---> initialize_tty_struct()
which ends up doing:
        tty->ops = driver->ops;
So a tty_struct's ops is simply its tty_driver's ops.
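In other words, whatever struct tty_operations the driver handed to the tty core becomes reachable as tty->ops. A minimal sketch of that wiring (driver names are hypothetical):

#include <linux/tty.h>
#include <linux/tty_driver.h>

/*
 * Hypothetical tty driver: this is what tty->ops->close(tty, filp)
 * resolves to after initialize_tty_struct() copied driver->ops.
 */
static void demo_tty_close(struct tty_struct *tty, struct file *filp)
{
        /* per-open teardown for this tty */
}

static const struct tty_operations demo_tty_ops = {
        .close = demo_tty_close,
        /* .open, .write, ... omitted */
};

/* At registration time the driver calls:
 *     tty_set_operations(driver, &demo_tty_ops);
 * later, tty_init_dev() does tty->ops = driver->ops. */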
The tty_driver itself is registered by the serial core: when a uart driver initializes, it registers a tty_driver and installs the ops:
int uart_register_driver(struct uart_driver *drv)
{
        struct tty_driver *normal;
        int i, retval;

        BUG_ON(drv->state);

        /*
         * Maybe we should be using a slab cache for this, especially if
         * we have a large number of ports to handle.
         */
        drv->state = kzalloc(sizeof(struct uart_state) * drv->nr, GFP_KERNEL);
        if (!drv->state)
                goto out;

        normal = alloc_tty_driver(drv->nr);
        if (!normal)
                goto out_kfree;

        drv->tty_driver = normal;

        normal->driver_name = drv->driver_name;
        normal->name        = drv->dev_name;
        normal->major       = drv->major;
        normal->minor_start = drv->minor;
        normal->type        = TTY_DRIVER_TYPE_SERIAL;
        normal->subtype     = SERIAL_TYPE_NORMAL;
        normal->init_termios = tty_std_termios;
        normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
        normal->init_termios.c_ispeed = normal->init_termios.c_ospeed = 9600;
        normal->flags       = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
        normal->driver_state = drv;
        tty_set_operations(normal, &uart_ops);

        /*
         * Initialise the UART state(s).
         */
        for (i = 0; i < drv->nr; i++) {
                struct uart_state *state = drv->state + i;
                struct tty_port *port = &state->port;

                tty_port_init(port);
                port->ops = &uart_port_ops;
                port->close_delay   = HZ / 2;   /* .5 seconds */
                port->closing_wait  = 30 * HZ;  /* 30 seconds */
        }

        retval = tty_register_driver(normal);
        if (retval >= 0)
                return retval;

        for (i = 0; i < drv->nr; i++)
                tty_port_destroy(&drv->state[i].port);
        put_tty_driver(normal);
out_kfree:
        kfree(drv->state);
out:
        return -ENOMEM;
}
This ops is uart_ops, so close has now crossed from the tty layer into the serial core, in drivers/tty/serial/serial_core.c.
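For context, a concrete uart driver hands its uart_driver to this function at init time; a minimal sketch (all names are hypothetical):

#include <linux/module.h>
#include <linux/serial_core.h>

/* Hypothetical uart driver registration: this is what installs uart_ops
 * (via the tty_driver built in uart_register_driver above) for the tty layer. */
static struct uart_driver demo_uart_driver = {
        .owner       = THIS_MODULE,
        .driver_name = "demo_uart",
        .dev_name    = "ttyDEMO",
        .nr          = 1,
};

static int __init demo_uart_init(void)
{
        int ret = uart_register_driver(&demo_uart_driver);
        if (ret)
                return ret;
        /* each physical port is then added with uart_add_one_port() */
        return 0;
}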
From here the path is straightforward:
uart_close() ---> uart_shutdown() ---> uart_port_shutdown() ---> uport->ops->shutdown()
static void uart_port_shutdown(struct tty_port *port)
{
        struct uart_state *state = container_of(port, struct uart_state, port);
        struct uart_port *uport = state->uart_port;

        /*
         * clear delta_msr_wait queue to avoid mem leaks: we may free
         * the irq here so the queue might never be woken up. Note
         * that we won't end up waiting on delta_msr_wait again since
         * any outstanding file descriptors should be pointing at
         * hung_up_tty_fops now.
         */
        wake_up_interruptible(&port->delta_msr_wait);

        /*
         * Free the IRQ and disable the port.
         */
        uport->ops->shutdown(uport);

        /*
         * Ensure that the IRQ handler isn't running on another CPU.
         */
        synchronize_irq(uport->irq);
}
Here uport->ops is the struct uart_ops supplied by the actual uart hardware driver. Since the tty and serial layers have already done most of the work, only a few hardware-specific operations remain at the driver level.
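To make that endpoint concrete: a hardware uart driver only has to fill in struct uart_ops, and uport->ops->shutdown(uport) lands in its .shutdown hook. A hedged sketch (names hypothetical, register accesses omitted because they are chip-specific):

#include <linux/interrupt.h>
#include <linux/serial_core.h>

/* Hypothetical uart driver: typical duties here are masking/disabling
 * the port's interrupt sources in hardware and freeing the IRQ. */
static void demo_uart_shutdown(struct uart_port *port)
{
        /* disable receiver/transmitter and interrupt sources (omitted) */
        free_irq(port->irq, port);
}

static const struct uart_ops demo_uart_port_ops = {
        .shutdown = demo_uart_shutdown,
        /* .startup, .tx_empty, .set_termios, ... omitted */
};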