Input event handling flow: input driver -> input core -> event handler -> userspace, where the application finally gets the events.
void input_event(struct input_dev *dev,
                 unsigned int type, unsigned int code, int value)
{
    unsigned long flags;

    if (is_event_supported(type, dev->evbit, EV_MAX)) {
        spin_lock_irqsave(&dev->event_lock, flags);
        input_handle_event(dev, type, code, value);
        spin_unlock_irqrestore(&dev->event_lock, flags);
    }
}
input_event() first checks whether the event type is supported by this device (is_event_supported() tests dev->evbit), and then enters the core handling routine with dev->event_lock held.
static void input_handle_event(struct input_dev *dev,
                               unsigned int type, unsigned int code, int value)
{
    int disposition;

    disposition = input_get_disposition(dev, type, code, value);

    if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
        dev->event(dev, type, code, value);

    if (!dev->vals)
        return;

    if (disposition & INPUT_PASS_TO_HANDLERS) {
        struct input_value *v;

        if (disposition & INPUT_SLOT) {
            v = &dev->vals[dev->num_vals++];
            v->type = EV_ABS;
            v->code = ABS_MT_SLOT;
            v->value = dev->mt->slot;
        }

        v = &dev->vals[dev->num_vals++];
        v->type = type;
        v->code = code;
        v->value = value;
    }

    if (disposition & INPUT_FLUSH) {
        if (dev->num_vals >= 2)
            input_pass_values(dev, dev->vals, dev->num_vals);
        dev->num_vals = 0;
    } else if (dev->num_vals >= dev->max_vals - 2) {
        dev->vals[dev->num_vals++] = input_value_sync;
        input_pass_values(dev, dev->vals, dev->num_vals);
        dev->num_vals = 0;
    }
}
input_get_disposition() determines who should handle the event. INPUT_PASS_TO_HANDLERS means it is handed to the input handlers, INPUT_PASS_TO_DEVICE means it is passed back to the input device itself, and INPUT_FLUSH means the handlers must process the queued values immediately. For a normal event the return value is usually INPUT_PASS_TO_HANDLERS; only when the code is SYN_REPORT does it return INPUT_PASS_TO_HANDLERS | INPUT_FLUSH. The following fragment of input_get_disposition() deserves a closer look:
case EV_ABS:
    if (is_event_supported(code, dev->absbit, ABS_MAX))
        disposition = input_handle_abs_event(dev, code, &value);
static int input_handle_abs_event(struct input_dev *dev,
                                  unsigned int code, int *pval)
{
    struct input_mt *mt = dev->mt;
    bool is_mt_event;
    int *pold;

    if (code == ABS_MT_SLOT) {
        /*
         * "Stage" the event; we'll flush it later, when we
         * get actual touch data.
         */
        if (mt && *pval >= 0 && *pval < mt->num_slots)
            mt->slot = *pval;

        return INPUT_IGNORE_EVENT;
    }

    is_mt_event = input_is_mt_value(code);

    if (!is_mt_event) {
        pold = &dev->absinfo[code].value;
    } else if (mt) {
        pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
    } else {
        /*
         * Bypass filtering for multi-touch events when
         * not employing slots.
         */
        pold = NULL;
    }

    if (pold) {
        *pval = input_defuzz_abs_event(*pval, *pold,
                                       dev->absinfo[code].fuzz);
        if (*pold == *pval)
            return INPUT_IGNORE_EVENT;

        *pold = *pval;
    }

    /* Flush pending "slot" event */
    if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
        input_abs_set_val(dev, ABS_MT_SLOT, mt->slot);
        return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
    }

    return INPUT_PASS_TO_HANDLERS;
}
Let's look at the filtering first. If the code does not fall between ABS_MT_FIRST and ABS_MT_LAST, this is a single-touch report (for example ABS_X); otherwise it is a multi-touch report. The two cases store their values in different places, so pold is obtained differently: dev->absinfo[code].value for single-touch, and the per-slot storage mt->slots[mt->slot] for slot-based multi-touch (pold is where the filtered value is written back, *pold = *pval). input_defuzz_abs_event() compares the current value against the previous one and, if they are effectively the same, drops the event so nothing is generated. For multi-touch events this filtering only happens with type B (slot-based) reporting; the type B framework does not clear slot state after a sync, so the stored per-slot values must stay accurate, whereas type A clears its contacts after every sync and bypasses the filter (pold = NULL).
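As a concrete illustration, here is a hedged sketch of how a type B (slot based) touch driver might report one contact; the demo_* name and the slot/x/y parameters are hypothetical. The ABS_MT_SLOT value emitted by input_mt_slot() is only staged in mt->slot and gets flushed once real touch data follows.

/* Sketch only -- demo_report_contact() is hypothetical (kernel side,
 * needs <linux/input/mt.h>). */
static void demo_report_contact(struct input_dev *dev, int slot, int x, int y)
{
    input_mt_slot(dev, slot);                     /* EV_ABS/ABS_MT_SLOT: staged in mt->slot, INPUT_IGNORE_EVENT */
    input_mt_report_slot_state(dev, MT_TOOL_FINGER, true);
    input_report_abs(dev, ABS_MT_POSITION_X, x);  /* defuzzed against this slot's stored value */
    input_report_abs(dev, ABS_MT_POSITION_Y, y);
    input_sync(dev);                              /* EV_SYN/SYN_REPORT: disposition gains INPUT_FLUSH */
}

Back on the delivery path, input_pass_values() walks the device's opened handles (or just the grabbing one) and calls input_to_handler() for each of them: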
static unsigned int input_to_handler(struct input_handle *handle,
                                     struct input_value *vals, unsigned int count)
{
    struct input_handler *handler = handle->handler;
    struct input_value *end = vals;
    struct input_value *v;

    for (v = vals; v != vals + count; v++) {
        if (handler->filter &&
            handler->filter(handle, v->type, v->code, v->value))
            continue;
        if (end != v)
            *end = *v;
        end++;
    }

    count = end - vals;
    if (!count)
        return 0;

    if (handler->events)
        handler->events(handle, vals, count);
    else if (handler->event)
        for (v = vals; v != end; v++)
            handler->event(handle, v->type, v->code, v->value);

    return count;
}
The values are first run through handler->filter: anything the filter claims is dropped and the array is compacted in place. What remains is handed to handler->events or handler->event; the former processes the whole batch at once, the latter is called once per event.
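To make the handler side concrete, here is a minimal sketch of a single-event handler modelled on the in-tree evbug driver; the demo_* names are hypothetical, and connect()/disconnect() plus input_register_handler() are left out. Because it provides no events() callback, input_to_handler() falls back to calling event() once per value.

/* Modelled on drivers/input/evbug.c; needs <linux/input.h> and <linux/module.h>. */
static void demo_event(struct input_handle *handle, unsigned int type,
                       unsigned int code, int value)
{
    pr_debug("demo: %s type %u code %u value %d\n",
             dev_name(&handle->dev->dev), type, code, value);
}

static const struct input_device_id demo_ids[] = {
    { .driver_info = 1 },   /* match every input device */
    { },
};

static struct input_handler demo_handler = {
    .event    = demo_event,  /* called once per input_value by input_to_handler() */
    .name     = "demo",
    .id_table = demo_ids,
    /* .connect / .disconnect omitted in this sketch */
};

evdev takes the batch path instead; its events() callback is evdev_events():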
static void evdev_events(struct input_handle *handle,
                         const struct input_value *vals, unsigned int count)
{
    struct evdev *evdev = handle->private;
    struct evdev_client *client;
    ktime_t time_mono, time_real;

    time_mono = ktime_get();
    time_real = ktime_sub(time_mono, ktime_get_monotonic_offset());

    rcu_read_lock();

    client = rcu_dereference(evdev->grab);

    if (client)
        evdev_pass_values(client, vals, count, time_mono, time_real);
    else
        list_for_each_entry_rcu(client, &evdev->client_list, node)
            evdev_pass_values(client, vals, count,
                              time_mono, time_real);

    rcu_read_unlock();
}
If a grabbing client is set (the client here is an evdev_client), the values go only to it; otherwise every client on evdev->client_list receives a copy. When were those clients hung on client_list? In evdev_open() -> evdev_attach_client() -> list_add_tail_rcu(&client->node, &evdev->client_list). evdev_pass_values() then hands the work over to each client.
static void evdev_pass_values(struct evdev_client *client,
                              const struct input_value *vals, unsigned int count,
                              ktime_t mono, ktime_t real)
{
    struct evdev *evdev = client->evdev;
    const struct input_value *v;
    struct input_event event;
    bool wakeup = false;

    event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ?
                                  mono : real);

    /* Interrupts are disabled, just acquire the lock. */
    spin_lock(&client->buffer_lock);

    for (v = vals; v != vals + count; v++) {
        event.type = v->type;
        event.code = v->code;
        event.value = v->value;
        __pass_event(client, &event);
        if (v->type == EV_SYN && v->code == SYN_REPORT)
            wakeup = true;
    }

    spin_unlock(&client->buffer_lock);

    if (wakeup)
        wake_up_interruptible(&evdev->wait);
}
At this point each input_value is converted into an input_event, mainly in order to attach a timestamp. Every input_event goes through __pass_event(); once an EV_SYN/SYN_REPORT is seen, the wakeup flag is set and evdev->wait is woken up. That wait queue was initialized at connect time with init_waitqueue_head(&evdev->wait). Why wake it? A reader that opened the device without O_NONBLOCK sleeps when there is nothing to read, so it has to be woken once a complete packet sits in the client's buffer.
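A hedged userspace sketch of that blocking behaviour; the device path is only an example. Opened without O_NONBLOCK, read() sleeps on evdev->wait until a complete packet arrives; with O_NONBLOCK it would return -EAGAIN instead.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
    struct input_event ev;
    int fd = open("/dev/input/event0", O_RDONLY);   /* no O_NONBLOCK: read() may sleep */

    if (fd < 0)
        return 1;

    /* Each read() blocks in evdev_read() until a full packet
     * (terminated by EV_SYN/SYN_REPORT) has been queued. */
    while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
        printf("type %u code %u value %d\n", ev.type, ev.code, ev.value);

    close(fd);
    return 0;
}

__pass_event() is where each event actually lands in the client's ring buffer: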
static void __pass_event(struct evdev_client *client,
                         const struct input_event *event)
{
    client->buffer[client->head++] = *event;
    client->head &= client->bufsize - 1;

    if (unlikely(client->head == client->tail)) {
        /*
         * This effectively "drops" all unconsumed events, leaving
         * EV_SYN/SYN_DROPPED plus the newest event in the queue.
         */
        client->tail = (client->head - 2) & (client->bufsize - 1);

        client->buffer[client->tail].time = event->time;
        client->buffer[client->tail].type = EV_SYN;
        client->buffer[client->tail].code = SYN_DROPPED;
        client->buffer[client->tail].value = 0;

        client->packet_head = client->tail;
        if (client->use_wake_lock)
            wake_unlock(&client->wake_lock);
    }

    if (event->type == EV_SYN && event->code == SYN_REPORT) {
        client->packet_head = client->head;
        if (client->use_wake_lock)
            wake_lock(&client->wake_lock);
        kill_fasync(&client->fasync, SIGIO, POLL_IN);
    }
}
Meaning of some of the fields in evdev_client:
packet_head: head of the most recent complete packet;
head: a running index, incremented every time an event is added to the buffer;
tail: also a running index, incremented every time an event is taken out of the buffer;
buffer: the event store, organized as a ring.
__pass_event() puts the data into client->buffer.
My own reading: client->bufsize is a power of two, and client->head &= client->bufsize - 1 wraps the index so it never runs past the end of the buffer. When client->head == client->tail right after an insert, the writer has lapped the reader, i.e. user space is consuming too slowly and unread events would be overwritten; the code then discards the stale events and queues an EV_SYN/SYN_DROPPED marker instead. When an EV_SYN/SYN_REPORT arrives, one packet is complete: packet_head is advanced, a wake lock is taken with wake_lock(&client->wake_lock) (released again once user space has drained the packet), and kill_fasync() delivers SIGIO with POLL_IN to any process that requested asynchronous notification, signalling that the device is readable.
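A standalone sketch of that power-of-two ring arithmetic may make it clearer; the struct and the demo_ring_* helpers below are illustrative, not the kernel's, they just replay the same index handling.

#include <stdbool.h>

struct demo_ring {
    unsigned int head;      /* next write position */
    unsigned int tail;      /* next read position */
    unsigned int bufsize;   /* must be a power of two */
    int buf[64];
};

static bool demo_ring_put(struct demo_ring *r, int v)
{
    r->buf[r->head++] = v;
    r->head &= r->bufsize - 1;      /* power-of-two size: & replaces a modulo */
    return r->head != r->tail;      /* false: the writer has lapped the reader (overflow) */
}

static bool demo_ring_get(struct demo_ring *r, int *v)
{
    if (r->head == r->tail)         /* empty: everything has been consumed */
        return false;
    *v = r->buf[r->tail++];
    r->tail &= r->bufsize - 1;
    return true;
}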
The delivery path in summary: the driver calls input_report_abs(), which enters the input core via input_event(); input_event() calls input_handle_event() to dispatch the event, which in turn calls input_pass_values() to hand the values to the matching handlers; the handler's event routine then wraps each value in an input_event and drops it into the buffer of every client on evdev's client_list, where it waits for user space to read it.
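To see where this chain starts, here is a hedged sketch of the driver end; the demo_* names, the axis range, and the fuzz value are invented for illustration (kernel side, needs <linux/input.h>).

static void demo_setup_capabilities(struct input_dev *dev)
{
    __set_bit(EV_ABS, dev->evbit);                    /* so is_event_supported(EV_ABS, ...) passes */
    input_set_abs_params(dev, ABS_X, 0, 1023, 4, 0);  /* fuzz = 4 feeds input_defuzz_abs_event() */
    input_set_abs_params(dev, ABS_Y, 0, 1023, 4, 0);
}

static void demo_report_point(struct input_dev *dev, int x, int y)
{
    input_report_abs(dev, ABS_X, x);    /* wrapper around input_event(dev, EV_ABS, ABS_X, x) */
    input_report_abs(dev, ABS_Y, y);
    input_sync(dev);                    /* EV_SYN/SYN_REPORT -> flush to the handlers */
}

Reading the queued events back out of the client buffer happens through the evdev file operations: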
static const struct file_operations evdev_fops = {
    .owner          = THIS_MODULE,
    .read           = evdev_read,
    .write          = evdev_write,
    .poll           = evdev_poll,
    .open           = evdev_open,
    .release        = evdev_release,
    .unlocked_ioctl = evdev_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl   = evdev_ioctl_compat,
#endif
    .fasync         = evdev_fasync,
    .flush          = evdev_flush,
    .llseek         = no_llseek,
};
static int evdev_open(struct inode *inode, struct file *file)
{
    struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
    unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
    struct evdev_client *client;
    int error;

    client = kzalloc(sizeof(struct evdev_client) +
                     bufsize * sizeof(struct input_event),
                     GFP_KERNEL);
    if (!client)
        return -ENOMEM;

    client->bufsize = bufsize;
    spin_lock_init(&client->buffer_lock);
    snprintf(client->name, sizeof(client->name), "%s-%d",
             dev_name(&evdev->dev), task_tgid_vnr(current));
    client->evdev = evdev;
    evdev_attach_client(evdev, client);

    error = evdev_open_device(evdev);
    if (error)
        goto err_free_client;

    file->private_data = client;
    nonseekable_open(inode, file);

    return 0;

 err_free_client:
    evdev_detach_client(evdev, client);
    kfree(client);
    return error;
}
How is the evdev structure found? inode->i_cdev points at the cdev embedded in evdev, so container_of() recovers the enclosing structure; that cdev was registered for this eventX node when evdev was initialized, which is again something connect() did. bufsize is max(dev->hint_events_per_packet * EVDEV_BUF_PACKETS, EVDEV_MIN_BUFFER_SIZE), rounded up to a power of two. The client finally makes its appearance here: evdev_attach_client() -> list_add_tail_rcu(&client->node, &evdev->client_list) hangs it on evdev->client_list, which is how the pass-values path can later find the right clients.
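For reference, evdev_compute_buffer_size() in this kernel generation looks roughly like the sketch below (paraphrased from memory, not a verbatim copy):

static unsigned int evdev_compute_buffer_size(struct input_dev *dev)
{
    unsigned int n_events =
        max(dev->hint_events_per_packet * EVDEV_BUF_PACKETS,
            EVDEV_MIN_BUFFER_SIZE);

    return roundup_pow_of_two(n_events);
}

evdev_open() then calls evdev_open_device():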
static int evdev_open_device(struct evdev *evdev)
{
    int retval;

    retval = mutex_lock_interruptible(&evdev->mutex);
    if (retval)
        return retval;

    if (!evdev->exist)
        retval = -ENODEV;
    else if (!evdev->open++) {
        retval = input_open_device(&evdev->handle);
        if (retval)
            evdev->open--;
    }

    mutex_unlock(&evdev->mutex);
    return retval;
}
Obviously evdev->exist = true is also set at connect time. If the open succeeds, the evdev->open count is updated; on the very first open, input_open_device() is called on the handle.
int input_open_device(struct input_handle *handle)
{
    struct input_dev *dev = handle->dev;
    int retval;

    retval = mutex_lock_interruptible(&dev->mutex);
    if (retval)
        return retval;

    if (dev->going_away) {
        retval = -ENODEV;
        goto out;
    }

    handle->open++;

    if (!dev->users++ && dev->open)
        retval = dev->open(dev);

    if (retval) {
        dev->users--;
        if (!--handle->open) {
            /*
             * Make sure we are not delivering any more events
             * through this handle
             */
            synchronize_rcu();
        }
    }

 out:
    mutex_unlock(&dev->mutex);
    return retval;
}
static ssize_t evdev_read(struct file *file, char __user *buffer,
                          size_t count, loff_t *ppos)
{
    struct evdev_client *client = file->private_data;
    struct evdev *evdev = client->evdev;
    struct input_event event;
    size_t read = 0;
    int error;

    if (count != 0 && count < input_event_size())
        return -EINVAL;

    for (;;) {
        if (!evdev->exist)
            return -ENODEV;

        if (client->packet_head == client->tail &&
            (file->f_flags & O_NONBLOCK))
            return -EAGAIN;

        /*
         * count == 0 is special - no IO is done but we check
         * for error conditions (see above).
         */
        if (count == 0)
            break;

        while (read + input_event_size() <= count &&
               evdev_fetch_next_event(client, &event)) {
            if (input_event_to_user(buffer + read, &event))
                return -EFAULT;

            read += input_event_size();
        }

        if (read)
            break;

        if (!(file->f_flags & O_NONBLOCK)) {
            error = wait_event_interruptible(evdev->wait,
                                             client->packet_head != client->tail ||
                                             !evdev->exist);
            if (error)
                return error;
        }
    }

    return read;
}
static int evdev_fetch_next_event(struct evdev_client *client,
                                  struct input_event *event)
{
    int have_event;

    spin_lock_irq(&client->buffer_lock);

    have_event = client->packet_head != client->tail;
    if (have_event) {
        *event = client->buffer[client->tail++];
        client->tail &= client->bufsize - 1;
        if (client->use_wake_lock &&
            client->packet_head == client->tail)
            wake_unlock(&client->wake_lock);
    }

    spin_unlock_irq(&client->buffer_lock);

    return have_event;
}
If packet_head and tail differ, there is data in the ring buffer, so an event is taken out directly; don't forget that the running index is advanced with client->tail++. When the two indexes meet again, all buffered data has been consumed, and wake_unlock(&client->wake_lock) is called, because __pass_event() took that wake lock with wake_lock(&client->wake_lock) when it queued the packet.
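Putting the reader's side together, here is a hedged userspace sketch that waits with poll() before reading; the device path is only an example. evdev_poll() reports POLLIN exactly when client->packet_head != client->tail, i.e. when at least one complete packet is buffered.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
    struct input_event ev;
    struct pollfd pfd = {
        .fd     = open("/dev/input/event0", O_RDONLY | O_NONBLOCK),
        .events = POLLIN,
    };

    if (pfd.fd < 0)
        return 1;

    /* poll() returns once evdev_poll() reports POLLIN, i.e. a full
     * packet is sitting in the client buffer; then drain it. */
    while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
        while (read(pfd.fd, &ev, sizeof(ev)) == sizeof(ev))
            printf("type %u code %u value %d\n",
                   ev.type, ev.code, ev.value);
    }

    close(pfd.fd);
    return 0;
}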