struct spi_device {
    struct device dev;
    struct spi_master *master;
    u32 max_speed_hz;
    u8 chip_select;
    u8 mode;
#define SPI_CPHA        0x01            /* clock phase */
#define SPI_CPOL        0x02            /* clock polarity */
#define SPI_MODE_0      (0|0)           /* (original MicroWire) */
#define SPI_MODE_1      (0|SPI_CPHA)
#define SPI_MODE_2      (SPI_CPOL|0)
#define SPI_MODE_3      (SPI_CPOL|SPI_CPHA)
#define SPI_CS_HIGH     0x04            /* chipselect active high? */
#define SPI_LSB_FIRST   0x08            /* per-word bits-on-wire */
#define SPI_3WIRE       0x10            /* SI/SO signals shared */
#define SPI_LOOP        0x20            /* loopback mode */
#define SPI_NO_CS       0x40            /* 1 dev/bus, no chipselect */
#define SPI_READY       0x80            /* slave pulls low to pause */
    u8 bits_per_word;
    int irq;
    void *controller_state;
    void *controller_data;
    char modalias[SPI_NAME_SIZE];
    int cs_gpio;                        /* chip select gpio */

    /*
     * likely need more hooks for more protocol options affecting how
     * the controller talks to each chip, like:
     *  - memory packing (12 bit samples into low bits, others zeroed)
     *  - priority
     *  - drop chipselect after each word
     *  - chipselect delays
     *  - ...
     */
};
An spi_device is the SPI slave device, the counterpart of i2c_client in the I2C world. It must be attached to an spi_master.
struct spi_master {
    struct device dev;

    struct list_head list;

    /* other than negative (== assign one dynamically), bus_num is fully
     * board-specific. usually that simplifies to being SOC-specific.
     * example: one SOC has three SPI controllers, numbered 0..2,
     * and one board's schematics might show it using SPI-2. software
     * would normally use bus_num=2 for that controller.
     */
    s16 bus_num;

    /* chipselects will be integral to many controllers; some others
     * might use board-specific GPIOs.
     */
    u16 num_chipselect;

    /* some SPI controllers pose alignment requirements on DMAable
     * buffers; let protocol drivers know about these requirements.
     */
    u16 dma_alignment;

    /* spi_device.mode flags understood by this controller driver */
    u16 mode_bits;

    /* other constraints relevant to this driver */
    u16 flags;
#define SPI_MASTER_HALF_DUPLEX  BIT(0)  /* can't do full duplex */
#define SPI_MASTER_NO_RX        BIT(1)  /* can't do buffer read */
#define SPI_MASTER_NO_TX        BIT(2)  /* can't do buffer write */

    /* lock and mutex for SPI bus locking */
    spinlock_t bus_lock_spinlock;
    struct mutex bus_lock_mutex;

    /* flag indicating that the SPI bus is locked for exclusive use */
    bool bus_lock_flag;

    /* Setup mode and clock, etc (spi driver may call many times).
     *
     * IMPORTANT: this may be called when transfers to another
     * device are active. DO NOT UPDATE SHARED REGISTERS in ways
     * which could break those transfers.
     */
    int (*setup)(struct spi_device *spi);

    /* bidirectional bulk transfers
     *
     * + The transfer() method may not sleep; its main role is
     *   just to add the message to the queue.
     * + For now there's no remove-from-queue operation, or
     *   any other request management
     * + To a given spi_device, message queueing is pure fifo
     *
     * + The master's main job is to process its message queue,
     *   selecting a chip then transferring data
     * + If there are multiple spi_device children, the i/o queue
     *   arbitration algorithm is unspecified (round robin, fifo,
     *   priority, reservations, preemption, etc)
     *
     * + Chipselect stays active during the entire message
     *   (unless modified by spi_transfer.cs_change != 0).
     * + The message transfers use clock and SPI mode parameters
     *   previously established by setup() for this device
     */
    int (*transfer)(struct spi_device *spi, struct spi_message *mesg);

    /* called on release() to free memory provided by spi_master */
    void (*cleanup)(struct spi_device *spi);

    /*
     * These hooks are for drivers that want to use the generic
     * master transfer queueing mechanism. If these are used, the
     * transfer() function above must NOT be specified by the driver.
     * Over time we expect SPI drivers to be phased over to this API.
     */
    bool queued;
    struct kthread_worker kworker;
    struct task_struct *kworker_task;
    struct kthread_work pump_messages;
    spinlock_t queue_lock;
    struct list_head queue;
    struct spi_message *cur_msg;
    bool busy;
    bool running;
    bool rt;

    int (*prepare_transfer_hardware)(struct spi_master *master);
    int (*transfer_one_message)(struct spi_master *master,
                                struct spi_message *mesg);
    int (*unprepare_transfer_hardware)(struct spi_master *master);

    /* gpio chip select */
    int *cs_gpios;
};
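For reference, here is a minimal sketch of how a controller driver of this generation might allocate and register an spi_master in its probe(); the foo_* names, the private data layout, the chip-select count and the hook functions are all assumptions made up for illustration, not taken from a real driver.

struct foo_priv {
    void __iomem *regs;                 /* hypothetical register base */
};

static int foo_spi_probe(struct platform_device *pdev)
{
    struct spi_master *master;
    struct foo_priv *priv;
    int ret;

    /* allocate the spi_master together with driver-private data */
    master = spi_alloc_master(&pdev->dev, sizeof(*priv));
    if (!master)
        return -ENOMEM;
    priv = spi_master_get_devdata(master);
    priv->regs = NULL;                  /* ioremap of the controller would go here */

    master->bus_num = pdev->id;         /* board/SOC specific */
    master->num_chipselect = 4;         /* assumed wiring */
    master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
    master->setup = foo_spi_setup;                          /* hypothetical hooks */
    master->prepare_transfer_hardware = foo_prepare_hw;
    master->transfer_one_message = foo_transfer_one_message;
    master->unprepare_transfer_hardware = foo_unprepare_hw;

    /* with transfer left NULL and transfer_one_message set, registration
     * goes through spi_master_initialize_queue(), discussed below */
    ret = spi_register_master(master);
    if (ret)
        spi_master_put(master);
    return ret;
}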
struct spi_driver {
    const struct spi_device_id *id_table;
    int (*probe)(struct spi_device *spi);
    int (*remove)(struct spi_device *spi);
    void (*shutdown)(struct spi_device *spi);
    int (*suspend)(struct spi_device *spi, pm_message_t mesg);
    int (*resume)(struct spi_device *spi);
    struct device_driver driver;
};
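For comparison with i2c_driver, a minimal protocol (slave) driver skeleton might look like the following sketch; the "foo" chip and its callbacks are hypothetical.

static int foo_probe(struct spi_device *spi)
{
    /* the core has already applied the board_info fields to *spi */
    spi->bits_per_word = 8;
    return spi_setup(spi);              /* re-validate mode/speed with the master */
}

static int foo_remove(struct spi_device *spi)
{
    return 0;
}

static struct spi_driver foo_driver = {
    .driver = {
        .name  = "foo",
        .owner = THIS_MODULE,
    },
    .probe  = foo_probe,
    .remove = foo_remove,
};
module_spi_driver(foo_driver);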
struct spi_transfer {
    /* it's ok if tx_buf == rx_buf (right?)
     * for MicroWire, one buffer must be null
     * buffers must work with dma_*map_single() calls, unless
     *   spi_message.is_dma_mapped reports a pre-existing mapping
     */
    const void *tx_buf;
    void *rx_buf;
    unsigned len;

    dma_addr_t tx_dma;
    dma_addr_t rx_dma;

    unsigned cs_change:1;
    u8 bits_per_word;
    u16 delay_usecs;
    u32 speed_hz;

    struct list_head transfer_list;
};
struct spi_message {
    struct list_head transfers;

    struct spi_device *spi;

    unsigned is_dma_mapped:1;

    /* REVISIT: we might want a flag affecting the behavior of the
     * last transfer ... allowing things like "read 16 bit length L"
     * immediately followed by "read L bytes". Basically imposing
     * a specific message scheduling algorithm.
     *
     * Some controller drivers (message-at-a-time queue processing)
     * could provide that as their default scheduling algorithm. But
     * others (with multi-message pipelines) could need a flag to
     * tell them about such special cases.
     */

    /* completion is reported through a callback */
    void (*complete)(void *context);
    void *context;
    unsigned actual_length;
    int status;

    /* for optional use by whatever driver currently owns the
     * spi_message ... between calls to spi_async and then later
     * complete(), that's the spi_master controller driver.
     */
    struct list_head queue;
    void *state;
};
spi_transfer defines a pair of read/write buffers plus a transfer_list; through this list a transfer hangs itself on the transfers list of an spi_message, so the two structures together play the role of i2c_msg. The unit of SPI transmission is one spi_message.
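To make that relationship concrete, here is a sketch of how a protocol driver typically chains two spi_transfer entries into one spi_message and submits it with spi_sync(); the command/response layout of the "foo" chip is an assumption.

static int foo_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
    struct spi_message m;
    struct spi_transfer t[2];
    u8 cmd = reg;

    memset(t, 0, sizeof(t));
    spi_message_init(&m);               /* initializes m.transfers */

    t[0].tx_buf = &cmd;                 /* first transfer: write the command byte */
    t[0].len = 1;
    spi_message_add_tail(&t[0], &m);    /* hang transfer_list on m.transfers */

    t[1].rx_buf = val;                  /* second transfer: read the reply byte */
    t[1].len = 1;
    spi_message_add_tail(&t[1], &m);

    return spi_sync(spi, &m);           /* blocks until m.complete() fires */
}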
struct spi_board_info {
    /* the device name and module name are coupled, like platform_bus;
     * "modalias" is normally the driver name.
     *
     * platform_data goes to spi_device.dev.platform_data,
     * controller_data goes to spi_device.controller_data,
     * irq is copied too
     */
    char modalias[SPI_NAME_SIZE];
    const void *platform_data;
    void *controller_data;
    int irq;

    /* slower signaling on noisy or low voltage boards */
    u32 max_speed_hz;

    /* bus_num is board specific and matches the bus_num of some
     * spi_master that will probably be registered later.
     *
     * chip_select reflects how this chip is wired to that master;
     * it's less than num_chipselect.
     */
    u16 bus_num;
    u16 chip_select;

    /* mode becomes spi_device.mode, and is essential for chips
     * where the default of SPI_CS_HIGH = 0 is wrong.
     */
    u8 mode;

    /* ... may need additional spi_device chip config data here.
     * avoid stuff protocol drivers can set; but include stuff
     * needed to behave without being bound to a driver:
     *  - quirks like clock rate mattering when not selected
     */
};
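Board code usually declares an array of spi_board_info entries and registers them early, before the matching spi_master exists; they are matched by bus_num when that master is registered. A sketch (chip name, bus number and wiring are assumptions):

static struct spi_board_info foo_board_spi_devices[] __initdata = {
    {
        .modalias      = "foo",         /* binds to the "foo" spi_driver */
        .max_speed_hz  = 1000000,
        .bus_num       = 2,             /* controller SPI-2 on this board */
        .chip_select   = 0,
        .mode          = SPI_MODE_0,
    },
};

static void __init foo_board_init(void)
{
    spi_register_board_info(foo_board_spi_devices,
                            ARRAY_SIZE(foo_board_spi_devices));
}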
static int spi_master_initialize_queue(struct spi_master *master)
{
    int ret;

    master->queued = true;
    master->transfer = spi_queued_transfer;

    /* Initialize and start queue */
    ret = spi_init_queue(master);
    if (ret) {
        dev_err(&master->dev, "problem initializing queue\n");
        goto err_init_queue;
    }
    ret = spi_start_queue(master);
    if (ret) {
        dev_err(&master->dev, "problem starting queue\n");
        goto err_start_queue;
    }

    return 0;

err_start_queue:
err_init_queue:
    spi_destroy_queue(master);
    return ret;
}

As expected, the core supplies a transfer function of its own, spi_queued_transfer, which submits messages by queueing them.
static int spi_init_queue(struct spi_master *master)
{
    struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

    INIT_LIST_HEAD(&master->queue);
    spin_lock_init(&master->queue_lock);

    master->running = false;
    master->busy = false;

    init_kthread_worker(&master->kworker);
    master->kworker_task = kthread_run(kthread_worker_fn,
                                       &master->kworker,
                                       dev_name(&master->dev));
    if (IS_ERR(master->kworker_task)) {
        dev_err(&master->dev, "failed to create message pump task\n");
        return -ENOMEM;
    }
    init_kthread_work(&master->pump_messages, spi_pump_messages);

    /*
     * Master config will indicate if this controller should run the
     * message pump with high (realtime) priority to reduce the transfer
     * latency on the bus by minimising the delay between a transfer
     * request and the scheduling of the message pump thread. Without this
     * setting the message pump thread will remain at default priority.
     */
    if (master->rt) {
        dev_info(&master->dev,
                 "will run message pump with realtime priority\n");
        sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
    }

    return 0;
}
init_kthread_worker(&master->kworker); initializes a kthread worker. The worker structure holds the work items currently attached to this thread; initialization mainly sets up worker->lock, worker->work_list and worker->task.

master->kworker_task = kthread_run(...) creates a kernel thread: the thread function is kthread_worker_fn, its argument is &master->kworker, and the thread name is dev_name(&master->dev).

init_kthread_work(&master->pump_messages, spi_pump_messages); this sets up the work item, which will later be attached to a worker. It initializes (work)->node, a list head, presumably so the work can hang itself on the worker's work_list.

(work)->func = (fn); so spi_pump_messages becomes the work item's work function.

master->rt is a realtime flag. When it is set, the controller wants the message pump to run at high (realtime) priority to reduce transfer latency on the bus, i.e. to minimize the delay between a transfer request and the scheduling of the message pump thread; sched_setscheduler() is therefore called to switch the thread to the SCHED_FIFO policy. When it is not set, the message pump thread keeps the default priority.
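Stripped of the SPI details, the kthread_worker pattern used here looks roughly like the following sketch (using the same pre-4.9 helper names as this kernel version; the my_* names are made up):

struct my_ctx {
    struct kthread_worker worker;
    struct task_struct *task;
    struct kthread_work work;
};

static void my_work_fn(struct kthread_work *work)
{
    struct my_ctx *ctx = container_of(work, struct my_ctx, work);
    /* runs in ctx->task context, one work item at a time */
}

static int my_start(struct my_ctx *ctx)
{
    init_kthread_worker(&ctx->worker);
    ctx->task = kthread_run(kthread_worker_fn, &ctx->worker, "my_worker");
    if (IS_ERR(ctx->task))
        return PTR_ERR(ctx->task);
    init_kthread_work(&ctx->work, my_work_fn);

    /* hand the work to the worker thread; it may be re-queued later */
    queue_kthread_work(&ctx->worker, &ctx->work);
    return 0;
}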
static int spi_start_queue(struct spi_master *master)
{
    unsigned long flags;

    spin_lock_irqsave(&master->queue_lock, flags);

    if (master->running || master->busy) {
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return -EBUSY;
    }

    master->running = true;
    master->cur_msg = NULL;
    spin_unlock_irqrestore(&master->queue_lock, flags);

    queue_kthread_work(&master->kworker, &master->pump_messages);

    return 0;
}

The key step here is queue_kthread_work(&master->kworker, &master->pump_messages), which hands the pump_messages work item to the worker.
static void insert_kthread_work(struct kthread_worker *worker,
                                struct kthread_work *work,
                                struct list_head *pos)
{
    lockdep_assert_held(&worker->lock);

    list_add_tail(&work->node, pos);
    work->worker = worker;
    if (likely(worker->task))
        wake_up_process(worker->task);
}

As expected, the work adds itself to the worker's list (list_add_tail(&work->node, pos)) and records the worker it now belongs to; if the worker thread exists (worker->task is non-NULL), it is woken with wake_up_process(worker->task).
int kthread_worker_fn(void *worker_ptr)
{
    struct kthread_worker *worker = worker_ptr;
    struct kthread_work *work;

    WARN_ON(worker->task);
    worker->task = current;
repeat:
    set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */

    if (kthread_should_stop()) {
        __set_current_state(TASK_RUNNING);
        spin_lock_irq(&worker->lock);
        worker->task = NULL;
        spin_unlock_irq(&worker->lock);
        return 0;
    }

    work = NULL;
    spin_lock_irq(&worker->lock);
    if (!list_empty(&worker->work_list)) {
        work = list_first_entry(&worker->work_list,
                                struct kthread_work, node);
        list_del_init(&work->node);
    }
    worker->current_work = work;
    spin_unlock_irq(&worker->lock);

    if (work) {
        __set_current_state(TASK_RUNNING);
        work->func(work);
    } else if (!freezing(current))
        schedule();

    try_to_freeze();
    goto repeat;
}

The thread loops over worker->work_list, takes the first work item off the list (it must be removed, otherwise it would be executed repeatedly) and calls work->func(work); if no work item is pending, it calls schedule() and sleeps. Given the initialization above, the work item here is &master->pump_messages, queued by spi_start_queue() -> queue_kthread_work(&master->kworker, &master->pump_messages) -> insert_kthread_work() -> list_add_tail(&work->node, pos); its work function is spi_pump_messages, filled in by spi_init_queue() -> init_kthread_work(&master->pump_messages, spi_pump_messages) -> ((work)->func = (fn)). At this point spi_pump_messages is up and running.
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
    struct spi_master *master = spi->master;
    unsigned long flags;

    spin_lock_irqsave(&master->queue_lock, flags);

    if (!master->running) {
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return -ESHUTDOWN;
    }
    msg->actual_length = 0;
    msg->status = -EINPROGRESS;

    list_add_tail(&msg->queue, &master->queue);
    if (master->running && !master->busy)
        queue_kthread_work(&master->kworker, &master->pump_messages);

    spin_unlock_irqrestore(&master->queue_lock, flags);
    return 0;
}

Recall the assignment made in spi_master_initialize_queue(): master->transfer = spi_queued_transfer. Every message submitted to this master therefore lands here: it is appended to master->queue, and if the pump is not already busy, the pump_messages work is queued onto the worker again.
static void spi_pump_messages(struct kthread_work *work)
{
    struct spi_master *master =
        container_of(work, struct spi_master, pump_messages);
    unsigned long flags;
    bool was_busy = false;
    int ret;

    /* Lock queue and check for queue work */
    spin_lock_irqsave(&master->queue_lock, flags);
    if (list_empty(&master->queue) || !master->running) {
        if (master->busy && master->unprepare_transfer_hardware) {
            ret = master->unprepare_transfer_hardware(master);
            if (ret) {
                spin_unlock_irqrestore(&master->queue_lock, flags);
                dev_err(&master->dev,
                        "failed to unprepare transfer hardware\n");
                return;
            }
        }
        master->busy = false;
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return;
    }

    /* Make sure we are not already running a message */
    if (master->cur_msg) {
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return;
    }

    /* Extract head of queue */
    master->cur_msg =
        list_entry(master->queue.next, struct spi_message, queue);

    list_del_init(&master->cur_msg->queue);
    if (master->busy)
        was_busy = true;
    else
        master->busy = true;
    spin_unlock_irqrestore(&master->queue_lock, flags);

    if (!was_busy && master->prepare_transfer_hardware) {
        ret = master->prepare_transfer_hardware(master);
        if (ret) {
            dev_err(&master->dev,
                    "failed to prepare transfer hardware\n");
            return;
        }
    }

    ret = master->transfer_one_message(master, master->cur_msg);
    if (ret) {
        dev_err(&master->dev,
                "failed to transfer one message from queue\n");
        return;
    }
}
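The other end of this hand-off is the controller driver's transfer_one_message() hook. A rough sketch of what such an implementation usually does with master->cur_msg, with foo_do_transfer() standing in for the real per-transfer hardware I/O (both foo_* names are hypothetical):

static int foo_transfer_one_message(struct spi_master *master,
                                    struct spi_message *msg)
{
    struct spi_transfer *t;
    int ret = 0;

    /* assert the chipselect for msg->spi here */

    list_for_each_entry(t, &msg->transfers, transfer_list) {
        ret = foo_do_transfer(msg->spi, t);     /* hypothetical hardware I/O */
        if (ret < 0)
            break;
        msg->actual_length += t->len;
        if (t->delay_usecs)
            udelay(t->delay_usecs);
        /* honour t->cs_change between transfers if needed */
    }

    /* deassert the chipselect here */

    msg->status = ret;
    /* report completion and let the queue move on to the next message */
    spi_finalize_current_message(master);
    return ret;
}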