linux內核qspi驅動層次分析


平台:Xilinx Zynq UltraScale+MPSoC ZCU102 平台         內核版本: linux-4.4

       linux qspi驅動是為了解決spi驅動異步操作的沖突問題,引入了"隊列化"的概念。其基本的原理是把具體需要傳輸的message放入到隊列中,啟動一個內核線程檢測隊列中是否有在等待的message,如果有則啟動具體的傳輸。

1 相關結構體:

       一個SPI控制器對應一個spi_master結構體,通過它和掛在對應控制器下面的flash進行通信。每一次傳輸由spi_message來表示,spi_message掛入到spi_master的queue隊列中,spi_message又由多個傳輸片段spi_transfer構成。

struct spi_master {
    struct device    dev;  // the device object for the controller itself

    struct list_head list; // links this master into the global spi_master_list; one SoC may contain several SPI controllers

    s16            bus_num; // identifies this SPI controller; an SoC/board may have several
                            // SPI bus number of this controller (numbered from 0)

    /* chipselects will be integral to many controllers; some others
     * might use board-specific GPIOs.
     */
    u16            num_chipselect; // how many slave chips this controller supports

    u16            dma_alignment;

    /* spi_device.mode flags understood by this controller driver */
    u16            mode_bits; // supported operating modes

    /* bitmask of supported bits_per_word for transfers */
    u32            bits_per_word_mask;

    /* limits on transfer speed */
    u32            min_speed_hz;
    u32            max_speed_hz;

    /* other constraints relevant to this driver */
    u16            flags;

    /* lock and mutex for SPI bus locking */
    spinlock_t        bus_lock_spinlock;
    struct mutex        bus_lock_mutex;

    /* flag indicating that the SPI bus is locked for exclusive use */
    bool            bus_lock_flag;

    /* Setup mode and clock, etc (spi driver may call many times).
     *
     * IMPORTANT:  this may be called when transfers to another
     * device are active.  DO NOT UPDATE SHARED REGISTERS in ways
     * which could break those transfers.
     */
    int            (*setup)(struct spi_device *spi); // configure controller parameters for a device

    /* Queue a message: the master's main job is to service the message
     * queue -- select a chip and move the data out.
     */
    int            (*transfer)(struct spi_device *spi,
                        struct spi_message *mesg);

    /* callback to release per-device state when a device is removed */
    void            (*cleanup)(struct spi_device *spi);


    bool                queued;
    struct kthread_worker        kworker; // per-controller worker used to pump queued messages
    struct task_struct        *kworker_task; // kernel thread servicing every work item on kworker
    struct kthread_work        pump_messages;
    spinlock_t            queue_lock;
    struct list_head        queue;  // list of messages waiting to be transferred
    struct spi_message        *cur_msg; // message currently being processed

    bool                cur_msg_mapped;
    struct completion               xfer_completion;
    size_t                max_dma_len;
    /* prepare the hardware before a stream of transfers */
    int (*prepare_transfer_hardware)(struct spi_master *master);
    /* atomically transfer one complete message */
    int (*transfer_one_message)(struct spi_master *master,
                    struct spi_message *mesg);
    int (*prepare_message)(struct spi_master *master,
                   struct spi_message *message);

    /*
     * These hooks are for drivers that use a generic implementation
     * of transfer_one_message() provided by the core.
     */
    void (*set_cs)(struct spi_device *spi, bool enable);
    int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
                struct spi_transfer *transfer);
    void (*handle_err)(struct spi_master *master,
               struct spi_message *message);

    /* gpio chip select */
    int            *cs_gpios;
};

struct spi_message { /* one multi-segment SPI transaction */
    struct list_head    transfers; // list of the individual spi_transfer segments

    struct spi_device    *spi; /* the device this message is queued for */

    unsigned        is_dma_mapped:1;

    /* completion is reported through a callback */
    void            (*complete)(void *context); // invoked once every transfer in the message has finished
    void            *context;
    unsigned        frame_length; // total number of bytes across all segments
    unsigned        actual_length; // number of bytes transferred so far
    int            status;

    struct list_head    queue; /* links this message into the owning master's queue list */
    void            *state;
};

struct spi_transfer { /* smallest unit of an SPI transfer */

    const void    *tx_buf;
    void        *rx_buf;
    unsigned    len;    // size in bytes of the rx/tx buffers

    dma_addr_t    tx_dma;
    dma_addr_t    rx_dma;
    struct sg_table tx_sg;
    struct sg_table rx_sg;

    unsigned    cs_change:1;
    unsigned    tx_nbits:3;
    unsigned    rx_nbits:3;

    u8        bits_per_word; // 0 = use the device default; non-zero overrides it for this transfer
    u16        delay_usecs;
    u32        speed_hz;

    struct list_head transfer_list; // links this transfer into its parent spi_message
};

       畫出spi_master結構體和spi_message以及spi_transfer結構體的關系如圖1所示:

圖1 spi_master和spi_message以及spi_transfer關系

2 驅動層次

static struct platform_driver zynqmp_qspi_driver = { /* platform driver for the ZynqMP GQSPI controller */
	.probe = zynqmp_qspi_probe,
	.remove = zynqmp_qspi_remove,
	.driver = {   /* embedded struct device_driver */
		.name = "zynqmp-qspi",
		.of_match_table = zynqmp_qspi_of_match,
		.pm = &zynqmp_qspi_dev_pm_ops,
	},
};
struct bus_type platform_bus_type = { /* the platform bus type */
	.name		= "platform",
	.dev_groups	= platform_dev_groups,
	.match		= platform_match,
	.uevent		= platform_uevent,
	.pm		= &platform_dev_pm_ops,
};

/* 注冊平台驅動 */
__platform_driver_register(struct platform_driver *drv, struct module *owner) // drivers/base/platform.c
	drv->driver.bus = &platform_bus_type; // 注意是平台總線
	drv->driver.probe = platform_drv_probe; // 平台驅動探測函數
	driver_register(&drv->driver) // 注冊驅動
		driver_find(drv->name, drv->bus); // .name = "zynqmp-qspi", &platform_bus_type; 看是否已經注冊過了同名的驅動
		ret = bus_add_driver(drv);
			driver_attach(struct device_driver *drv)
				bus_for_each_dev(drv->bus, NULL, drv, __driver_attach); /* 通過bus設備下面的設備列表進行匹配 */

__driver_attach(struct device *dev, void *data) // data = device_driver
	struct device_driver *drv = data;
	driver_match_device(drv, dev)  // 匹配上了才會往下走!!!!!!!!!!!!!否則直接去匹配下一個可能的設備
		return drv->bus->match ? drv->bus->match(dev, drv) : 1 /* 調用bus下面的match函數, 即platform_match函數, 
	                                                              主要通過platform_driver下面的id_table以及名字匹配*/ 
	/* 至此已經找到了設備 */																
  if (!dev->driver) // 還未綁定驅動,調用probe函數把驅動和設備綁定到一起
		driver_probe_device(drv, dev);
			really_probe(dev, drv);
				if (dev->bus->probe) {
						ret = dev->bus->probe(dev); // 未設置, 為空
				} else if (drv->probe) {
					ret = drv->probe(dev); // 走這個分支,為之前設置的platform_drv_probe 
				}
	
platform_drv_probe(struct device *_dev)
	struct platform_driver *drv = to_platform_driver(_dev->driver); // 獲取宿主結構體 platform_driver zynqmp_qspi_driver
	ret = drv->probe(dev); // 調用zynqmp_qspi_probe函數
	
zynqmp_qspi_probe  // 實際上是匹配控制器對應的設備
	struct spi_master *master;
	struct device *dev = &pdev->dev;
	master = spi_alloc_master(&pdev->dev, sizeof(*xqspi)); // 分配master空間,設置num_chipselect bus_num
	master->dev.of_node = pdev->dev.of_node; // 應該是dtb里面的spi控制器對應的節點???
	設置時鍾,使能時鍾 以及控制器zynqmp_qspi_init_hw(xqspi); 獲取中斷資源
	設置master的變量setup  set_cs transfer_one prepare_transfer_hardware unprepare_transfer_hardware max_speed_hz bits_per_word_mask mode_bits
	spi_register_master(master); // 注冊master
		設置num_chipselect bus_num
		INIT_LIST_HEAD(&master->queue); /* 初始化master下面的message隊列 */
		spin_lock_init(&master->queue_lock);
		dev_set_name(&master->dev, "spi%u", master->bus_num);
		status = device_add(&master->dev); // 把設備加入到系統中,平台總線上??
		spi_master_initialize_queue(master); // 初始化隊列
			master->transfer = spi_queued_transfer;
			master->transfer_one_message = spi_transfer_one_message;
			ret = spi_init_queue(master); /* 初始化和啟動工作隊列 */
				/* 啟動一個內核線程,該線程工作對象為工作隊列master->kworker,工作函數為kthread_worker_fn,后面會介紹 */
				master->kworker_task = kthread_run(kthread_worker_fn, &master->kworker, "%s", dev_name(&master->dev));
				/* 初始化工作實例master->pump_messages, 其回調的函數為spi_pump_messages */
				init_kthread_work(&master->pump_messages, spi_pump_messages);
			master->queued = true;
			ret = spi_start_queue(master);
				/* 把工作實例master->pump_messages掛入到工作隊列master->kworker中 */
				queue_kthread_work(&master->kworker, &master->pump_messages);
		list_add_tail(&master->list, &spi_master_list); // 把master掛入總的鏈表spi_master_list
		of_register_spi_devices(master); // 注冊spi控制器設備下面的子設備,這個時候才開始設置spi設備下面的flash芯片
			for_each_available_child_of_node(master->dev.of_node, nc) {
				spi = of_register_spi_device(master, nc);
					struct spi_device *spi = spi_alloc_device(master);
						spi->master = master;   // 將flash設備和master控制器連接到一起
						spi->dev.parent = &master->dev; // 父設備為spi控制器
						spi->dev.bus = &spi_bus_type;  // 明確掛入到了spi_bus下面
					of_modalias_node(nc, spi->modalias, sizeof(spi->modalias)); // 通過dtb里面的compatible獲取驅動
					rc = of_property_read_u32(nc, "reg", &value); // 通過dtb里面的reg條目獲取設備的地址,即對應的片選
					spi->chip_select = value;  // 選中或者不選中對應的芯片
					//獲取 spi-rx-bus-width spi-tx-bus-width spi-max-frequency 等芯片級的信息並且設置
					of_property_read_u32(nc, "spi-max-frequency", &value);
					spi->max_speed_hz = value;
					rc = spi_add_device(spi); // 注冊spi設備
						bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); // 通過spi->chip_select spi->master來匹配
						spi->cs_gpio = master->cs_gpios[spi->chip_select]; // 設置本芯片為master的哪個片選
						spi_setup(spi); // 設置spi設備
							status = spi->master->setup(spi);  // 調用spi_master 的 setup函數
							spi_set_cs(spi, false); // 禁止片選
						device_add(&spi->dev); // 加入設備,匹配具體的驅動
							error = bus_add_device(dev);
							bus_probe_device(dev);
								device_initial_probe(dev);
									__device_attach(dev, true);
										bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
											driver_probe_device(drv, dev);
												really_probe(dev, drv);
													if (dev->bus->probe) {
														ret = dev->bus->probe(dev);
													} else if (drv->probe) {
														ret = drv->probe(dev); // m25p_probe
													}
m25p_probe								
	ret = spi_nor_scan(nor, flash_name, mode); // 建立  spi-nor   mtd   芯片的工作模式 dummy  扇區等
	return mtd_device_parse_register(&nor->mtd, NULL, &ppdata,
			data ? data->parts : NULL,
			data ? data->nr_parts : 0); // 注冊mtd分區

繼續分析剩下的函數kthread_worker_fn和spi_pump_messages

int kthread_worker_fn(void *worker_ptr)
    struct kthread_worker *worker = worker_ptr; // 為設置的master->kworker
    struct kthread_work *work;
repeat:
    if (!list_empty(&worker->work_list)) {
        /* 工作隊列下面的work_list不為空,則取出工作實例 */
        work = list_first_entry(&worker->work_list,
                    struct kthread_work, node);
        list_del_init(&work->node); // 從工作隊列中摘下具體的實例
    }
    worker->current_work = work;
    if (work) /* 實際內核代碼中work初始為NULL,只有從work_list取到工作實例才會執行 */
        work->func(work); // 調用工作實例下面的func執行,func為spi_pump_messages
    goto repeat;
    
spi_pump_messages
    /* 取出queue下面的message,賦給master->cur_msg */
    master->cur_msg = list_first_entry(&master->queue, struct spi_message, queue);
    list_del_init(&master->cur_msg->queue);
    ret = master->prepare_transfer_hardware(master); // 准備硬件資源
    ret = master->transfer_one_message(master, master->cur_msg); // 傳輸

 補齊最后的數據構造以及上傳到queue以及處理過程:

struct kthread_worker  kworker; // per-SPI-controller work queue
struct kthread_worker {
    spinlock_t        lock;                // spinlock protecting the worker
    struct list_head    work_list;       // list of work items queued on this worker
    struct task_struct    *task;           // the process that actually performs the transfers
    struct kthread_work    *current_work;   // work item the worker is currently handling
};

struct task_struct        *kworker_task;   // kernel thread servicing the work queue

struct kthread_work pump_messages;
struct kthread_work {
    struct list_head    node;     // links this work item into a worker's work_list
    // callback run when this work item is scheduled; here it is spi_pump_messages
    kthread_work_func_t    func;     // void (*kthread_work_func_t)(struct kthread_work *work);
    struct kthread_worker    *worker;  // the worker this work item belongs to
};

// spi讀len個數據到buf中,spi為具體要進行數據傳輸的設備
static inline int
spi_read(struct spi_device *spi, void *buf, size_t len) 
    // 構造spi_transfer結構體
    struct spi_transfer    t = {
            .rx_buf        = buf,
            .len        = len,
        };
    struct spi_message    m;

    spi_message_init(&m); // 初始化message(初始化transfers鏈表頭等),實際內核spi_read中必不可少,否則add_tail操作未初始化的鏈表
    spi_message_add_tail(&t, &m);// 把spi_transfer掛入到spi_message中
    return spi_sync(spi, &m);
        __spi_sync(spi, message, 0);
            struct spi_master *master = spi->master; // 獲取到spi控制器,在初始化的時候就確定了
            message->spi = spi; // 把message和具體的spi設備掛鈎
            status = __spi_queued_transfer(spi, message, false);
                list_add_tail(&msg->queue, &master->queue); // 把message掛入到master的queue中
            __spi_pump_messages(master, false); // 對message進行"抽取"
                // 把master->pump_messages工作實例掛入到master->kworker中,喚醒內核線程處理queue里面的message
                queue_kthread_work(&master->kworker, &master->pump_messages);
                return;
            wait_for_completion(&done); // 等待傳輸完成
            status = message->status;
            return status;

// 異步傳輸spi數據,主要是利用master->transfer函數進行處理
int spi_async(struct spi_device *spi, struct spi_message *message)
    ret = __spi_async(spi, message);
        return master->transfer(spi, message);
    return ret;

數據的寫流程和讀基本一致!!

 qspi驅動的基本流程如圖2所示,紅色的步驟表示數據的構造以及處理過程。

圖2 qspi驅動的基本流程

 

 


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM