2. linux ehci device driver(ehci hcd)
2.1. linux usb subsystem arch overview(host)
2.2. ehci_hcd
2.3. Interfaces implemented by ehci
2.3.1. ehci_pci_setup() (hc_driver->reset)
2.3.2. ehci_run() (hc_driver->start)
2.3.3. ehci_stop() (hc_driver->stop)
2.3.4. ehci_get_frame()(hc_driver-> get_frame_number)
2.3.5. ehci_urb_enqueue()(hc_driver->urb_enqueue)*
2.3.6. ehci_urb_dequeue()(hc_driver->urb_dequeue)
2.3.7. ehci_endpoint_disable()(hc_driver-> endpoint_disable)
2.3.8. ehci_irq()(hc_driver->irq)*
2.3.9. ehci_hub_control()(hc_driver->hub_control)
2.3.10. ehci_hub_status_data(hc_driver->hub_status_data)
1. linux ehci device driver(ehci hcd)
1.1.linux usb subsystem arch overview(host)
Fig 2-1 linux usb subsystem block diagram
In Fig 2-1, the green lines represent call relationships: the non-arrow end of a green line connects to the service provider, and the arrow end points to the caller. The red lines represent implementation relationships: the arrow end of a red line points to the party that defines the interface, and the non-arrow end connects to the party that implements it.
A USB interface driver is a driver that operates a USB function and implements the function-level protocol (mass storage, USB Video Class, HID, ...); examples are the usb-storage, uvc and hid modules. In general each (class of) interface uses one driver. Users can develop their own drivers for standard classes or for non-standard functions; the set of functions the system supports is determined by the USB 2.0 spec. Normally every (class of) function has a corresponding USB interface driver, and the USB subsystem can run multiple USB interface drivers (USB functions) at the same time. This layer is usually thin, since the class protocols are not very complex.
The Linux usbcore module is the abstraction of the whole Linux USB subsystem; it links the layers above and below it and implements part of the USB 2.0 protocol. It exports a large number of APIs for other modules, corresponding to the functions and variables exported from usbcore with EXPORT_SYMBOL(); at the same time, usbcore defines interface types that must be implemented by USB interface drivers and by USB host controller drivers. The usbcore module normally does not need to be modified by users. This layer is fairly thick and rich in content.
A Linux USB host controller driver is the driver that operates the USB host controller (hardware). Its main job is to translate URB transfer requests coming from the upper layers into a format the HC understands, start the HC transfer and see it through to completion. In addition, an HC usually integrates the root hub function, so the hcd must also implement access to the root hub ports; it is the only driver in the whole subsystem that touches hardware registers directly. This layer supports several different host controller drivers, e.g. EHCI, UHCI, OHCI and some non-standard HC implementations (mostly in embedded environments). From a software point of view this layer is thin relative to the whole subsystem, but the complexity of the hardware/software interface makes hcd drivers rather complex. The Linux EHCI device driver belongs to this layer.
In the whole subsystem, users only need to develop or customize USB interface drivers and USB host controller drivers;
usbcore generally does not need to be modified.
A USB interface driver must implement the interface defined by struct usb_driver in usbcore, and a USB host controller driver must implement the interface defined by struct hc_driver in usbcore; these are the two red lines pointing to linux usbcore in Fig 2-1.
1.2.ehci_hcd
struct ehci_hcd defines the data-structure part of the EHCI host controller driver, while struct hc_driver defines the interface built on struct usb_hcd; the first parameter of every function in that interface is a struct usb_hcd *. From an object-oriented point of view the two together give the definitions of the usb_hcd "class" and the ehci_hcd "class", and struct ehci_hcd can be seen as inheriting from struct usb_hcd. usb_create_hcd() creates an "object" that contains both a struct usb_hcd and a struct ehci_hcd; hcd_to_ehci() converts a struct usb_hcd * into the corresponding struct ehci_hcd *, and ehci_to_hcd() converts a struct ehci_hcd * back into the struct usb_hcd *.
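For reference, the two conversion helpers are roughly the following inline functions from ehci.h (the EHCI-private part lives in the hcd_priv flexible array at the end of struct usb_hcd); a minimal sketch:

static inline struct ehci_hcd *hcd_to_ehci (struct usb_hcd *hcd)
{
	/* the ehci_hcd structure is allocated right after struct usb_hcd */
	return (struct ehci_hcd *) (hcd->hcd_priv);
}
static inline struct usb_hcd *ehci_to_hcd (struct ehci_hcd *ehci)
{
	/* walk back from the private area to the containing usb_hcd */
	return container_of ((void *) ehci, struct usb_hcd, hcd_priv);
}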
struct ehci_hcd { /* one per controller */
/* glue to PCI and HCD framework */
struct ehci_caps __iomem *caps;
struct ehci_regs __iomem *regs;
struct ehci_dbg_port __iomem *debug;
__u32 hcs_params; /* cached register copy */
spinlock_t lock; /* protects the ehci_hcd data structures and EHCI register operations */
/* async schedule support */
struct ehci_qh *async;
struct ehci_qh *reclaim;
unsigned reclaim_ready : 1;
unsigned scanning : 1;
/* periodic schedule support */
unsigned periodic_size; /* usually 1024; some EHCI HCs support 256/512 */
__le32 *periodic; /* hw periodic table */
dma_addr_t periodic_dma;
unsigned i_thresh; /* uframes HC might cache */
union ehci_shadow *pshadow; /* mirror hw periodic table */
int next_uframe; /* scan periodic, start here */
unsigned periodic_sched; /* periodic activity count */
/* per root hub port */
unsigned long reset_done [EHCI_MAX_ROOT_PORTS];
/* per-HC memory pools (could be per-bus, but ...) */
struct dma_pool *qh_pool; /* qh per active urb */
struct dma_pool *qtd_pool; /* one or more per qh */
struct dma_pool *itd_pool; /* itd per iso urb */
struct dma_pool *sitd_pool; /* sitd per split iso urb */
struct timer_list watchdog;
struct notifier_block reboot_notifier;
unsigned long actions;
unsigned stamp;
unsigned long next_statechange;
u32 command;
u8 sbrn; /* packed release number */
};
struct hc_driver can be seen as the interface that the usbcore module defines and requires the low-level host controller driver to implement. Through the EHCI driver's implementation of these interfaces, usbcore passes requests from higher-level software down to the host controller driver and the HC; when the HC and the host controller driver finish, they notify the usbcore module through these interfaces as well.
struct hc_driver {
const char *description; /* "ehci-hcd" etc */
const char *product_desc; /* product/vendor string */
size_t hcd_priv_size; /* size of private data */
/* irq handler */
irqreturn_t (*irq) (struct usb_hcd *hcd, struct pt_regs *regs);
int flags;
/* called to init HCD and root hub */
int (*reset) (struct usb_hcd *hcd);
int (*start) (struct usb_hcd *hcd);
/* NOTE: these suspend/resume calls relate to the HC as
* a whole, not just the root hub; they're for PCI bus glue.
*/
/* called after suspending the hub, before entering D3 etc */
int (*suspend) (struct usb_hcd *hcd, pm_message_t message);
/* called after entering D0 (etc), before resuming the hub */
int (*resume) (struct usb_hcd *hcd);
/* cleanly make HCD stop writing memory and doing I/O */
void (*stop) (struct usb_hcd *hcd);
/* return current frame number */
int (*get_frame_number) (struct usb_hcd *hcd);
/* manage i/o requests, device state */
int (*urb_enqueue) (struct usb_hcd *hcd,
struct usb_host_endpoint *ep,
struct urb *urb,
gfp_t mem_flags);
int (*urb_dequeue) (struct usb_hcd *hcd, struct urb *urb);
/* hw synch, freeing endpoint resources that urb_dequeue can't */
void (*endpoint_disable)(struct usb_hcd *hcd,
struct usb_host_endpoint *ep);
/* root hub support */
int (*hub_status_data) (struct usb_hcd *hcd, char *buf);
int (*hub_control) (struct usb_hcd *hcd,
u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength);
int (*bus_suspend)(struct usb_hcd *);
int (*bus_resume)(struct usb_hcd *);
int (*start_port_reset)(struct usb_hcd *, unsigned port_num);
void (*hub_irq_enable)(struct usb_hcd *);
/* Needed only if port-change IRQs are level-triggered */
};
The main job of the EHCI device driver is to implement the key interfaces defined in struct hc_driver. Generally the following interfaces must be implemented (a sketch of how ehci-pci.c wires them up follows the list):
irqreturn_t (*irq) (struct usb_hcd *hcd, struct pt_regs *regs); // the ehci hcd's irq handler
int (*reset) (struct usb_hcd *hcd);
int (*start) (struct usb_hcd *hcd); // start the HC
void (*stop) (struct usb_hcd *hcd); // stop the HC
int (*get_frame_number) (struct usb_hcd *hcd); // get the current frame number
/* Using the hcd and ep information, schedule the urb onto the EHCI; once the HC finishes the transfer for this URB, urb->complete() is called to notify usbcore */
int (*urb_enqueue) (struct usb_hcd *hcd,
struct usb_host_endpoint *ep,
struct urb *urb,
gfp_t mem_flags);
/* Used to cancel an urb that has already been enqueued; mainly called from usbcore's unlink_urb() path */
int (*urb_dequeue) (struct usb_hcd *hcd, struct urb *urb);
/* disable the ep and release its resources (unlink the qh of this ep) */
void (*endpoint_disable)(struct usb_hcd *hcd,
struct usb_host_endpoint *ep);
/* get the status of the root hub ports */
int (*hub_status_data) (struct usb_hcd *hcd, char *buf);
/* operate on the root hub and its ports */
int (*hub_control) (struct usb_hcd *hcd,
u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength);
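As a concrete illustration, ehci-pci.c binds its implementations to this interface with a static initializer along the following lines (abbreviated; field order and optional members such as suspend/resume vary by kernel version):

static const struct hc_driver ehci_pci_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"EHCI Host Controller",
	.hcd_priv_size =	sizeof(struct ehci_hcd),
	/* generic hardware linkage */
	.irq =			ehci_irq,
	.flags =		HCD_MEMORY | HCD_USB2,
	/* basic lifecycle operations */
	.reset =		ehci_pci_setup,
	.start =		ehci_run,
	.stop =			ehci_stop,
	/* managing i/o requests and associated device resources */
	.urb_enqueue =		ehci_urb_enqueue,
	.urb_dequeue =		ehci_urb_dequeue,
	.endpoint_disable =	ehci_endpoint_disable,
	/* scheduling support */
	.get_frame_number =	ehci_get_frame,
	/* root hub support */
	.hub_status_data =	ehci_hub_status_data,
	.hub_control =		ehci_hub_control,
};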
1.3. Interfaces implemented by ehci
1.3.1. ehci_pci_setup() (hc_driver->reset)
Prototype
static int ehci_pci_setup(struct usb_hcd *hcd)
When it is called
The usbcore API usb_add_hcd() calls it via hcd->driver->reset(hcd).
usb_add_hcd() is normally called from the probe() function of the ehci pci device.
Call notes
None
Main flow
1) Initialize the EHCI register base addresses (ehci->regs and ehci->caps);
2) Read the EHCI Capability Parameters and cache them in ehci->hcs_params (a sketch of steps 1-2 follows the flow);
3) Call ehci_halt() to force the EHCI into the halted state;
4) Call ehci_init() to initialize the ehci data structures:
a) spin_lock_init(&ehci->lock);
b) initialize the watchdog timer (mainly used to detect and handle lost irqs);
c) set ehci->periodic_size = DEFAULT_I_TDPS and call ehci_mem_init() to allocate and initialize the data structures needed for HC scheduling, mainly:
. pre-allocate a number of qtd, qh, itd and sitd structures into ehci->qtd_pool, ehci->qh_pool, ehci->itd_pool and ehci->sitd_pool as caches;
. allocate one qh from ehci->qh_pool and point ehci->async at it; this qh serves as the reclamation list head of the asynchronous schedule (H bit set to 1), implementing Empty Asynchronous Schedule Detection;
. call dma_alloc_coherent() to allocate the hardware periodic table and point ehci->periodic at it; ehci->periodic_size is set to 1024 and ehci->periodic_dma holds the physical address of the table; every entry is initialized to EHCI_LIST_END, i.e. it contains no periodic schedule data structure;
. allocate the software shadow of the hardware table, point ehci->pshadow at it, and zero it;
d) initialize ehci->i_thresh from the parameters in ehci->caps->hcc_params; it says how many micro-frames' worth of periodic schedule data structures the HC may prefetch;
e) initialize the asynchronous schedule data structures:
ehci->reclaim = NULL;
ehci->reclaim_ready = 0;
ehci->next_uframe = -1;
initialize ehci->async;
f) build the default value of ehci->command from irq_thresh, park mode, periodic size and so on;
g) install the reboot callback: ehci_reboot().
5) Apply vendor-specific workarounds for a few HC chips;
6) Call ehci_pci_reinit():
a) set up ehci->debug if the chip supports a debug port;
b) call ehci_port_power() to power up every EHCI port (done via ehci_hub_control());
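Steps 1) and 2) boil down to mapping the capability and operational register blocks and caching the read-only parameters; roughly (simplified from ehci_pci_setup(), error handling omitted):

	ehci->caps = hcd->regs;
	/* operational registers start HC_LENGTH bytes after the capability block */
	ehci->regs = hcd->regs + HC_LENGTH (readl (&ehci->caps->hc_capbase));
	/* cache the read-only structural parameters; minimize chip reads */
	ehci->hcs_params = readl (&ehci->caps->hcs_params);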
1.3.2. ehci_run() (hc_driver->start)
Prototype
static int ehci_run (struct usb_hcd *hcd)
When it is called
The usbcore API usb_add_hcd() calls it via hcd->driver->start(hcd), after the root hub usb_device has been allocated.
Call notes
None
Main flow
0) The function follows ehci spec section 4.1;
1) Call ehci_reset() to reset the HC;
2) Write the periodic schedule list address and the asynchronous schedule list address into the corresponding HC registers:
writel(ehci->periodic_dma, &ehci->regs->frame_list);
writel((u32)ehci->async->qh_dma, &ehci->regs->async_next);
3) Handle 64-bit mode (the HC generates 64-bit addresses as bus master):
if (HCC_64BIT_ADDR(hcc_params)) {
writel(0, &ehci->regs->segment);
}
4) Start the HC:
ehci->command
&= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
ehci->command |= CMD_RUN;
writel (ehci->command, &ehci->regs->command);
5) Make the EHCI the owner of the root hub ports:
hcd->state = HC_STATE_RUNNING;
writel (FLAG_CF, &ehci->regs->configured_flag);
readl (&ehci->regs->command); /* unblock posted writes */
6) Enable interrupts:
writel (INTR_MASK, &ehci->regs->intr_enable); /* Turn On Interrupts */
This enables five interrupts: STS_IAA, STS_FATAL, STS_PCD, STS_ERR and STS_INT.
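INTR_MASK is defined in the driver as the OR of exactly those five status/enable bits:

#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)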
1.3.3. ehci_stop() (hc_driver->stop)
Prototype
static void ehci_stop (struct usb_hcd *hcd)
When it is called
1) usb_remove_hcd() calls it via hcd->driver->stop(hcd);
2) usb_add_hcd() also calls it via hcd->driver->stop(hcd) if allocating the root hub usb_device fails.
Call notes
None
Main flow
1) Call ehci_port_power (ehci, 0) to power off every port of the root hub;
2) Delete the watchdog timer;
3) Force the HC from the running state into the idle state, reset the HC chip and disable all interrupts:
spin_lock_irq(&ehci->lock);
if (HC_IS_RUNNING (hcd->state))
ehci_quiesce (ehci); //from running to idle
ehci_reset (ehci);
writel (0, &ehci->regs->intr_enable);
spin_unlock_irq(&ehci->lock);
4) Hand control of the root hub ports over to the companion HCs:
writel (0, &ehci->regs->configured_flag);
unregister_reboot_notifier (&ehci->reboot_notifier);
5) Clean up the unfinished asynchronous schedule QH structures:
spin_lock_irq (&ehci->lock);
if (ehci->async)
ehci_work (ehci, NULL); // unlink the unfinished asynchronous qhs
spin_unlock_irq (&ehci->lock);
ehci_work() is described in detail in the ehci_work() section.
1.3.4. ehci_get_frame()(hc_driver-> get_frame_number)
Prototype
static int ehci_get_frame (struct usb_hcd *hcd)
When it is called
1) hcd_get_frame_number():hcd.c, which is one of the interfaces defined by struct usb_operations.
Call notes
None
Main flow
1) Return the current frame number of the usb bus (see below).
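The whole implementation is essentially one line: read FRINDEX, drop the micro-frame bits, and wrap at the periodic table size:

static int ehci_get_frame (struct usb_hcd *hcd)
{
	struct ehci_hcd *ehci = hcd_to_ehci (hcd);
	/* FRINDEX counts micro-frames; bits [2:0] are the uframe within a frame */
	return (readl (&ehci->regs->frame_index) >> 3) % ehci->periodic_size;
}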
1.3.5. ehci_urb_enqueue()(hc_driver->urb_enqueue)*
Prototype
static int ehci_urb_enqueue (
struct usb_hcd *hcd,
struct usb_host_endpoint *ep,
struct urb *urb,
gfp_t mem_flags
)
When it is called
1) Called by hcd_submit_urb():hcd.c as hcd->driver->urb_enqueue (hcd, ep, urb, mem_flags);
2) hcd_submit_urb():hcd.c is one of the interfaces defined by struct usb_operations.
Call notes
1) This is the key function an ehci hcd has to implement. It converts the urb transfer request coming from the usbcore layer into transfer descriptor structures the EHCI understands (iTD, siTD, qTD, ...), then places them at the proper position in the EHCI periodic schedule list or asynchronous schedule list; later, once the HC has completed the transfer for the urb, the ehci hcd reports the result back to usbcore through urb->complete(). (That completion/notification step itself is not done inside this function.)
2) The function does not block; it returns as soon as the request has been queued. (This also reflects the nature of USB transfers: the host drives everything, so whether reading or writing, the host initiates the transfer and then waits for it to complete.)
3) Each ep maps to exactly one QH.
Main flow
1) Initialize a list head: INIT_LIST_HEAD (&qtd_list);
2) Get the type of the endpoint the urb is addressed to (a condensed sketch of the dispatch follows the flow);
3) If the type obtained in 2) is CONTROL or BULK:
a) call qh_urb_transaction (ehci, urb, &qtd_list, mem_flags) to build a chain of qTD structures from the urb and link them onto qtd_list;
b) call submit_async (ehci, ep, urb, &qtd_list, mem_flags) to attach the qTDs on qtd_list to the QH of this ep and place that QH on the ehci asynchronous schedule list; go to 6);
4) If the type obtained in 2) is INTERRUPT:
a) call qh_urb_transaction (ehci, urb, &qtd_list, mem_flags) to build a chain of qTD structures from the urb and link them onto qtd_list;
b) call intr_submit (ehci, ep, urb, &qtd_list, mem_flags) to attach the qTDs on qtd_list to the QH of this ep and place that QH on the ehci periodic schedule list; go to 6);
5) If the type obtained in 2) is ISOCHRONOUS:
a) for a high speed device, call itd_submit (ehci, urb, mem_flags) to convert the urb into iTDs and place them on the periodic schedule list; go to 6);
b) for a full speed device, call sitd_submit (ehci, urb, mem_flags) to convert the urb into siTDs and place them on the periodic schedule list; go to 6);
6) Return.
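Condensed, the dispatch in ehci_urb_enqueue() looks roughly like this (error handling and bookkeeping trimmed):

	switch (usb_pipetype (urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	default:
		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return submit_async (ehci, ep, urb, &qtd_list, mem_flags);
	case PIPE_INTERRUPT:
		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return intr_submit (ehci, ep, urb, &qtd_list, mem_flags);
	case PIPE_ISOCHRONOUS:
		if (urb->dev->speed == USB_SPEED_HIGH)
			return itd_submit (ehci, urb, mem_flags);
		else
			return sitd_submit (ehci, urb, mem_flags);
	}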
1.3.5.1. qh_urb_transaction ()
Prototype
static struct list_head *
qh_urb_transaction (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *head,
gfp_t flags
)
1) Parameters
ehci: the ehci hcd instance (input)
urb: the urb used to build the qtds (input)
head: the generated qtds are appended to the tail of the list pointed to by head (output)
flags: passed to the memory allocator when allocating qtd structures (input)
Return value:
on success, the head pointer;
on failure, NULL.
When it is called
1) Called only by ehci_urb_enqueue().
Call notes
None
Main flow
1) Based on the pipe, transfer_dma, transfer_buffer_length and other fields of the urb, allocate a series of qTD structures; on the software side the qTDs are linked one after another onto the tail of the list pointed to by head, while on the hardware side each qTD's hw_next points to the qtd_dma of the next qTD (see the sketch after this list);
2) For every qTD allocated, call qtd_fill() to fill in its hw_token, hw_buf[], hw_buf_hi[] and length fields;
3) control/interrupt/bulk transfers all use this function to build their qTD linked list.
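The dual software/hardware chaining in 1) follows this pattern inside the allocation loop; a minimal sketch, with variable names as used in the driver:

	qtd_prev = qtd;
	qtd = ehci_qtd_alloc (ehci, flags);	/* next qTD of the chain */
	qtd->urb = urb;
	qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);	/* hardware link (dma address) */
	list_add_tail (&qtd->qtd_list, head);		/* software link */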
1.3.5.2. submit_async ()
Prototype
static int submit_async (
struct ehci_hcd *ehci,
struct usb_host_endpoint *ep,
struct urb *urb,
struct list_head *qtd_list,
gfp_t mem_flags
)
1) Parameters:
ehci: the ehci hcd instance (input)
ep: the host-side endpoint description, passed from hcd_submit_urb():hcd.c to ehci_urb_enqueue() and then to this function (input)
urb: the urb transfer request from the upper layer (input)
qtd_list: head of the qtd list built from the urb (input)
mem_flags: used for dynamic memory allocation
When it is called
1) Called only by ehci_urb_enqueue().
Call notes
None
Main flow
1) The function body holds ehci->lock and disables interrupts;
2) Check whether the ehci hardware can currently be accessed; if not, return -ESHUTDOWN;
3) Call qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv) to get the qh and append qtd_list to it;
4) If the returned qh->qh_state == QH_STATE_IDLE, call qh_link_async (ehci, qh_get (qh)) to link the qh into the asynchronous schedule list;
5) Done.
Other notes
1) Note how qh->overlay is updated;
2) Understanding how qtd_list is appended to the qh (see qh_append_tds()) is one of the tricky points.
1.3.5.3. intr_submit ()
Prototype
static int intr_submit (
struct ehci_hcd *ehci,
struct usb_host_endpoint *ep,
struct urb *urb,
struct list_head *qtd_list,
gfp_t mem_flags
)
1) Parameters:
ehci: the ehci hcd instance (input)
ep: the host-side endpoint description, passed from hcd_submit_urb():hcd.c to ehci_urb_enqueue() and then to this function (input)
urb: the urb transfer request from the upper layer (input)
qtd_list: head of the qtd list built from the urb (input)
mem_flags: used for dynamic memory allocation
When it is called
1) Called only by ehci_urb_enqueue().
Call notes
None
Main flow
1) Call spin_lock_irqsave (&ehci->lock, flags) to take ehci->lock and disable interrupts;
2) Check whether the ehci hardware can currently be accessed; if not, set status = -ESHUTDOWN and go to 6);
3) Call qh_append_tds (ehci, urb, empty, epnum, &ep->hcpriv) to get the qh; empty is an empty list;
4) If the returned qh->qh_state == QH_STATE_IDLE, call status = qh_schedule (ehci, qh) to link the qh into the periodic schedule list; if status indicates an error, go to 6);
5) Call qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv) to append qtd_list to the qh;
6) Call spin_unlock_irqrestore (&ehci->lock, flags) to release ehci->lock and restore interrupts;
7) Error handling: if status != 0, call qtd_list_free() to free the qtd_list and the qtd structures on it.
Other notes
1) This function is quite similar to submit_async(); the difference is that intr_submit() first schedules the qh onto the HC and then appends qtd_list to it, while submit_async() does the opposite. intr_submit() works this way because calling qh_schedule(ehci, qh) first reveals whether the HC can accommodate this interrupt transfer, so an error can be reported early; if it can, appending qtd_list to a qh that is already on the periodic schedule list is harmless.
1.3.5.4. itd_submit ()
Prototype
static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
gfp_t mem_flags)
1) Parameters:
ehci: the ehci hcd instance (input)
urb: the isochronous transfer request submitted to the HC (input)
mem_flags: memory allocation flags (input)
When it is called
1) Called only by ehci_urb_enqueue().
Call notes
None
Main flow
1) status = -EINVAL;
2) Get the iso_stream head:
stream = iso_stream_find (ehci, urb);
3) if (stream == NULL || (urb->interval != stream->interval)), go to 8);
4) Call itd_urb_transaction (stream, ehci, urb, mem_flags) to allocate the iTD structures and the struct ehci_iso_sched; if it fails, go to 8);
5) Call spin_lock_irqsave (&ehci->lock, flags) to take ehci->lock and disable interrupts;
6) Check whether the ehci hardware can currently be accessed; if not, set status = -ESHUTDOWN and go to 8);
otherwise call status = iso_stream_schedule (ehci, urb, stream) to check whether the HC can satisfy the isochronous transfer represented by the stream;
7) Link the isochronous transfer (iTDs) represented by the stream into the periodic schedule list:
if (status == 0) itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
8) Check status and return:
if (unlikely (status < 0))
iso_stream_put (ehci, stream);
return status;
Other notes
1) struct ehci_iso_stream plays a role similar to struct ehci_qh, except that ehci_qh is a structure defined by the ehci spec and understood by the HC, while ehci_iso_stream is neither defined by the spec nor recognized by the HC. Each isochronous endpoint has one ehci_iso_stream; like the qh it is stored in the hcpriv field of struct usb_host_endpoint and holds much of the information needed for scheduling.
1.3.5.5. sitd_submit ()
Prototype
static int
sitd_submit (
struct ehci_hcd *ehci,
struct urb *urb,
gfp_t mem_flags
)
When it is called
1) ehci_urb_enqueue().
Call notes
None
Main flow
Omitted
Other notes
1) This function closely parallels itd_submit() (it uses siTDs and sitd_link_urb() for full speed isochronous endpoints behind a TT), so it is not described further.
1.3.5.6. qh_append_tds()
Prototype
static struct ehci_qh *qh_append_tds (
struct ehci_hcd *ehci,
struct urb *urb,
struct list_head *qtd_list,
int epnum,
void **ptr
)
1) Parameters
ehci: the ehci hcd instance (input)
urb: the urb this request came from (input)
qtd_list: head of the qtd list built from the urb (input)
epnum: mainly used to tell whether this is the control endpoint (0) (input)
ptr: a pointer to a qh pointer; if the qh pointer it refers to is NULL, the function creates a qh and returns it through this parameter (output)
When it is called
1) Called by submit_async();
2) Called by intr_submit().
Call notes
None
Main flow
1) Check whether *ptr is NULL; if so, allocate a qh with qh_make (ehci, urb, GFP_ATOMIC) and set *ptr = qh;
2) Let qtd point to the first qtd on the qtd_list;
3) If epnum is 0, patch qh->hw_info1 for the control endpoint;
4) If qtd points to a valid structure:
a) swap dummy and qtd:
let dummy = qh->dummy and swap the contents pointed to by dummy and qtd (keeping dummy->qtd_dma at its original value);
list_del (&qtd->qtd_list);
list_add (&dummy->qtd_list, qtd_list);
b) append qtd_list to the tail of qh->qtd_list;
c) initialize the new dummy qtd:
ehci_qtd_init (qtd, qtd->qtd_dma);
qh->dummy = qtd;
/* hc must see the new dummy at list end */
dma = qtd->qtd_dma;
qtd = list_entry (qh->qtd_list.prev,
struct ehci_qtd, qtd_list);
qtd->hw_next = QTD_NEXT (dma);
/* let the hc process these next qtds */
wmb ();
dummy->hw_token = token;
5) Return qh.
Notes:
1> only the hc or qh_refresh() ever modify the overlay;
2> the reason step 4) swaps the contents of qh->dummy and the first qtd: the qh overlay area and the HC may already have cached qh->dummy->qtd_dma, so swapping lets the HC pick up the new work without a race condition. This is one of the harder points to understand; read it together with the "Advance Queue" description in the ehci spec.
1.3.5.7. qh_link_async
Prototype
static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
When it is called
1) Called by submit_async();
2) Called by end_unlink_async().
Call notes
None
Main flow
1) Insert the qh right after ehci->async and, if necessary, enable the asynchronous schedule (a minimal sketch of the splice follows);
2) Set qh->qh_state = QH_STATE_LINKED.
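The splice in step 1) follows the usual "software first, hardware last" pattern; a minimal sketch, where head is ehci->async:

	qh->qh_next = head->qh_next;		/* new qh inherits the old successor */
	qh->hw_next = head->hw_next;
	wmb ();
	head->qh_next.qh = qh;			/* only now make it reachable ... */
	head->hw_next = QH_NEXT (qh->qh_dma);	/* ... by software and by the HC */
	qh->qh_state = QH_STATE_LINKED;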
1.3.5.8. qh_make()
Prototype
static struct ehci_qh *
qh_make (
struct ehci_hcd *ehci,
struct urb *urb,
gfp_t flags
)
When it is called
1) Called only by qh_append_tds().
Call notes
None
Main flow
1) Call ehci_qh_alloc() to allocate an ehci_qh structure and let qh point to it;
2) Compute the interrupt scheduling parameters and store them in the corresponding qh fields:
. usecs, c_usecs, gap_uf, period, tt_usecs, etc.;
3) Initialize the hardware-related fields:
. init as live, toggle clear, advance to dummy
1.3.5.9. qh_schedule()
Prototype
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
When it is called
1) intr_submit();
2) qh_completions();
3) ehci_urb_dequeue().
Call notes
None
Main flow
1) Initialize the qh: refresh the qh overlay area and set qh->hw_next = EHCI_LIST_END;
2) Choose a start frame and a uFrame for the qh that satisfy the following conditions:
a) start_frame < qh->period, if (qh->period > 0)
start_frame = 0, if (qh->period == 0)
b)
. if (qh->period == 0)
{
for (N = start_frame; N < ehci->periodic_size; N++)
every uFrame of frame N must still be able to reserve qh->usecs while keeping the periodic load of that uFrame (including qh->usecs) below 80% of its total bandwidth;
}
else
{
for (N = start_frame; N < ehci->periodic_size; N += qh->period)
the chosen uFrame of frame N must still be able to reserve qh->usecs while keeping the periodic load of that uFrame (including qh->usecs) below 80% of its total bandwidth;
for an FS/LS transfer the complete-split also has to fit: in the two micro-frames uFrame+qh->gap_uf and uFrame+qh->gap_uf+1 of frame N, qh->c_usecs must still be reservable while keeping the periodic load (including qh->c_usecs) below 80% of the micro-frame bandwidth;
}
c) if (FS/LS transfer) uFrame < 6
The selection above is done mainly by check_intr_schedule().
3) Use the start frame and uFrame chosen in 2) to set up some qh fields:
qh->start = frame;
/* reset S-frame and (maybe) C-frame masks */
qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK));
qh->hw_info2 |= qh->period? cpu_to_le32 (1 << uframe): __constant_cpu_to_le32 (QH_SMASK);
qh->hw_info2 |= c_mask; // for the complete split; returned from 2)
4) Call qh_link_periodic (ehci, qh) to link the qh into the periodic schedule list.
Other notes
1) This function is only used to schedule interrupt qhs onto the periodic schedule list, never control/bulk qhs; it supports HS/FS/LS interrupt qh scheduling alike;
2) It is a key function: it has to pick the start frame and uframe at which the qh is linked into the periodic schedule list while guaranteeing that periodic transfers in the affected uframes never exceed 80% of the 125 us budget.
1.3.5.10. qh_link_periodic()
Prototype
static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
When it is called
1) Called only by qh_schedule().
Call notes
None
Main flow
1) if (period == 0) period = 1;
2) Link the qh into the appropriate slots of the periodic schedule list:
for (i = qh->start; i < ehci->periodic_size; i += period) {
insert the qh into the qh portion of the list headed by ehci->periodic [i], keeping the qhs on that list ordered by period from large to small (so the poll rate goes from low to high);
insert the qh at the corresponding position of the list headed by ehci->pshadow [i];
}
3) Set qh->qh_state:
qh->qh_state = QH_STATE_LINKED;
qh_get (qh);
4) If the periodic schedule is not yet enabled, enable it (a minimal sketch of enable_periodic() follows):
if (!ehci->periodic_sched++)
return enable_periodic (ehci);
Other notes
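A sketch of what enable_periodic() does: it sets CMD_PSE and primes ehci->next_uframe so that scan_periodic() knows where to start scanning (the handshake that waits for a pending PSE change to settle is omitted in this simplified sketch):

	u32 cmd = readl (&ehci->regs->command) | CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* make sure ehci_work()/scan_periodic() scan from "now" */
	ehci->next_uframe = readl (&ehci->regs->frame_index)
			% (ehci->periodic_size << 3);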
1.3.5.11. check_intr_schedule()
Prototype
static int check_intr_schedule (
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
const struct ehci_qh *qh,
__le32 *c_maskp
)
1) Parameters:
ehci: the ehci hcd instance (input)
frame: the frame number to check (input)
uframe: the uframe number to check (input)
qh: the interrupt qh to check (input)
c_maskp: returned complete-split transaction uframe mask (output)
When it is called
1) Called only by qh_schedule().
Call notes
None
Main flow
1) retval = -ENOSPC;
2) if (qh->c_usecs && uframe >= 6) /* FSTN territory? */ (FSTNs are not supported)
goto 7);
3) Check whether qh->usecs fits in the microframe sequence frame:uframe, frame+qh->period:uframe, ...:
if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
goto 7);
4) If the qh carries no complete-split transaction, set *c_maskp = 0 and retval = 0:
if (!qh->c_usecs) {
retval = 0;
*c_maskp = 0;
goto 7);
}
5) For a qh that does carry complete-split transactions, build the c-split mask; to be safe two consecutive uframes are reserved for the complete split:
mask = 0x03 << (uframe + qh->gap_uf);
*c_maskp = cpu_to_le32 (mask << 8);
mask |= 1 << uframe;
6) Check whether the TT can accommodate this periodic schedule (frame is the start frame, mask covers the relevant uframes); if it can, go on to check whether the HC can accommodate the complete-split transactions:
if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
qh->period, qh->c_usecs))
goto done;
if (!check_period (ehci, frame, uframe + qh->gap_uf,
qh->period, qh->c_usecs))
goto 7);
retval = 0;
}
7) return retval;
Other notes
1) qh->gap_uf is the number of microframes between the start-split transaction (which for OUT also carries DATA) and the complete-split transaction (which for IN also carries DATA); it is computed in qh_make().
1.3.5.12. check_period()
Prototype
static int check_period (
struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
unsigned period,
unsigned usecs
)
1) Parameters:
ehci: the ehci hcd instance (input)
frame: the frame number to check (input)
uframe: the uframe number to check (input)
period:
the period, in frames, of the periodic schedule being checked (input);
0 means a period of one uframe, i.e. every uframe of every frame must be scheduled
usecs:
check whether "usecs" can still fit without exceeding 80% of the microframe bandwidth
When it is called
1) Called by check_intr_schedule().
Call notes
None
Main flow
1) The function is fairly simple: it mainly calls periodic_usecs (ehci, frame, uframe) to find out how many microseconds of periodic scheduling are already booked in frame:uframe (see the sketch below).
Other notes
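The 80% figure comes from reserving 20% of each 125 us micro-frame for non-periodic traffic; a minimal sketch of the check (the period == 0 "every uframe" case handled by the real code is left out):

	/* turn "usecs we need" into "max usecs that may already be claimed" */
	usecs = 100 - usecs;
	do {
		if (periodic_usecs (ehci, frame, uframe) > usecs)
			return 0;	/* this slot is already too full */
	} while ((frame += period) < ehci->periodic_size);
	return 1;			/* fits in every affected frame */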
1.3.5.13. iso_stream_find ()
Prototype
static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
Parameters:
1) ehci: the ehci hcd instance (input)
2) urb: the isochronous periodic schedule request (input)
When it is called
1) itd_submit();
2) sitd_submit().
Call notes
None
Main flow
1) Get the struct usb_host_endpoint * corresponding to the urb:
epnum = usb_pipeendpoint (urb->pipe);
if (usb_pipein(urb->pipe))
ep = urb->dev->ep_in[epnum];
else
ep = urb->dev->ep_out[epnum];
2) spin_lock_irqsave (&ehci->lock, flags);
3) If ep->hcpriv is NULL, allocate and initialize a struct ehci_iso_stream and let both stream and ep->hcpriv point to it;
4) /* caller guarantees an eventual matching iso_stream_put */
stream = iso_stream_get (stream);
5) spin_unlock_irqrestore (&ehci->lock, flags);
6) return stream;
1.3.5.14. itd_urb_transaction()
Prototype
static int
itd_urb_transaction (
struct ehci_iso_stream *stream,
struct ehci_hcd *ehci,
struct urb *urb,
gfp_t mem_flags
)
When it is called
1) itd_submit().
Call notes
None
Main flow
1) Allocate and initialize a struct ehci_iso_sched:
sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
itd_sched_init (sched, stream, urb);
2) Compute the number of iTDs needed:
if (urb->interval < 8)
num_itds = 1 + (sched->span + 7) / 8;
else
num_itds = urb->number_of_packets;
3) Allocate the iTDs and link them onto sched->td_list; this is done with ehci->lock held and irqs disabled;
4) Store sched in urb->hcpriv:
urb->hcpriv = sched;
urb->error_count = 0;
Other notes
1) This function is similar to qh_urb_transaction();
2) Unlike other urb types, an isochronous urb specifies how many packets to transfer, each described by an urb->iso_frame_desc[] entry; struct ehci_iso_sched also defines corresponding "packets", but those are used to initialize the per-uFrame transaction descriptions inside the iTDs;
3) When this function returns, the iTDs do not yet contain valid scheduling information.
1.3.5.15. iso_stream_schedule()
Prototype
static int
iso_stream_schedule (
struct ehci_hcd *ehci,
struct urb *urb,
struct ehci_iso_stream *stream
)
When it is called
1) itd_submit().
Call notes
None
Main flow
1) Compute mod and sched:
mod = ehci->periodic_size << 3;
sched = urb->hcpriv;
2) Check whether this isochronous schedule can fit within mod:
if (sched->span > (mod - 8 * SCHEDULE_SLOP)) {
status = -EFBIG;
goto 6);
}
if ((stream->depth + sched->span) > mod) {
status = -EFBIG;
goto 6);
}
sched->span is the total number of uFrames this schedule spans; stream->depth is (roughly) how many uFrames of already-linked but not yet completed transactions the stream still has in flight:
stream->depth is dynamic (link: +interval per itd slot, complete: -interval per itd slot);
3) Compute now and max:
now = readl (&ehci->regs->frame_index) % mod;
max = now + mod;
4) If the stream still contains uncompleted itds and the urb can be appended (scheduled) before max, go to 7); otherwise go to 6);
5) Otherwise the stream is idle, so start a new schedule and compute stream->next_uframe:
start = SCHEDULE_SLOP * 8 + (now & ~0x07);
start %= mod;
stream->next_uframe = start;
period = urb->interval;
if (!stream->highspeed)
period <<= 3;
/* find a uframe slot with enough bandwidth */
for (; start < (stream->next_uframe + period); start++) {
int enough_space;
/* check schedule: enough space? */
if (stream->highspeed)
enough_space = itd_slot_ok (ehci, mod, start,
stream->usecs, period);
else {
if ((start % 8) >= 6)
continue;
enough_space = sitd_slot_ok (ehci, mod, stream,
start, sched, period);
}
/* schedule it here if there's enough bandwidth */
if (enough_space) {
stream->next_uframe = start % mod;
goto ready;
}
}
status = -ENOSPC;
goto 6);
6) fail:
iso_sched_free (stream, sched);
urb->hcpriv = NULL;
return status;
7) ready:
/* report high speed start in uframes; full speed, in frames */
urb->start_frame = stream->next_uframe;
if (!stream->highspeed)
urb->start_frame >>= 3;
return 0;
Other notes
1) Essentially the function searches for a suitable start uframe such that
for (idx = start; idx < mod; idx += period) every affected uframe idx has enough time left to fit usecs, keeping the 20% reservation (i.e. at most 80% of each micro-frame is used by periodic traffic);
2) the scheduling algorithm is similar to the one used for interrupt transactions;
3) itd scheduling works in units of micro-frames, rather than the frame:uframe pair used for interrupt scheduling.
1.3.5.16. itd_link_urb()
Prototype
static int
itd_link_urb (
struct ehci_hcd *ehci,
struct urb *urb,
unsigned mod,
struct ehci_iso_stream *stream
)
Parameters:
ehci: the ehci hcd instance; urb: the iso urb being linked; mod: ehci->periodic_size << 3 (the number of uframes in the periodic table); stream: the ehci_iso_stream of the endpoint.
When it is called
1) itd_submit().
Call notes
None
Main flow
1) Some preparation:
next_uframe = stream->next_uframe % mod;
if (unlikely (list_empty(&stream->td_list))) {
ehci_to_hcd(ehci)->self.bandwidth_allocated
+= stream->bandwidth;
stream->start = jiffies;
}
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
2) Fill in the per-microframe descriptors of each itd, micro-frame by micro-frame; whenever all the microframes of an itd have been filled in, link it into the corresponding slot of the periodic schedule list:
for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
if (itd == NULL) {
itd = list_entry (iso_sched->td_list.next,
struct ehci_itd, itd_list);
list_move_tail (&itd->itd_list, &stream->td_list);
itd->stream = iso_stream_get (stream);
itd->urb = usb_get_urb (urb);
itd_init (stream, itd);
}
uframe = next_uframe & 0x07;
frame = next_uframe >> 3;
itd->usecs [uframe] = stream->usecs;
itd_patch (itd, iso_sched, packet, uframe);
next_uframe += stream->interval;
stream->depth += stream->interval;
next_uframe %= mod;
packet++;
/* link completed itds into the schedule */
if (((next_uframe >> 3) != frame) || packet == urb->number_of_packets) {
itd_link (ehci, frame % ehci->periodic_size, itd);
itd = NULL;
}
}
stream->next_uframe = next_uframe;
3) Free the iso_sched; from here on the stream is used to track completion of the schedule:
iso_sched_free (stream, iso_sched);
urb->hcpriv = NULL;
4) Enable the watchdog to monitor completion of the schedule (on timeout the watchdog polls for completions), and enable the periodic schedule if it is not running yet:
timer_action (ehci, TIMER_IO_WATCHDOG);
if (unlikely (!ehci->periodic_sched++))
return enable_periodic (ehci);
return 0;
Other notes
1) Like qh_link_async(), this links the urb's iTDs into the slots of the periodic schedule list chosen earlier by iso_stream_schedule();
2) it also moves the itds from iso_sched->td_list (urb->hcpriv: iso_sched) onto stream->td_list.
1.3.5.17. iso_stream_init
Prototype
static void
iso_stream_init (
struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
struct usb_device *dev,
int pipe,
unsigned interval
)
Parameters:
1) ehci: the ehci hcd instance (input)
2) stream: the stream to initialize (input)
3) dev: urb->dev (input)
4) pipe: urb->pipe (input)
5) interval: urb->interval (input)
When it is called
1) iso_stream_find().
Call notes
None
Main flow
1) If dev is a HS device, set the stream's highspeed, buf0, buf1, buf2 and usecs fields;
2) If dev is an FS device, set the stream's usecs, tt_usecs, c_usecs, raw_mask and address fields;
3) Finally set the following stream fields:
bandwidth, udev, bEndpointAddress, interval, maxp
Other notes
1) This function is similar to qh_make(): it initializes in the stream many of the parameters needed at scheduling time, and they only need to be initialized once per endpoint.
1.3.6. ehci_urb_dequeue()(hc_driver->urb_dequeue)
Prototype
static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
When it is called
1) unlink1() calls it via hcd->driver->urb_dequeue();
2) hcd_unlink_urb():hcd.c calls unlink1(); this implements the unlink_urb() interface defined by struct usb_operations.
Call notes
None
Main flow
1) Hold ehci->lock and disable interrupts;
2) If urb->pipe is of bulk/control type:
qh = (struct ehci_qh *) urb->hcpriv;
if (!qh)
break;
unlink_async (ehci, qh);
3) If urb->pipe is of interrupt type:
qh = (struct ehci_qh *) urb->hcpriv;
if (!qh)
break;
switch (qh->qh_state) {
case QH_STATE_LINKED:
intr_deschedule (ehci, qh);
/* FALL THROUGH */
case QH_STATE_IDLE:
qh_completions (ehci, qh, NULL);
break;
default:
ehci_dbg (ehci, "bogus qh %p state %d\n",
qh, qh->qh_state);
goto done;
}
/* reschedule QH iff another request is queued */
if (!list_empty (&qh->qtd_list)
&& HC_IS_RUNNING (hcd->state)) {
int status;
status = qh_schedule (ehci, qh);
spin_unlock_irqrestore (&ehci->lock, flags);
if (status != 0) {
// shouldn't happen often, but ...
// FIXME kill those tds' urbs
err ("can't reschedule qh %p, err %d",
qh, status);
}
return status;
}
break;
4) If urb->pipe is of isochronous type:
do nothing, just wait for the iTDs/siTDs to complete;
5) Release ehci->lock, re-enable interrupts and return 0.
Other notes
1) This function is called by usbcore to cancel an urb transfer that has already been submitted; it removes the corresponding data structures from the hardware lists;
2) In general the cancellation completes asynchronously.
1.3.7. ehci_endpoint_disable()(hc_driver-> endpoint_disable)
Prototype
static void ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
When it is called
1) Called by hcd_endpoint_disable():hcd.c via hcd->driver->endpoint_disable (hcd, ep);
2) hcd_endpoint_disable():hcd.c is one of the interfaces defined by struct usb_operations.
Call notes
None
Main flow
1) Hold ehci->lock and check whether ep->hcpriv is empty:
rescan:
spin_lock_irqsave (&ehci->lock, flags);
qh = ep->hcpriv;
if (!qh)
goto done;
2) If the ep is an isochronous endpoint, just wait for the unlink to finish:
if (qh->hw_info1 == 0) {
ehci_vdbg (ehci, "iso delay\n");
goto idle_timeout;
}
3) If the HC is not RUNNING, force the qh into the idle state:
if (!HC_IS_RUNNING (hcd->state))
qh->qh_state = QH_STATE_IDLE;
4) Handle the qh according to its state:
switch (qh->qh_state){
case QH_STATE_LINKED:
for (tmp = ehci->async->qh_next.qh;
tmp && tmp != qh;
tmp = tmp->qh_next.qh)
continue;
/* periodic qh self-unlinks on empty */
if (!tmp)
goto nogood;
unlink_async (ehci, qh);
/* FALL THROUGH */
case QH_STATE_UNLINK: /* wait for hw to finish? */
idle_timeout:
spin_unlock_irqrestore (&ehci->lock, flags);
schedule_timeout_uninterruptible(1);
goto rescan;
case QH_STATE_IDLE: /* fully unlinked */
if (list_empty (&qh->qtd_list)) {
qh_put (qh);
break;
}
/* else FALL THROUGH */
default:
nogood:
/* caller was supposed to have unlinked any requests;
* that's not our job. just leak this memory.
*/
ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
qh, ep->desc.bEndpointAddress, qh->qh_state,
list_empty (&qh->qtd_list) ? "" : "(has tds)");
break;
}
5) Final steps:
ep->hcpriv = NULL;
done:
spin_unlock_irqrestore (&ehci->lock, flags);
Other notes:
1) By the time endpoint_disable() is called:
any requests/urbs are already being unlinked;
nobody can be submitting urbs for this endpoint any more.
1.3.8. ehci_irq()(hc_driver->irq)*
Prototype
static irqreturn_t ehci_irq (struct usb_hcd *hcd, struct pt_regs *regs)
When it is called
1) The HC interrupts the CPU and usb_hcd_irq() runs; it was registered as the HC's irq handler in usb_add_hcd();
2) usb_hcd_irq() then calls ehci_irq().
Call notes
None
Main flow
1) Check whether an interrupt of interest occurred:
status = readl (&ehci->regs->status);
if (status == ~(u32) 0) { // the HC has been unplugged
goto dead;
}
status &= INTR_MASK;
if (!status) { // none of the interrupts we care about occurred
spin_unlock(&ehci->lock);
return IRQ_NONE;
}
2) Acknowledge (clear) the interrupts:
writel (status, &ehci->regs->status);
readl (&ehci->regs->command); /* unblock posted write */
bh = 0;
3) Examine the interrupt status and act on it:
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
if (likely ((status & STS_ERR) == 0))
COUNT (ehci->stats.normal);
else
COUNT (ehci->stats.error);
bh = 1;
}
/* complete the unlinking of some qh [4.15.2.3] */
if (status & STS_IAA) {
COUNT (ehci->stats.reclaim);
ehci->reclaim_ready = 1;
bh = 1;
}
/* remote wakeup [4.3.1] */
if (status & STS_PCD) {
unsigned i = HCS_N_PORTS (ehci->hcs_params);
/* resume root hub? */
status = readl (&ehci->regs->command);
if (!(status & CMD_RUN))
writel (status | CMD_RUN, &ehci->regs->command);
while (i--) {
status = readl (&ehci->regs->port_status [i]);
if (status & PORT_OWNER)
continue;
if (!(status & PORT_RESUME)
|| ehci->reset_done [i] != 0)
continue;
/* start 20 msec resume signaling from this port,
* and make khubd collect PORT_STAT_C_SUSPEND to
* stop that signaling.
*/
ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
usb_hcd_resume_root_hub(hcd); // wake up the hub kthread
}
}
/* PCI errors [4.15.2.4] */
if (unlikely ((status & STS_FATAL) != 0)) {
/* bogus "fatal" IRQs appear on some chips... why? */
status = readl (&ehci->regs->status);
if (status & STS_HALT) {
ehci_err (ehci, "fatal error\n");
dead:
ehci_reset (ehci);
writel (0, &ehci->regs->configured_flag); // release ownership of the ports
bh = 1;
}
}
4) Decide whether further processing is needed:
if (bh)
ehci_work (ehci, regs);
Other notes
1) ehci->lock is taken at the start of the function and released at the end;
2) the interrupts handled here are all described in detail in the ehci spec.
1.3.8.1. ehci_work()
Prototype
static void ehci_work (struct ehci_hcd *ehci, struct pt_regs *regs)
When it is called
1) ehci_irq();
2) ehci_watchdog();
3) ehci_stop();
4) ehci_bus_suspend();
5) ehci_pci_resume().
Call notes
1) The caller must hold ehci->lock with interrupts disabled.
Main flow
1) Prevent the watchdog from processing TIMER_IO_WATCHDOG:
timer_action_done (ehci, TIMER_IO_WATCHDOG);
2) Handle reclaiming from the asynchronous schedule:
if (ehci->reclaim_ready)
end_unlink_async (ehci, regs);
3) If another CPU is already scanning, return:
if (ehci->scanning)
return;
4) Scan the asynchronous and periodic schedules, mainly to process completed urbs:
ehci->scanning = 1;
scan_async (ehci, regs);
if (ehci->next_uframe != -1) // periodic work still outstanding
scan_periodic (ehci, regs);
ehci->scanning = 0;
5) If the HC is running and the schedules are not empty, start the IO watchdog monitor:
if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) &&
(ehci->async->qh_next.ptr != NULL || ehci->periodic_sched != 0))
timer_action (ehci, TIMER_IO_WATCHDOG);
Other notes
None
1.3.8.2. scan_async
Prototype
static void
scan_async (struct ehci_hcd *ehci, struct pt_regs *regs)
When it is called
1) ehci_work().
Call notes
None
Main flow
1) Preparation:
enum ehci_timer_action action = TIMER_IO_WATCHDOG;
if (!++(ehci->stamp))
ehci->stamp++; // keep ehci->stamp != 0
timer_action_done (ehci, TIMER_ASYNC_SHRINK); // clear this pending watchdog action
2) Get the first qh on the asynchronous schedule list:
qh = ehci->async->qh_next.qh;
3) if (likely (qh != NULL)), process the list as follows:
do {
/* clean any finished work for this qh */
// if this qh has not been scanned yet and still has qTDs queued, clean up its completed qTDs
if (!list_empty (&qh->qtd_list)
&& qh->stamp != ehci->stamp) {
int temp;
/* unlinks could happen here; completion
* reporting drops the lock. rescan using
* the latest schedule, but don't rescan
* qhs we already finished (no looping).
*/
qh = qh_get (qh);
qh->stamp = ehci->stamp;
temp = qh_completions (ehci, qh, regs);
qh_put (qh);
if (temp != 0) { // the qh had completed urbs, so rescan from the top
goto 2);
}
}
/* unlink idle entries, reducing HC PCI usage as well
* as HCD schedule-scanning costs. delay for any qh
* we just scanned, there's a not-unusual case that it
* doesn't stay idle for long.
* (plus, avoids some kind of re-activation race.)
*/
// all qTDs of this qh have completed, so start unlinking the qh
if (list_empty (&qh->qtd_list)) {
if (qh->stamp == ehci->stamp) // for a qh we just scanned, delay the unlink for better performance
action = TIMER_ASYNC_SHRINK;
else if (!ehci->reclaim
&& qh->qh_state == QH_STATE_LINKED)
start_unlink_async (ehci, qh);
}
qh = qh->qh_next.qh; // advance to the next qh on the asynchronous schedule list
} while (qh);
4) If any qh had its unlink deferred during the scan, start the shrink watchdog monitor:
if (action == TIMER_ASYNC_SHRINK)
timer_action (ehci, TIMER_ASYNC_SHRINK);
Other notes
None
1.3.8.3. scan_periodic
Prototype
static void
scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs)
When it is called
1) ehci_work().
Call notes
None
Main flow
1) Entry work:
mod = ehci->periodic_size << 3;
/*
* When running, scan from last scan point up to "now"
* else clean up by scanning everything that's left.
* Touches as few pages as possible: cache-friendly.
*/
now_uframe = ehci->next_uframe; // ehci->next_uframe records the last scan point
// ehci->next_uframe is set in a few places:
// enable_periodic()
// the end of scan_periodic()
if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
clock = readl (&ehci->regs->frame_index);
else
clock = now_uframe + mod - 1;
clock %= mod;
2) Scan each frame from now_uframe up to clock:
a) initialize the current scan:
/* don't scan past the live uframe */
frame = now_uframe >> 3;
if (frame == (clock >> 3))
uframes = now_uframe & 0x07;
else {
/* safe to scan the whole frame at once */
now_uframe |= 0x07;
uframes = 8;
}
b) /* scan each element in frame's queue for completions */
restart:
q_p = &ehci->pshadow [frame];
hw_p = &ehci->periodic [frame];
q.ptr = q_p->ptr;
type = Q_NEXT_TYPE (*hw_p);
modified = 0;
c) process the periodic transaction completions of this frame:
while (q.ptr != NULL) {
.live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state);
.switch(type):
case Q_TYPE_QH:
/* handle any completions */
temp.qh = qh_get (q.qh);
type = Q_NEXT_TYPE (q.qh->hw_next);
q = q.qh->qh_next;
modified = qh_completions (ehci, temp.qh, regs);
if (unlikely (list_empty (&temp.qh->qtd_list)))
intr_deschedule (ehci, temp.qh);
qh_put (temp.qh);
break;
case Q_TYPE_ITD:
/* skip itds for later in the frame */
rmb ();
for (uf = live ? uframes : 8; uf < 8; uf++) {
if (0 == (q.itd->hw_transaction [uf]
& ITD_ACTIVE))
continue;
q_p = &q.itd->itd_next;
hw_p = &q.itd->hw_next;
type = Q_NEXT_TYPE (q.itd->hw_next);
q = *q_p;
break;
}
if (uf != 8)
break;
/* this one's ready ... HC won't cache the
* pointer for much longer, if at all.
*/
*q_p = q.itd->itd_next;
*hw_p = q.itd->hw_next;
type = Q_NEXT_TYPE (q.itd->hw_next);
wmb();
modified = itd_complete (ehci, q.itd, regs);
q = *q_p;
break;
/* assume completion callbacks modify the queue */
if (unlikely (modified)) goto restart;
}
Note:
modified --- how many URBs were completed in this pass;
d) decide whether the scan is finished:
/* stop when we catch up to the HC */
// FIXME: this assumes we won't get lapped when
// latencies climb; that should be rare, but...
// detect it, and just go all the way around.
// FLR might help detect this case, so long as latencies
// don't exceed periodic_size msec (default 1.024 sec).
// FIXME: likewise assumes HC doesn't halt mid-scan
if (now_uframe == clock) {
unsigned now;
if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
break;
ehci->next_uframe = now_uframe;
now = readl (&ehci->regs->frame_index) % mod;
if (now_uframe == now)
break;
/* rescan the rest of this frame, then ... */
clock = now;
} else {
now_uframe++;
now_uframe %= mod;
}
Other notes
1) scan_periodic() relies on the following: an int/iso URB occupies multiple frames/uframes, and at schedule time the ioc bit is set in the appropriate qh/iTD/siTD so that the HC interrupts the CPU when it finishes, which in turn gets scan_periodic() called:
ehci_work(){
...
if (ehci->next_uframe != -1)
scan_periodic (ehci, regs);
...
}
1.3.8.4. qh_completions()
Prototype
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh, struct pt_regs *regs)
When it is called
1) ehci_urb_dequeue();
2) end_unlink_async();
3) scan_async();
4) scan_periodic().
Call notes
None
Main flow
1) if (unlikely (list_empty (&qh->qtd_list)))
return 0;
2) Walk the qTDs on qh->qtd_list, remove the ones that have completed, and give back the completed urbs;
3) If the qh has stopped, unlink it:
if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
switch (state) {
case QH_STATE_IDLE:
qh_refresh(ehci, qh);
break;
case QH_STATE_LINKED:
if ((__constant_cpu_to_le32 (QH_SMASK)
& qh->hw_info2) != 0) {
intr_deschedule (ehci, qh);
(void) qh_schedule (ehci, qh);
} else
unlink_async (ehci, qh);
break;
/* otherwise, unlink already started */
}
}
Interrupt qhs and control/bulk qhs are handled differently here.
4) Return the number of completed urbs.
Other notes
1) The function handles not only control/bulk qhs but interrupt qhs as well;
2) it mainly processes and frees the qTDs the qh has completed, gives the finished URBs back to the upper-layer driver, and returns the number of completed urbs;
3) qTDs complete in order, so if an active qTD is found while the HC is running, the qTD scan stops there.
1.3.8.5. unlink_async()
Prototype
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
When it is called
1) ehci_urb_dequeue();
2) ehci_endpoint_disable();
3) qh_completions(), when it finds the qh has stopped.
Call notes
None
Main flow
1) If the HC is running, ehci->reclaim != NULL and qh->qh_state == QH_STATE_LINKED, mark qh->qh_state = QH_STATE_UNLINK_WAIT and append the qh to the end of the ehci->reclaim chain:
/* if we need to use IAA and it's busy, defer */
if (qh->qh_state == QH_STATE_LINKED
&& ehci->reclaim
&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) {
struct ehci_qh *last;
for (last = ehci->reclaim;
last->reclaim;
last = last->reclaim)
continue;
qh->qh_state = QH_STATE_UNLINK_WAIT;
last->reclaim = qh;
/* bypass IAA if the hc can't care */
}
2) If the HC is not running, unlink directly:
if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state) && ehci->reclaim)
end_unlink_async (ehci, NULL);
3) Otherwise:
/* something else might have unlinked the qh by now */
if (qh->qh_state == QH_STATE_LINKED)
start_unlink_async (ehci, qh);
Other notes
1) ehci->reclaim != NULL means the ehci is already in the middle of unlinking.
1.3.8.6. start_unlink_async()
Prototype
static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
When it is called
1) ehci_watchdog();
2) unlink_async();
3) end_unlink_async();
4) scan_async().
Call notes
None
Main flow
1) If qh == ehci->async, just disable the asynchronous schedule (see note 1 below):
/* stop async schedule right now? */
if (unlikely (qh == ehci->async)) {
/* can't get here without STS_ASS set */
if (ehci_to_hcd(ehci)->state != HC_STATE_HALT) {
writel (cmd & ~CMD_ASE, &ehci->regs->command);
wmb ();
// handshake later, if we need to
}
timer_action_done (ehci, TIMER_ASYNC_OFF);
return;
}
2) Remove the qh from the ehci->async list and make ehci->reclaim point to it:
qh->qh_state = QH_STATE_UNLINK;
ehci->reclaim = qh = qh_get (qh);
prev = ehci->async;
while (prev->qh_next.qh != qh)
prev = prev->qh_next.qh;
prev->hw_next = qh->hw_next;
prev->qh_next = qh->qh_next;
wmb ();
3) If the HC is already halted, finish unlinking the ehci->reclaim list right away:
if (unlikely (ehci_to_hcd(ehci)->state == HC_STATE_HALT)) {
end_unlink_async (ehci, NULL);
return;
}
4) Ring the doorbell to tell the HC that software has unlinked a qh:
ehci->reclaim_ready = 0;
cmd |= CMD_IAAD;
writel (cmd, &ehci->regs->command);
(void) readl (&ehci->regs->command);
5) Start the IAA watchdog monitor:
timer_action (ehci, TIMER_IAA_WATCHDOG);
Other notes
1) Unlinking ehci->async itself is obviously not allowed (it is the reclamation head); however, when the TIMER_ASYNC_OFF watchdog finds the asynchronous schedule has been idle for a while, it calls start_unlink_async(ehci, ehci->async) to disable the HC's asynchronous schedule;
2) start_*() and end_*() can end up calling each other; could this lead to an endless loop? (an open question)
1.3.8.7. end_unlink_async()
Prototype
static void end_unlink_async (struct ehci_hcd *ehci, struct pt_regs *regs)
When it is called
1) ehci_work();
2) unlink_async();
3) start_unlink_async().
Call notes
None
Main flow
1) Clear the IAA watchdog monitor;
2) Unlink the qh (qh = ehci->reclaim):
qh->qh_state = QH_STATE_IDLE;
qh->qh_next.qh = NULL;
qh_put (qh); // refcount from reclaim
/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
next = qh->reclaim;
ehci->reclaim = next;
ehci->reclaim_ready = 0;
qh->reclaim = NULL;
qh_completions (ehci, qh, regs); // finish the qTDs and urbs of this qh and update the overlay
3) If the qh still has unfinished qTDs, link it back into the asynchronous schedule list;
4) If all the qTDs of the qh have completed:
qh_put (qh); // refcount from async list
/* it's not free to turn the async schedule on/off; leave it
* active but idle for a while once it empties.
*/
if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
&& ehci->async->qh_next.qh == NULL)
timer_action (ehci, TIMER_ASYNC_OFF);
5) If more qhs are waiting to be unlinked (saved in next), start the next unlink:
if (next) {
ehci->reclaim = NULL; // start_unlink_async() below sets ehci->reclaim again and takes its own reference
start_unlink_async (ehci, next);
}
Other notes
1) qh_link_async() clears the TIMER_ASYNC_OFF action on entry, so as long as new qhs keep being submitted the HC's asynchronous schedule stays enabled; only after a period of disuse does it get disabled;
2) the normal path is that the IAA interrupt causes ehci_work() to call this function.
1.3.8.8. intr_deschedule()
Prototype
static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
When it is called
1) ehci_urb_dequeue();
2) qh_completions();
3) scan_periodic().
Call notes
None
Main flow
1) Call qh_unlink_periodic(ehci, qh) to unlink the qh from the periodic schedule list;
2) Pick a delay and wait for it:
/* simple/paranoid: always delay, expecting the HC needs to read
* qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
* expect khubd to clean up after any CSPLITs we won't issue.
* active high speed queues may need bigger delays...
*/
if (list_empty (&qh->qtd_list)
|| (__constant_cpu_to_le32 (QH_CMASK)
& qh->hw_info2) != 0)
wait = 2;
else
wait = 55; /* worst case: 3 * 1024 */
udelay (wait);
3) Update the qh:
qh->qh_state = QH_STATE_IDLE;
qh->hw_next = EHCI_LIST_END;
wmb ();
Other notes
1) The delay here is worth thinking about (it is the same kind of problem as with the qh dummy: the HC may still be caching the hardware pointers).
1.3.8.9. qh_unlink_periodic()
Prototype
static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
When it is called
1) intr_deschedule().
Call notes
None
Main flow
1) Call periodic_unlink() to remove the qh from every periodic schedule list slot it occupies:
/* high bandwidth, or otherwise part of every microframe */
if ((period = qh->period) == 0)
period = 1;
for (i = qh->start; i < ehci->periodic_size; i += period)
periodic_unlink (ehci, i, qh);
2) Update the accounting:
/* update per-qh bandwidth for usbfs */
ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
? ((qh->usecs + qh->c_usecs) / qh->period)
: (qh->usecs * 8);
3) Update the qh state:
/* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = NULL;
qh_put (qh);
4) Decide whether the periodic schedule should be disabled:
/* maybe turn off periodic schedule */
ehci->periodic_sched--;
if (!ehci->periodic_sched)
(void) disable_periodic (ehci);
Other notes
1.3.8.10. itd_complete ()
Prototype
static unsigned
itd_complete (
struct ehci_hcd *ehci,
struct ehci_itd *itd,
struct pt_regs *regs
)
When it is called
1) scan_periodic().
Call notes
None
Main flow
1), /*for each uframe with a packet,update the desc->status and desc->actual_length*/
for (uframe = 0; uframe < 8; uframe++) {
urb_index = itd->index[uframe];
desc = &urb->iso_frame_desc [urb_index];
t = le32_to_cpup (&itd->hw_transaction [uframe]);
itd->hw_transaction [uframe] = 0;
stream->depth -= stream->interval;
// desc->status carries any error information
update desc->status and desc->actual_length;
}
2) Recycle the itd:
usb_put_urb (urb); // balances the usb_get_urb() taken in itd_link_urb()
itd->urb = NULL;
itd->stream = NULL;
list_move (&itd->itd_list, &stream->free_list);
iso_stream_put (ehci, stream);
3),/* handle completion now? */
if (likely ((urb_index + 1) != urb->number_of_packets))
return 0; // the current urb is not finished yet
4),/* give urb back to the driver ... can be out-of-order */
dev = usb_get_dev (urb->dev);
ehci_urb_done (ehci, urb, regs);
urb = NULL;
5) /* defer stopping schedule; completion can submit */ (the deferral is not really visible in the code)
ehci->periodic_sched--;
if (unlikely (!ehci->periodic_sched))
(void) disable_periodic (ehci);
ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
6) Final bookkeeping:
if (unlikely (list_empty (&stream->td_list))) {
ehci_to_hcd(ehci)->self.bandwidth_allocated-= stream->bandwidth;
ehci_vdbg (ehci,"deschedule devp %s ep%d%s-iso\n",
dev->devpath, stream->bEndpointAddress & 0x0f,
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
}
iso_stream_put (ehci, stream);
usb_put_dev (dev);
Other notes
1) The places where periodic_sched changes:
periodic_sched starts at 0;
. periodic_sched++ happens in:
qh_link_periodic() --- interrupt submit
itd_link_urb() --- iso itd submit
sitd_link_urb() --- iso sitd submit
. periodic_sched-- happens in:
qh_unlink_periodic()
itd_complete()
sitd_complete()
1.3.9. ehci_hub_control()(hc_driver->hub_control)
Prototype
static int ehci_hub_control (
struct usb_hcd *hcd,
u16 typeReq,
u16 wValue,
u16 wIndex,
char *buf,
u16 wLength
)
When it is called
1) ehci_port_power() calls it directly to switch the power of every port;
2) rh_call_control():hcd.c calls it to carry out requests addressed to the root hub;
3) hcd_submit_urb() -> rh_urb_enqueue() -> rh_call_control().
Call notes
None
Main flow
Omitted (see the sketch below)
Other notes
1) The main difference between the root hub and a standard hub device is that the root hub is integrated with the HC on the same chip, so the HC does not need standard bus transactions to reach it; the ehci driver just exposes access through the HC's internal registers;
2) ehci_hub_control() implements only the "control transfers" to the root hub, supporting the following hub class requests defined by the usb 2.0 spec (chapter 11):
ClearHubFeature
ClearPortFeature *
GetHubDescriptor
GetHubStatus
GetPortStatus *
SetHubFeature
SetPortFeature *
*: the requests ehci_hub_control() puts most of its effort into;
3) ehci->lock is taken at function entry and released at exit.
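As an illustration of the register-level handling, SetPortFeature(PORT_POWER) (the request ehci_port_power() issues for every port, see 1.3.1) reduces to setting one bit in the port register; a simplified sketch:

	u32 __iomem *status_reg = &ehci->regs->port_status [wIndex - 1];
	u32 temp = readl (status_reg);

	if (HCS_PPC (ehci->hcs_params))		/* HC has per-port power switches? */
		writel (temp | PORT_POWER, status_reg);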
1.3.10. ehci_hub_status_data(hc_driver->hub_status_data)
Prototype
static int ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
When it is called
1) usb_hcd_poll_rh_status() calls it via hcd->driver->hub_status_data(hcd, buffer);
2) usb_hcd_poll_rh_status() is in turn called from rh_timer_func():hcd.c and from usb_add_hcd():hcd.c.
Call notes
None
Main flow
Omitted (a sketch of the idea follows)
Other notes
1) The function builds the port "status change" bitmap from the HC port registers;
2) usb_hcd_poll_rh_status() copies the returned buffer into hcd->status_urb and
calls usb_hcd_giveback_urb() to hand that urb back to the driver layer; hcd->status_urb is still submitted through hcd_submit_urb() -> rh_urb_enqueue() -> rh_queue_status() (simulating a hub interrupt urb).
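The bitmap in 1) is built by walking the port registers; roughly (simplified sketch, suspend/resume special cases and controllers with more than 7 ports ignored); bit 0 stands for the hub itself, bit (i + 1) for port i + 1:

	for (i = 0; i < HCS_N_PORTS (ehci->hcs_params); i++) {
		temp = readl (&ehci->regs->port_status [i]);
		if (temp & (PORT_CSC | PORT_PEC | PORT_OCC))
			buf [0] |= 1 << (i + 1);	/* "change" on port i + 1 */
	}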