2 * Universal Host Controller Interface driver for USB.
4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
6 * (C) Copyright 1999 Linus Torvalds
7 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8 * (C) Copyright 1999 Randy Dunlap
9 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14 * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
16 * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
18 * Intel documents this fairly well, and as far as I know there
19 * are no royalties or anything like that, but even so there are
20 * people who decided that they want to do the same thing in a
21 * completely different way.
23 * WARNING! The USB documentation is downright evil. Most of it
24 * is just crap, written by a committee. You're better off ignoring
25 * most of it, the important stuff is:
26 * - the low-level protocol (fairly simple but lots of small details)
27 * - working around the horridness of the rest
30 #include <linux/config.h>
31 #ifdef CONFIG_USB_DEBUG
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/kernel.h>
39 #include <linux/init.h>
40 #include <linux/delay.h>
41 #include <linux/ioport.h>
42 #include <linux/sched.h>
43 #include <linux/slab.h>
44 #include <linux/smp_lock.h>
45 #include <linux/errno.h>
46 #include <linux/unistd.h>
47 #include <linux/interrupt.h>
48 #include <linux/spinlock.h>
49 #include <linux/proc_fs.h>
51 #include <linux/dmapool.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/usb.h>
55 #include <asm/bitops.h>
56 #include <asm/uaccess.h>
59 #include <asm/system.h>
61 #include "../core/hcd.h"
/* Driver identification strings reported via MODULE_* below. */
67 #define DRIVER_VERSION "v2.2"
68 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
69 Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
71 #define DRIVER_DESC "USB Universal Host Controller Interface driver"
/* Meaning of the "debug" module parameter consumed by uhci-debug.c: */
74 * debug = 0, no debugging messages
75 * debug = 1, dump failed URB's except for stalls
76 * debug = 2, dump all failed URB's (including stalls)
77 * show all queues in /proc/driver/uhci/[pci_addr]
78 * debug = 3, show all TD's in URB's when dumping
85 MODULE_PARM(debug, "i");
86 MODULE_PARM_DESC(debug, "Debug level");
/* Size of the scratch buffer used for the debug dumps above. */
88 #define ERRBUF_LEN (32 * 1024)
91 #include "uhci-debug.c"
/* Slab cache for per-URB private state (struct urb_priv). */
93 static kmem_cache_t *uhci_up_cachep; /* urb_priv */
/* Forward declarations for routines defined later in this file. */
95 static int uhci_get_current_frame_number(struct uhci_hcd *uhci);
96 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
97 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
98 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
99 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs);
100 static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
101 static void uhci_free_pending_tds(struct uhci_hcd *uhci);
103 static void hc_state_transitions(struct uhci_hcd *uhci);
105 /* If a transfer is still active after this much time, turn off FSBR */
106 #define IDLE_TIMEOUT (HZ / 20) /* 50 ms */
107 #define FSBR_DELAY (HZ / 20) /* 50 ms */
109 /* When we timeout an idle transfer for FSBR, we'll switch it over to */
110 /* depth first traversal. We'll do it in groups of this number of TD's */
111 /* to make sure it doesn't hog all of the bandwidth */
112 #define DEPTH_INTERVAL 5
115 * Technically, updating td->status here is a race, but it's not really a
116 * problem. The worst that can happen is that we set the IOC bit again
117 * generating a spurious interrupt. We could fix this by creating another
118 * QH and leaving the IOC bit always set, but then we would have to play
119 * games with the FSBR code to make sure we get the correct order in all
120 * the cases. I don't think it's worth the effort
122 static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
124 uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
127 static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
129 uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
132 static inline void uhci_moveto_complete(struct uhci_hcd *uhci,
133 struct urb_priv *urbp)
135 list_move_tail(&urbp->urb_list, &uhci->complete_list);
138 static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
140 dma_addr_t dma_handle;
143 td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
147 td->dma_handle = dma_handle;
149 td->link = UHCI_PTR_TERM;
155 INIT_LIST_HEAD(&td->list);
156 INIT_LIST_HEAD(&td->remove_list);
157 INIT_LIST_HEAD(&td->fl_list);
164 static inline void uhci_fill_td(struct uhci_td *td, u32 status,
165 u32 token, u32 buffer)
167 td->status = cpu_to_le32(status);
168 td->token = cpu_to_le32(token);
169 td->buffer = cpu_to_le32(buffer);
173 * We insert Isochronous URB's directly into the frame list at the beginning
175 static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
177 framenum %= UHCI_NUMFRAMES;
179 td->frame = framenum;
181 /* Is there a TD already mapped there? */
182 if (uhci->fl->frame_cpu[framenum]) {
183 struct uhci_td *ftd, *ltd;
185 ftd = uhci->fl->frame_cpu[framenum];
186 ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
188 list_add_tail(&td->fl_list, &ftd->fl_list);
190 td->link = ltd->link;
192 ltd->link = cpu_to_le32(td->dma_handle);
194 td->link = uhci->fl->frame[framenum];
196 uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
197 uhci->fl->frame_cpu[framenum] = td;
201 static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
203 /* If it's not inserted, don't remove it */
204 if (td->frame == -1 && list_empty(&td->fl_list))
207 if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
208 if (list_empty(&td->fl_list)) {
209 uhci->fl->frame[td->frame] = td->link;
210 uhci->fl->frame_cpu[td->frame] = NULL;
214 ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
215 uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
216 uhci->fl->frame_cpu[td->frame] = ntd;
221 ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
222 ptd->link = td->link;
226 td->link = UHCI_PTR_TERM;
228 list_del_init(&td->fl_list);
/*
 * uhci_insert_tds_in_qh - link an URB's TD chain under its QH.
 * The first TD goes into qh->element; each following TD is chained
 * through the previous TD's link field, tagged with the given
 * breadth/depth-first traversal bit.
 * NOTE(review): this extract appears to have lines elided (loop setup,
 * early return, closing braces) — verify against upstream uhci-hcd.c.
 */
233 * Inserts a td into qh list at the top.
235 static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, u32 breadth)
237 struct list_head *tmp, *head;
238 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
239 struct uhci_td *td, *ptd;
/* Nothing to link for an URB with no TDs */
241 if (list_empty(&urbp->td_list))
244 head = &urbp->td_list;
247 /* Ordering isn't important here yet since the QH hasn't been */
248 /* inserted into the schedule yet */
249 td = list_entry(tmp, struct uhci_td, list);
251 /* Add the first TD to the QH element pointer */
252 qh->element = cpu_to_le32(td->dma_handle) | breadth;
256 /* Then link the rest of the TD's */
258 while (tmp != head) {
259 td = list_entry(tmp, struct uhci_td, list);
/* Chain the previous TD onto this one, keeping the traversal bit */
263 ptd->link = cpu_to_le32(td->dma_handle) | breadth;
/* Terminate the chain after the final TD */
268 ptd->link = UHCI_PTR_TERM;
271 static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
273 if (!list_empty(&td->list))
274 dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
275 if (!list_empty(&td->remove_list))
276 dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
277 if (!list_empty(&td->fl_list))
278 dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);
281 usb_put_dev(td->dev);
283 dma_pool_free(uhci->td_pool, td, td->dma_handle);
286 static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
288 dma_addr_t dma_handle;
291 qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
295 qh->dma_handle = dma_handle;
297 qh->element = UHCI_PTR_TERM;
298 qh->link = UHCI_PTR_TERM;
303 INIT_LIST_HEAD(&qh->list);
304 INIT_LIST_HEAD(&qh->remove_list);
311 static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
313 if (!list_empty(&qh->list))
314 dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
315 if (!list_empty(&qh->remove_list))
316 dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);
319 usb_put_dev(qh->dev);
321 dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
/*
 * uhci_insert_qh - splice an URB's QH into the schedule behind skelqh.
 * The new QH first inherits the old tail's link (so the HC can always
 * make forward progress), then the old tail — and every QH queued on the
 * same endpoint — is patched to point at the newcomer.  The wmb()
 * guarantees the HC never follows a half-written pointer.
 * NOTE(review): lines appear elided in this extract (e.g. the lqh
 * declaration and closing braces) — verify against upstream uhci-hcd.c.
 */
325 * Append this urb's qh after the last qh in skelqh->list
327 * Note that urb_priv.queue_list doesn't have a separate queue head;
328 * it's a ring with every element "live".
330 static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
332 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
333 struct list_head *tmp;
336 /* Grab the last QH */
337 lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);
339 /* Point to the next skelqh */
340 urbp->qh->link = lqh->link;
341 wmb(); /* Ordering is important */
344 * Patch QHs for previous endpoint's queued URBs? HC goes
345 * here next, not to the next skelqh it now points to.
347 * lqh --> td ... --> qh ... --> td --> qh ... --> td
350 * +<----------------+-----------------+
352 * newqh --> td ... --> td
357 * The HC could see (and use!) any of these as we write them.
359 lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
/* Every QH queued behind lqh on the same endpoint must follow suit */
361 list_for_each (tmp, &lqh->urbp->queue_list) {
362 struct urb_priv *turbp =
363 list_entry(tmp, struct urb_priv, queue_list);
365 turbp->qh->link = lqh->link;
/* Finally record the QH on the software-side skeleton list */
369 list_add_tail(&urbp->qh->list, &skelqh->list);
/*
 * uhci_remove_qh - begin unlinking a QH from the schedule.
 * The QH is only queued on uhci->qh_remove_list here; the actual free
 * happens after the HC has moved past it (next frame), driven by the
 * IOC interrupt requested below.  Caller must hold the schedule lock.
 * NOTE(review): several lines are elided in this extract (local
 * declarations such as pqh/newlink/age, list-walk setup, braces) —
 * verify against upstream uhci-hcd.c.
 */
373 * Start removal of QH from schedule; it finishes next frame.
374 * TDs should be unlinked before this is called.
376 static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
386 * Only go through the hoops if it's actually linked in
388 if (!list_empty(&qh->list)) {
390 /* If our queue is nonempty, make the next URB the head */
391 if (!list_empty(&qh->urbp->queue_list)) {
392 struct urb_priv *nurbp;
394 nurbp = list_entry(qh->urbp->queue_list.next,
395 struct urb_priv, queue_list);
397 list_add(&nurbp->qh->list, &qh->list);
398 newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
402 /* Fix up the previous QH's queue to link to either
403 * the new head of this queue or the start of the
404 * next endpoint's queue. */
405 pqh = list_entry(qh->list.prev, struct uhci_qh, list);
408 struct list_head *head, *tmp;
410 head = &pqh->urbp->queue_list;
412 while (head != tmp) {
413 struct urb_priv *turbp =
414 list_entry(tmp, struct urb_priv, queue_list);
418 turbp->qh->link = newlink;
423 /* Leave qh->link in case the HC is on the QH now, it will */
424 /* continue the rest of the schedule */
425 qh->element = UHCI_PTR_TERM;
427 list_del_init(&qh->list);
430 list_del_init(&qh->urbp->queue_list);
/* Flush any QHs queued for removal during an earlier frame */
433 age = uhci_get_current_frame_number(uhci);
434 if (age != uhci->qh_remove_age) {
435 uhci_free_pending_qhs(uhci);
436 uhci->qh_remove_age = age;
439 /* Check to see if the remove list is empty. Set the IOC bit */
440 /* to force an interrupt so we can remove the QH */
441 if (list_empty(&uhci->qh_remove_list))
442 uhci_set_next_interrupt(uhci);
444 list_add(&qh->remove_list, &uhci->qh_remove_list);
/*
 * uhci_fixup_toggle - rewrite the data-toggle bit of every TD in an URB
 * so the sequence starts from @toggle, alternating per TD.  Returns the
 * toggle value the *next* URB on this endpoint should start with.
 * NOTE(review): loop setup, the toggle flip, and the return statement
 * are elided in this extract — verify against upstream uhci-hcd.c.
 */
447 static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
449 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
450 struct list_head *head, *tmp;
452 head = &urbp->td_list;
454 while (head != tmp) {
455 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
/* Set or clear TD_TOKEN_TOGGLE according to the running toggle */
460 td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
462 td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
/*
 * uhci_append_queued_urb - chain a new URB behind an URB (@eurb) already
 * scheduled on the same endpoint, instead of giving it its own place in
 * the skeleton.  Fixes up the data toggle (except for control, which
 * always starts at DATA0), links the QHs, and patches the last TD of the
 * previous URB to jump into the new QH.
 * NOTE(review): lines are elided in this extract (first-URB search body,
 * braces) — verify against upstream uhci-hcd.c.
 */
471 /* This function will append one URB's QH to another URB's QH. This is for */
472 /* queuing interrupt, control or bulk transfers */
473 static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
475 struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
476 struct list_head *tmp;
477 struct uhci_td *lltd;
479 eurbp = eurb->hcpriv;
482 /* Find the first URB in the queue */
484 struct list_head *head = &eurbp->queue_list;
487 while (tmp != head) {
488 struct urb_priv *turbp =
489 list_entry(tmp, struct urb_priv, queue_list);
497 tmp = &eurbp->queue_list;
/* furbp/lurbp = first and last URB currently in the queue */
499 furbp = list_entry(tmp, struct urb_priv, queue_list);
500 lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);
/* lltd = last TD of the last queued URB; we append after it */
502 lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);
504 /* Control transfers always start with toggle 0 */
505 if (!usb_pipecontrol(urb->pipe))
506 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
507 usb_pipeout(urb->pipe),
508 uhci_fixup_toggle(urb,
509 uhci_toggle(td_token(lltd)) ^ 1));
511 /* All qh's in the queue need to link to the next queue */
512 urbp->qh->link = eurbp->qh->link;
514 wmb(); /* Make sure we flush everything */
516 lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
518 list_add_tail(&urbp->queue_list, &furbp->queue_list);
/*
 * uhci_delete_queued_urb - take an URB out of the middle (or end) of an
 * endpoint queue built by uhci_append_queued_urb().  Re-propagates the
 * data toggle through the URBs that follow, then splices the previous
 * URB's last TD past this one (or terminates the chain if we were last).
 * The queue-head case is handled in uhci_remove_qh() instead.
 * NOTE(review): lines are elided in this extract (urbp assignment,
 * branch structure, loop setup) — verify against upstream uhci-hcd.c.
 */
523 static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
525 struct urb_priv *urbp, *nurbp;
526 struct list_head *head, *tmp;
527 struct urb_priv *purbp;
528 struct uhci_td *pltd;
/* Not queued behind anything: nothing to unsplice */
533 if (list_empty(&urbp->queue_list))
536 nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);
539 * Fix up the toggle for the following URBs in the queue.
540 * Only needed for bulk and interrupt: control and isochronous
541 * endpoints don't propagate toggles between messages.
543 if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
545 /* We just set the toggle in uhci_unlink_generic */
546 toggle = usb_gettoggle(urb->dev,
547 usb_pipeendpoint(urb->pipe),
548 usb_pipeout(urb->pipe));
550 /* If we're in the middle of the queue, grab the */
551 /* toggle from the TD previous to us */
552 purbp = list_entry(urbp->queue_list.prev,
553 struct urb_priv, queue_list);
554 pltd = list_entry(purbp->td_list.prev,
555 struct uhci_td, list);
556 toggle = uhci_toggle(td_token(pltd)) ^ 1;
/* Walk the rest of the queue, rewriting each URB's toggles */
559 head = &urbp->queue_list;
561 while (head != tmp) {
562 struct urb_priv *turbp;
564 turbp = list_entry(tmp, struct urb_priv, queue_list);
569 toggle = uhci_fixup_toggle(turbp->urb, toggle);
/* Record the final toggle for the endpoint */
572 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
573 usb_pipeout(urb->pipe), toggle);
577 /* We're somewhere in the middle (or end). The case where
578 * we're at the head is handled in uhci_remove_qh(). */
579 purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
582 pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
/* Link the previous URB's last TD to our successor's QH... */
584 pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
586 /* The next URB happens to be the beginning, so */
587 /* we're the last, end the chain */
588 pltd->link = UHCI_PTR_TERM;
591 /* urbp->queue_list is handled in uhci_remove_qh() */
/*
 * uhci_alloc_urb_priv - allocate and zero the per-URB private state,
 * stamp it with the insertion time (for FSBR/idle timeouts), and add it
 * to uhci->urb_list.  Returns NULL on allocation failure (the return
 * statements are elided in this extract).
 */
594 static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
596 struct urb_priv *urbp;
598 urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
602 memset((void *)urbp, 0, sizeof(*urbp));
/* Timestamps used by the FSBR idle-timeout logic */
604 urbp->inserttime = jiffies;
605 urbp->fsbrtime = jiffies;
608 INIT_LIST_HEAD(&urbp->td_list);
609 INIT_LIST_HEAD(&urbp->queue_list);
610 INIT_LIST_HEAD(&urbp->urb_list);
612 list_add_tail(&urbp->urb_list, &uhci->urb_list);
619 static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
621 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
625 list_add_tail(&td->list, &urbp->td_list);
628 static void uhci_remove_td_from_urb(struct uhci_td *td)
630 if (list_empty(&td->list))
633 list_del_init(&td->list);
/*
 * uhci_destroy_urb_priv - tear down an URB's private state.  Each TD is
 * pulled off the URB and the frame list and parked on
 * uhci->td_remove_list; the actual frees happen after the HC has moved
 * past them (an IOC interrupt is requested to trigger that).  Finally
 * the urb_priv itself goes back to the slab cache.
 * NOTE(review): lines are elided in this extract (the `age` declaration,
 * early return, loop setup/advance, urb->hcpriv reset) — verify against
 * upstream uhci-hcd.c.
 */
638 static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
640 struct list_head *head, *tmp;
641 struct urb_priv *urbp;
644 urbp = (struct urb_priv *)urb->hcpriv;
648 if (!list_empty(&urbp->urb_list))
649 dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
650 "or uhci->remove_list!\n", urb);
/* Flush TDs queued for removal during an earlier frame */
652 age = uhci_get_current_frame_number(uhci);
653 if (age != uhci->td_remove_age) {
654 uhci_free_pending_tds(uhci);
655 uhci->td_remove_age = age;
658 /* Check to see if the remove list is empty. Set the IOC bit */
659 /* to force an interrupt so we can remove the TD's*/
660 if (list_empty(&uhci->td_remove_list))
661 uhci_set_next_interrupt(uhci);
663 head = &urbp->td_list;
665 while (tmp != head) {
666 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
670 uhci_remove_td_from_urb(td);
671 uhci_remove_td(uhci, td);
672 list_add(&td->remove_list, &uhci->td_remove_list);
676 kmem_cache_free(uhci_up_cachep, urbp);
679 static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
681 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
683 if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
685 if (!uhci->fsbr++ && !uhci->fsbrtimeout)
686 uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
690 static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
692 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
694 if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
697 uhci->fsbrtimeout = jiffies + FSBR_DELAY;
702 * Map status to standard result codes
704 * <status> is (td->status & 0xF60000) [a.k.a. uhci_status_bits(td->status)]
705 * Note: status does not include the TD_CTRL_NAK bit.
706 * <dir_out> is True for output TDs and False for input TDs.
708 static int uhci_map_status(int status, int dir_out)
712 if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */
714 if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */
720 if (status & TD_CTRL_BABBLE) /* Babble */
722 if (status & TD_CTRL_DBUFERR) /* Buffer error */
724 if (status & TD_CTRL_STALLED) /* Stalled */
726 WARN_ON(status & TD_CTRL_ACTIVE); /* Active */
/*
 * uhci_submit_control - build and schedule a control transfer:
 * one SETUP TD, zero or more DATA TDs alternating DATA1/DATA0, and a
 * final zero-length STATUS TD in the opposite direction, all hung off a
 * freshly allocated QH on the low- or full-speed control skeleton.
 * If @eurb is non-NULL the new URB is queued behind it instead.
 * NOTE(review): lines are elided in this extract (td/qh declarations,
 * error paths, the DATA-TD loop header, braces) — verify against
 * upstream uhci-hcd.c.
 */
733 static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
735 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
737 struct uhci_qh *qh, *skelqh;
738 unsigned long destination, status;
739 int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
740 int len = urb->transfer_buffer_length;
741 dma_addr_t data = urb->transfer_dma;
743 /* The "pipe" thing contains the destination in bits 8--18 */
744 destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
747 status = TD_CTRL_ACTIVE | uhci_maxerr(3);
748 if (urb->dev->speed == USB_SPEED_LOW)
749 status |= TD_CTRL_LS;
752 * Build the TD for the control request setup packet
754 td = uhci_alloc_td(uhci, urb->dev);
/* SETUP stage: 8-byte setup packet, explen encodes length-1 (7) */
758 uhci_add_td_to_urb(urb, td);
759 uhci_fill_td(td, status, destination | uhci_explen(7),
763 * If direction is "send", change the packet ID from SETUP (0x2D)
764 * to OUT (0xE1). Else change it from SETUP to IN (0x69) and
765 * set Short Packet Detect (SPD) for all data packets.
767 if (usb_pipeout(urb->pipe))
768 destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
770 destination ^= (USB_PID_SETUP ^ USB_PID_IN);
771 status |= TD_CTRL_SPD;
775 * Build the DATA TD's
783 td = uhci_alloc_td(uhci, urb->dev);
787 /* Alternate Data0/1 (start with Data1) */
788 destination ^= TD_TOKEN_TOGGLE;
790 uhci_add_td_to_urb(urb, td);
791 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
799 * Build the final TD for control status
801 td = uhci_alloc_td(uhci, urb->dev);
806 * It's IN if the pipe is an output pipe or we're not expecting
809 destination &= ~TD_TOKEN_PID_MASK;
810 if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
811 destination |= USB_PID_IN;
813 destination |= USB_PID_OUT;
815 destination |= TD_TOKEN_TOGGLE; /* End in Data1 */
/* Status stage must never be cut short, so clear SPD */
817 status &= ~TD_CTRL_SPD;
819 uhci_add_td_to_urb(urb, td);
820 uhci_fill_td(td, status | TD_CTRL_IOC,
821 destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);
823 qh = uhci_alloc_qh(uhci, urb->dev);
830 uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
832 /* Low-speed transfers get a different queue, and won't hog the bus */
833 if (urb->dev->speed == USB_SPEED_LOW)
834 skelqh = uhci->skel_ls_control_qh;
836 skelqh = uhci->skel_fs_control_qh;
/* Full-speed control transfers are eligible for FSBR */
837 uhci_inc_fsbr(uhci, urb);
/* Queue behind an existing URB on the endpoint, or link afresh */
841 uhci_append_queued_urb(uhci, eurb, urb);
843 uhci_insert_qh(uhci, skelqh, urb);
/*
 * See the block comment below: after a short control-IN data stage, the
 * QH's element pointer is redirected to the last TD (the status TD) so
 * the HC still performs the status stage.
 * NOTE(review): the td declaration and return statement are elided in
 * this extract — verify against upstream uhci-hcd.c.
 */
849 * If control-IN transfer was short, the status packet wasn't sent.
850 * This routine changes the element pointer in the QH to point at the
851 * status TD. It's safe to do this even while the QH is live, because
852 * the hardware only updates the element pointer following a successful
853 * transfer. The inactive TD for the short packet won't cause an update,
854 * so the pointer won't get overwritten. The next time the controller
855 * sees this QH, it will send the status packet.
857 static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
859 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
/* Remember we did this so uhci_result_control() restarts correctly */
862 urbp->short_control_packet = 1;
864 td = list_entry(urbp->td_list.prev, struct uhci_td, list);
865 urbp->qh->element = td->dma_handle;
/*
 * uhci_result_control - examine a control URB's TDs: check the SETUP
 * stage, accumulate actual_length over the DATA stage (retriggering the
 * status stage after a legal short IN read), then check the STATUS
 * stage.  Returns -EINPROGRESS while any stage is still active, 0 on
 * success, or a mapped error code.
 * NOTE(review): lines are elided in this extract (td/status/ret
 * declarations, return statements, loop advance, error-label code) —
 * verify against upstream uhci-hcd.c.
 */
871 static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
873 struct list_head *tmp, *head;
874 struct urb_priv *urbp = urb->hcpriv;
879 if (list_empty(&urbp->td_list))
882 head = &urbp->td_list;
/* If we retriggered the status stage, only that TD is left to check */
884 if (urbp->short_control_packet) {
890 td = list_entry(tmp, struct uhci_td, list);
892 /* The first TD is the SETUP stage, check the status, but skip */
894 status = uhci_status_bits(td_status(td));
895 if (status & TD_CTRL_ACTIVE)
901 urb->actual_length = 0;
903 /* The rest of the TD's (but the last) are data */
905 while (tmp != head && tmp->next != head) {
906 td = list_entry(tmp, struct uhci_td, list);
910 status = uhci_status_bits(td_status(td));
911 if (status & TD_CTRL_ACTIVE)
914 urb->actual_length += uhci_actual_length(td_status(td));
919 /* Check to see if we received a short packet */
920 if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
921 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
/* Legal short IN read: make sure the status stage still runs */
926 if (uhci_packetid(td_token(td)) == USB_PID_IN)
927 return usb_control_retrigger_status(uhci, urb);
934 td = list_entry(tmp, struct uhci_td, list);
936 /* Control status stage */
937 status = td_status(td);
939 #ifdef I_HAVE_BUGGY_APC_BACKUPS
940 /* APC BackUPS Pro kludge */
941 /* It tries to send all of the descriptor instead of the amount */
943 if (status & TD_CTRL_IOC && /* IOC is masked out by uhci_status_bits */
944 status & TD_CTRL_ACTIVE &&
945 status & TD_CTRL_NAK)
949 status = uhci_status_bits(status);
950 if (status & TD_CTRL_ACTIVE)
959 ret = uhci_map_status(status, uhci_packetout(td_token(td)));
962 if ((debug == 1 && ret != -EPIPE) || debug > 1) {
963 /* Some debugging code */
964 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
965 __FUNCTION__, status);
968 /* Print the chain for debugging purposes */
969 uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
/*
 * uhci_submit_common - shared submit path for bulk and interrupt URBs.
 * Chops the transfer buffer into maxpacket-sized TDs (plus an optional
 * trailing zero-length packet for URB_ZERO_PACKET OUT transfers),
 * maintains the endpoint data toggle, requests IOC on the last TD, and
 * links everything via a new QH onto @skelqh (or behind @eurb).
 * NOTE(review): lines are elided in this extract (td/qh declarations,
 * error paths, loop tails, braces) — verify against upstream uhci-hcd.c.
 */
979 * Common submit for bulk and interrupt
981 static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
985 unsigned long destination, status;
986 int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
987 int len = urb->transfer_buffer_length;
988 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
989 dma_addr_t data = urb->transfer_dma;
994 /* The "pipe" thing contains the destination in bits 8--18 */
995 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
997 status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
998 if (urb->dev->speed == USB_SPEED_LOW)
999 status |= TD_CTRL_LS;
/* Detect short packets on IN transfers */
1000 if (usb_pipein(urb->pipe))
1001 status |= TD_CTRL_SPD;
1004 * Build the DATA TD's
1006 do { /* Allow zero length packets */
1007 int pktsze = maxsze;
1009 if (pktsze >= len) {
/* Final (possibly short) packet: SPD only if short is an error */
1011 if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
1012 status &= ~TD_CTRL_SPD;
1015 td = uhci_alloc_td(uhci, urb->dev);
1019 uhci_add_td_to_urb(urb, td);
1020 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
1021 (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1022 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
1028 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1029 usb_pipeout(urb->pipe));
1033 * URB_ZERO_PACKET means adding a 0-length packet, if direction
1034 * is OUT and the transfer_length was an exact multiple of maxsze,
1035 * hence (len = transfer_length - N * maxsze) == 0
1036 * however, if transfer_length == 0, the zero packet was already
1039 if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
1040 !len && urb->transfer_buffer_length) {
1041 td = uhci_alloc_td(uhci, urb->dev);
1045 uhci_add_td_to_urb(urb, td);
1046 uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
1047 (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1048 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
1051 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1052 usb_pipeout(urb->pipe));
1055 /* Set the interrupt-on-completion flag on the last packet.
1056 * A more-or-less typical 4 KB URB (= size of one memory page)
1057 * will require about 3 ms to transfer; that's a little on the
1058 * fast side but not enough to justify delaying an interrupt
1059 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
1061 td->status |= cpu_to_le32(TD_CTRL_IOC);
1063 qh = uhci_alloc_qh(uhci, urb->dev);
1070 /* Always breadth first */
1071 uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
/* Queue behind an existing URB on the endpoint, or link afresh */
1074 uhci_append_queued_urb(uhci, eurb, urb);
1076 uhci_insert_qh(uhci, skelqh, urb);
1078 return -EINPROGRESS;
/*
 * uhci_result_common - shared completion path for bulk and interrupt
 * URBs.  Walks the TD list accumulating actual_length; returns
 * -EINPROGRESS while any TD is active, 0 on (possibly short-but-OK)
 * success, or a mapped error code, halting the endpoint on stall.
 * NOTE(review): lines are elided in this extract (td/ret declarations,
 * loop setup/advance, return statements, error labels) — verify against
 * upstream uhci-hcd.c.
 */
1082 * Common result for bulk and interrupt
1084 static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
1086 struct list_head *tmp, *head;
1087 struct urb_priv *urbp = urb->hcpriv;
1089 unsigned int status = 0;
1092 urb->actual_length = 0;
1094 head = &urbp->td_list;
1096 while (tmp != head) {
1097 td = list_entry(tmp, struct uhci_td, list);
1101 status = uhci_status_bits(td_status(td));
1102 if (status & TD_CTRL_ACTIVE)
1103 return -EINPROGRESS;
1105 urb->actual_length += uhci_actual_length(td_status(td));
/* A short packet terminates the transfer; error only if SHORT_NOT_OK */
1110 if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
1111 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
1122 ret = uhci_map_status(status, uhci_packetout(td_token(td)));
1124 /* endpoint has stalled - mark it halted */
1125 usb_endpoint_halt(urb->dev, uhci_endpoint(td_token(td)),
1126 uhci_packetout(td_token(td)));
1130 * Enable this chunk of code if you want to see some more debugging.
1131 * But be careful, it has the tendency to starve out khubd and prevent
1132 * disconnects from happening successfully if you have a slow debug
1133 * log interface (like a serial console).
1136 if ((debug == 1 && ret != -EPIPE) || debug > 1) {
1137 /* Some debugging code */
1138 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
1139 __FUNCTION__, status);
1142 /* Print the chain for debugging purposes */
1143 uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
1152 static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1156 /* Can't have low-speed bulk transfers */
1157 if (urb->dev->speed == USB_SPEED_LOW)
1160 ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
1161 if (ret == -EINPROGRESS)
1162 uhci_inc_fsbr(uhci, urb);
1167 static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1169 /* USB 1.1 interrupt transfers only involve one packet per interval;
1170 * that's the uhci_submit_common() "breadth first" policy. Drivers
1171 * can submit urbs of any length, but longer ones might need many
1172 * intervals to complete.
1174 return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
/*
 * isochronous_find_limits - scan the pending URB list for other in-flight
 * isochronous URBs on the same pipe/device, reporting the first one's
 * start frame in *start and the frame just past the last one in *end.
 * Returns 0 if a predecessor was found, -1 otherwise.
 * NOTE(review): lines are elided in this extract (ret declaration, loop
 * setup/advance, last_urb tracking, return statements) — verify against
 * upstream uhci-hcd.c.
 */
1178 * Isochronous transfers
1180 static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
1182 struct urb *last_urb = NULL;
1183 struct list_head *tmp, *head;
1186 head = &uhci->urb_list;
1188 while (tmp != head) {
1189 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1190 struct urb *u = up->urb;
1194 /* look for pending URB's with identical pipe handle */
1195 if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
1196 (u->status == -EINPROGRESS) && (u != urb)) {
1198 *start = u->start_frame;
/* One frame past the last pending URB on this pipe */
1204 *end = (last_urb->start_frame + last_urb->number_of_packets *
1205 last_urb->interval) & (UHCI_NUMFRAMES-1);
1208 ret = -1; /* no previous urb found */
/*
 * isochronous_find_start - decide urb->start_frame for a new iso URB:
 * either continue where existing same-pipe URBs end, or (URB_ISO_ASAP
 * with no predecessor) start a safety margin of 10 frames ahead of the
 * current frame counter.
 * NOTE(review): lines are elided in this extract (limits declaration,
 * branch structure, return) — verify against upstream uhci-hcd.c.
 */
1213 static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
1216 unsigned int start = 0, end = 0;
/* Cap the number of packets per URB */
1218 if (urb->number_of_packets > 900) /* 900? Why? */
1221 limits = isochronous_find_limits(uhci, urb, &start, &end);
1223 if (urb->transfer_flags & URB_ISO_ASAP) {
1227 curframe = uhci_get_current_frame_number(uhci) % UHCI_NUMFRAMES;
1228 urb->start_frame = (curframe + 10) % UHCI_NUMFRAMES;
1230 urb->start_frame = end;
1232 urb->start_frame %= UHCI_NUMFRAMES;
1233 /* FIXME: Sanity check */
/*
 * uhci_submit_isochronous - build one TD per iso packet and insert each
 * directly into the frame list at its scheduled frame (iso TDs bypass
 * the QH skeleton).  IOC is requested on the final packet only.
 * NOTE(review): lines are elided in this extract (td/i/frame/ret
 * declarations, error returns, braces) — verify against upstream
 * uhci-hcd.c.
 */
1240 * Isochronous transfers
1242 static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1246 int status, destination;
/* TD_CTRL_IOS marks the TD as isochronous to the HC */
1248 status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
1249 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
1251 ret = isochronous_find_start(uhci, urb);
1255 frame = urb->start_frame;
1256 for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
/* Zero-length packets get no TD at all */
1257 if (!urb->iso_frame_desc[i].length)
1260 td = uhci_alloc_td(uhci, urb->dev);
1264 uhci_add_td_to_urb(urb, td);
1265 uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
1266 urb->transfer_dma + urb->iso_frame_desc[i].offset);
1268 if (i + 1 >= urb->number_of_packets)
1269 td->status |= cpu_to_le32(TD_CTRL_IOC);
1271 uhci_insert_td_frame_list(uhci, td, frame);
1274 return -EINPROGRESS;
/*
 * uhci_result_isochronous - collect per-packet lengths and statuses for
 * an iso URB.  Returns -EINPROGRESS while any TD is still active;
 * otherwise fills in each iso_frame_desc and the total actual_length.
 * NOTE(review): lines are elided in this extract (i/ret/status/actlength
 * declarations, loop setup/advance, return) — verify against upstream
 * uhci-hcd.c.
 */
1277 static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1279 struct list_head *tmp, *head;
1280 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1284 urb->actual_length = 0;
1287 head = &urbp->td_list;
1289 while (tmp != head) {
1290 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1295 if (td_status(td) & TD_CTRL_ACTIVE)
1296 return -EINPROGRESS;
1298 actlength = uhci_actual_length(td_status(td));
1299 urb->iso_frame_desc[i].actual_length = actlength;
1300 urb->actual_length += actlength;
/* Per-packet status; errors here don't fail the whole URB */
1302 status = uhci_map_status(uhci_status_bits(td_status(td)),
1303 usb_pipeout(urb->pipe));
1304 urb->iso_frame_desc[i].status = status;
/*
 * uhci_find_urb_ep - find another in-progress URB on the same endpoint,
 * so a new submission can be queued behind it.  Isochronous URBs never
 * match (they are scheduled directly into the frame list).  For control
 * pipes, IN and OUT directions are treated as the same endpoint.
 * NOTE(review): lines are elided in this extract (loop setup/advance,
 * the returns of `u`/NULL) — verify against upstream uhci-hcd.c.
 */
1316 static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
1318 struct list_head *tmp, *head;
1320 /* We don't match Isoc transfers since they are special */
1321 if (usb_pipeisoc(urb->pipe))
1324 head = &uhci->urb_list;
1326 while (tmp != head) {
1327 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1328 struct urb *u = up->urb;
1332 if (u->dev == urb->dev && u->status == -EINPROGRESS) {
1333 /* For control, ignore the direction */
1334 if (usb_pipecontrol(urb->pipe) &&
1335 (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
1337 else if (u->pipe == urb->pipe)
/*
 * uhci_urb_enqueue - usb_hcd entry point for submitting an URB.
 * Under the schedule lock: allocate urb_priv, find a same-endpoint URB
 * to queue behind, dispatch by pipe type (checking/claiming bus
 * bandwidth for periodic transfers), and unwind urb_priv on failure.
 * NOTE(review): lines are elided in this extract (ret/eurb/bustime
 * declarations, case labels, goto targets, braces) — verify against
 * upstream uhci-hcd.c.
 */
1345 static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, int mem_flags)
1348 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1349 unsigned long flags;
1353 spin_lock_irqsave(&uhci->schedule_lock, flags);
1355 if (urb->status != -EINPROGRESS) /* URB already unlinked! */
1358 eurb = uhci_find_urb_ep(uhci, urb);
1360 if (!uhci_alloc_urb_priv(uhci, urb)) {
1365 switch (usb_pipetype(urb->pipe)) {
1367 ret = uhci_submit_control(uhci, urb, eurb);
1369 case PIPE_INTERRUPT:
/* First URB on the endpoint must reserve bandwidth */
1371 bustime = usb_check_bandwidth(urb->dev, urb);
1375 ret = uhci_submit_interrupt(uhci, urb, eurb);
1376 if (ret == -EINPROGRESS)
1377 usb_claim_bandwidth(urb->dev, urb, bustime, 0);
1379 } else { /* inherit from parent */
1380 urb->bandwidth = eurb->bandwidth;
1381 ret = uhci_submit_interrupt(uhci, urb, eurb);
1385 ret = uhci_submit_bulk(uhci, urb, eurb);
1387 case PIPE_ISOCHRONOUS:
1388 bustime = usb_check_bandwidth(urb->dev, urb);
1394 ret = uhci_submit_isochronous(uhci, urb);
1395 if (ret == -EINPROGRESS)
1396 usb_claim_bandwidth(urb->dev, urb, bustime, 1);
1400 if (ret != -EINPROGRESS) {
1401 /* Submit failed, so delete it from the urb_list */
1402 struct urb_priv *urbp = urb->hcpriv;
1404 list_del_init(&urbp->urb_list);
1405 uhci_destroy_urb_priv(uhci, urb);
1410 spin_unlock_irqrestore(&uhci->schedule_lock, flags);
/*
 * uhci_transfer_result - poll one URB for completion.  Dispatches to the
 * per-type result routine; if the URB is finished, releases any claimed
 * periodic bandwidth, unlinks its hardware structures, and moves it to
 * the completion list.  Serialized against dequeue via urb->lock.
 * NOTE(review): lines are elided in this extract (case labels, goto
 * target, braces) — verify against upstream uhci-hcd.c.
 */
1415 * Return the result of a transfer
1417 static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
1419 int ret = -EINPROGRESS;
1420 struct urb_priv *urbp;
1422 spin_lock(&urb->lock);
1424 urbp = (struct urb_priv *)urb->hcpriv;
1426 if (urb->status != -EINPROGRESS) /* URB already dequeued */
1429 switch (usb_pipetype(urb->pipe)) {
1431 ret = uhci_result_control(uhci, urb);
1434 case PIPE_INTERRUPT:
1435 ret = uhci_result_common(uhci, urb);
1437 case PIPE_ISOCHRONOUS:
1438 ret = uhci_result_isochronous(uhci, urb);
/* Still running: leave it on urb_list for the next poll */
1442 if (ret == -EINPROGRESS)
1446 switch (usb_pipetype(urb->pipe)) {
1449 case PIPE_ISOCHRONOUS:
1450 /* Release bandwidth for Interrupt or Isoc. transfers */
1452 usb_release_bandwidth(urb->dev, urb, 1);
1453 uhci_unlink_generic(uhci, urb);
1455 case PIPE_INTERRUPT:
1456 /* Release bandwidth for Interrupt or Isoc. transfers */
1457 /* Make sure we don't release if we have a queued URB */
1458 if (list_empty(&urbp->queue_list) && urb->bandwidth)
1459 usb_release_bandwidth(urb->dev, urb, 0);
1461 /* bandwidth was passed on to queued URB, */
1462 /* so don't let usb_unlink_urb() release it */
1464 uhci_unlink_generic(uhci, urb);
1467 dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
1469 __FUNCTION__, usb_pipetype(urb->pipe), urb);
1472 /* Move it from uhci->urb_list to uhci->complete_list */
1473 uhci_moveto_complete(uhci, urbp);
1476 spin_unlock(&urb->lock);
/*
 * Common unlink path: drop FSBR, recover the endpoint's data-toggle state
 * from the TD chain (see the three cases documented below), detach any
 * queued URBs, and schedule the QH for removal.  The QH itself is
 * reclaimed later by the interrupt loop, not freed here.
 */
1479 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
1481 struct list_head *head, *tmp;
1482 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1485 uhci_dec_fsbr(uhci, urb); /* Safe since it checks */
1488 * Now we need to find out what the last successful toggle was
1489 * so we can update the local data toggle for the next transfer
1491 * There's 3 way's the last successful completed TD is found:
1493 * 1) The TD is NOT active and the actual length < expected length
1494 * 2) The TD is NOT active and it's the last TD in the chain
1495 * 3) The TD is active and the previous TD is NOT active
1497 * Control and Isochronous ignore the toggle, so this is safe
1500 head = &urbp->td_list;
1502 while (tmp != head) {
1503 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
/* Case 1/2: completed TD — next transfer uses the opposite toggle. */
1507 if (!(td_status(td) & TD_CTRL_ACTIVE) &&
1508 (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td)) ||
1510 usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1511 uhci_packetout(td_token(td)),
1512 uhci_toggle(td_token(td)) ^ 1);
/* Case 3: first still-active TD after an inactive one — its toggle is next. */
1513 else if ((td_status(td) & TD_CTRL_ACTIVE) && !prevactive)
1514 usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1515 uhci_packetout(td_token(td)),
1516 uhci_toggle(td_token(td)));
1518 prevactive = td_status(td) & TD_CTRL_ACTIVE;
1521 uhci_delete_queued_urb(uhci, urb);
1523 /* The interrupt loop will reclaim the QH's */
1524 uhci_remove_qh(uhci, urbp->qh);
/*
 * usb_hcd entry point: cancel a submitted URB.  Under schedule_lock the
 * URB is unlinked from the schedule and parked on urb_remove_list; it is
 * only completed after the hardware has moved past it, which is detected
 * by the frame number changing (urb_remove_age).  The first entry on the
 * remove list arms the "interrupt on next frame" mechanism so the IRQ
 * handler will come back and finish the removal.
 */
1528 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
1530 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1531 unsigned long flags;
1532 struct urb_priv *urbp;
1535 spin_lock_irqsave(&uhci->schedule_lock, flags);
1537 if (!urbp) /* URB was never linked! */
1539 list_del_init(&urbp->urb_list);
1541 uhci_unlink_generic(uhci, urb);
1543 age = uhci_get_current_frame_number(uhci);
1544 if (age != uhci->urb_remove_age) {
1545 uhci_remove_pending_urbps(uhci);
1546 uhci->urb_remove_age = age;
1549 /* If we're the first, set the next interrupt bit */
1550 if (list_empty(&uhci->urb_remove_list))
1551 uhci_set_next_interrupt(uhci);
1552 list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);
1555 spin_unlock_irqrestore(&uhci->schedule_lock, flags);
/*
 * An URB using Full Speed Bandwidth Reclamation has been idle too long:
 * turn FSBR off for it and demote its TD chain from breadth-first to
 * depth-first traversal (set UHCI_PTR_DEPTH) on every DEPTH_INTERVAL-th
 * TD, so it stops hogging the bus.  qh->element is deliberately left
 * alone because the HC reads/writes it concurrently (see comment below).
 */
1559 static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
1561 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1562 struct list_head *head, *tmp;
1565 uhci_dec_fsbr(uhci, urb);
1567 urbp->fsbr_timeout = 1;
1570 * Ideally we would want to fix qh->element as well, but it's
1571 * read/write by the HC, so that can introduce a race. It's not
1572 * really worth the hassle
1575 head = &urbp->td_list;
1577 while (tmp != head) {
1578 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1583 * Make sure we don't do the last one (since it'll have the
1584 * TERM bit set) as well as we skip every so many TD's to
1585 * make sure it doesn't hog the bandwidth
1587 if (tmp != head && (count % DEPTH_INTERVAL) == (DEPTH_INTERVAL - 1))
1588 td->link |= UHCI_PTR_DEPTH;
1597 * uhci_get_current_frame_number()
1599 * returns the current frame number for a USB bus/controller.
/* Reads the 16-bit frame-number register (USBFRNUM) directly from I/O space. */
1601 static int uhci_get_current_frame_number(struct uhci_hcd *uhci)
1603 return inw(uhci->io_addr + USBFRNUM);
/* Forward declaration: stall_callback() re-arms the timer via this. */
1606 static int init_stall_timer(struct usb_hcd *hcd);
/*
 * Periodic watchdog (kernel timer, ~10 Hz — see init_stall_timer).
 * Under schedule_lock it: finishes any URB removals whose frame has
 * passed, applies FSBR idle timeouts, and collects URBs whose own
 * urb->timeout has expired onto a local list.  The timed-out URBs are
 * dequeued *after* the lock is dropped (uhci_urb_dequeue retakes it).
 * It also fully disables FSBR once its grace period ends, drives the
 * suspend/resume state machine, and finally re-arms itself.
 */
1608 static void stall_callback(unsigned long ptr)
1610 struct usb_hcd *hcd = (struct usb_hcd *)ptr;
1611 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1612 struct list_head list, *tmp, *head;
1613 unsigned long flags;
1615 INIT_LIST_HEAD(&list);
1617 spin_lock_irqsave(&uhci->schedule_lock, flags);
1618 if (!list_empty(&uhci->urb_remove_list) &&
1619 uhci_get_current_frame_number(uhci) != uhci->urb_remove_age) {
1620 uhci_remove_pending_urbps(uhci);
1621 uhci_finish_completion(hcd, NULL);
1624 head = &uhci->urb_list;
1626 while (tmp != head) {
1627 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1628 struct urb *u = up->urb;
1632 spin_lock(&u->lock);
1634 /* Check if the FSBR timed out */
1635 if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
1636 uhci_fsbr_timeout(uhci, u);
1638 /* Check if the URB timed out */
1639 if (u->timeout && u->status == -EINPROGRESS &&
1640 time_after_eq(jiffies, up->inserttime + u->timeout)) {
1641 u->status = -ETIMEDOUT;
1642 list_move_tail(&up->urb_list, &list);
1645 spin_unlock(&u->lock);
1647 spin_unlock_irqrestore(&uhci->schedule_lock, flags);
/* Dequeue the timed-out URBs outside schedule_lock. */
1651 while (tmp != head) {
1652 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1653 struct urb *u = up->urb;
1657 uhci_urb_dequeue(hcd, u);
1660 /* Really disable FSBR */
1661 if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
1662 uhci->fsbrtimeout = 0;
1663 uhci->skel_term_qh->link = UHCI_PTR_TERM;
1666 /* Poll for and perform state transitions */
1667 hc_state_transitions(uhci);
1669 init_stall_timer(hcd);
/*
 * (Re)arm the watchdog timer to fire stall_callback() in HZ/10 jiffies
 * (~100 ms), passing the usb_hcd pointer as the timer data.
 */
1672 static int init_stall_timer(struct usb_hcd *hcd)
1674 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1676 init_timer(&uhci->stall_timer);
1677 uhci->stall_timer.function = stall_callback;
1678 uhci->stall_timer.data = (unsigned long)hcd;
1679 uhci->stall_timer.expires = jiffies + (HZ / 10);
1680 add_timer(&uhci->stall_timer);
/*
 * Free every QH parked on qh_remove_list.  Called once the hardware is
 * known to be past them (frame number changed) — caller holds
 * schedule_lock.
 */
1685 static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
1687 struct list_head *tmp, *head;
1689 head = &uhci->qh_remove_list;
1691 while (tmp != head) {
1692 struct uhci_qh *qh = list_entry(tmp, struct uhci_qh, remove_list);
1696 list_del_init(&qh->remove_list);
1698 uhci_free_qh(uhci, qh);
/*
 * Free every TD parked on td_remove_list; same deferred-reclaim scheme
 * and locking rules as uhci_free_pending_qhs() above.
 */
1702 static void uhci_free_pending_tds(struct uhci_hcd *uhci)
1704 struct list_head *tmp, *head;
1706 head = &uhci->td_remove_list;
1708 while (tmp != head) {
1709 struct uhci_td *td = list_entry(tmp, struct uhci_td, remove_list);
1713 list_del_init(&td->remove_list);
1715 uhci_free_td(uhci, td);
/*
 * Destroy the per-URB private state and give the URB back to the USB
 * core.  schedule_lock is dropped around usb_hcd_giveback_urb() because
 * the completion handler may resubmit and re-enter this driver.
 */
1719 static void uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
1721 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1723 uhci_destroy_urb_priv(uhci, urb);
1725 spin_unlock(&uhci->schedule_lock);
1726 usb_hcd_giveback_urb(hcd, urb, regs);
1727 spin_lock(&uhci->schedule_lock);
/*
 * Drain uhci->complete_list, giving each URB back via uhci_finish_urb().
 * The head pointer is re-read each iteration (line 1744) because
 * uhci_finish_urb() drops schedule_lock, during which completion
 * handlers may add new entries to the list.
 */
1730 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs)
1732 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1733 struct list_head *tmp, *head;
1735 head = &uhci->complete_list;
1737 while (tmp != head) {
1738 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
1739 struct urb *urb = urbp->urb;
1741 list_del_init(&urbp->urb_list);
1742 uhci_finish_urb(hcd, urb, regs);
1744 head = &uhci->complete_list;
/*
 * Move everything on urb_remove_list to the tail of complete_list in one
 * splice; the URBs are then completed by uhci_finish_completion().
 */
1749 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
1752 /* Splice the urb_remove_list onto the end of the complete_list */
1753 list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
/*
 * Interrupt handler.  Reads USBSTS and writes it back to acknowledge
 * (except "HC Halted", which is read-only — see comment below), reports
 * fatal error bits, latches resume-detect, then under schedule_lock:
 * reclaims pending QHs/TDs/URBs whose removal frame has passed, manages
 * the next-frame-interrupt bit, polls every pending URB for completion
 * via uhci_transfer_result(), and hands finished URBs back.  Finally
 * wakes anyone sleeping in uhci_hcd_endpoint_disable().
 */
1756 static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
1758 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1759 unsigned int io_addr = uhci->io_addr;
1760 unsigned short status;
1761 struct list_head *tmp, *head;
1765 * Read the interrupt status, and write it back to clear the
1766 * interrupt cause. Contrary to the UHCI specification, the
1767 * "HC Halted" status bit is persistent: it is RO, not R/WC.
1769 status = inw(io_addr + USBSTS);
1770 if (!(status & ~USBSTS_HCH)) /* shared interrupt, not mine */
1772 outw(status, io_addr + USBSTS); /* Clear it */
1774 if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
1775 if (status & USBSTS_HSE)
1776 dev_err(uhci_dev(uhci), "host system error, "
1778 if (status & USBSTS_HCPE)
1779 dev_err(uhci_dev(uhci), "host controller process "
1780 "error, something bad happened!\n");
1781 if ((status & USBSTS_HCH) && uhci->state > 0) {
1782 dev_err(uhci_dev(uhci), "host controller halted, "
1784 /* FIXME: Reset the controller, fix the offending TD */
1788 if (status & USBSTS_RD)
1789 uhci->resume_detect = 1;
1791 spin_lock(&uhci->schedule_lock);
/* Deferred reclaim: only safe once the frame number has advanced. */
1793 age = uhci_get_current_frame_number(uhci);
1794 if (age != uhci->qh_remove_age)
1795 uhci_free_pending_qhs(uhci);
1796 if (age != uhci->td_remove_age)
1797 uhci_free_pending_tds(uhci);
1798 if (age != uhci->urb_remove_age)
1799 uhci_remove_pending_urbps(uhci);
1801 if (list_empty(&uhci->urb_remove_list) &&
1802 list_empty(&uhci->td_remove_list) &&
1803 list_empty(&uhci->qh_remove_list))
1804 uhci_clear_next_interrupt(uhci);
1806 uhci_set_next_interrupt(uhci);
1808 /* Walk the list of pending URB's to see which ones completed */
1809 head = &uhci->urb_list;
1811 while (tmp != head) {
1812 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
1813 struct urb *urb = urbp->urb;
1817 /* Checks the status and does all of the magic necessary */
1818 uhci_transfer_result(uhci, urb);
1820 uhci_finish_completion(hcd, regs);
1822 spin_unlock(&uhci->schedule_lock);
1824 /* Wake up anyone waiting for an URB to complete */
1825 wake_up_all(&uhci->waitqh);
/*
 * Global-reset the controller: assert USBCMD_GRESET for 50 ms, deassert,
 * then wait a further 10 ms.  Sleeps (schedule_timeout), so this must be
 * called from process context.  Clears any latched resume indication.
 */
1830 static void reset_hc(struct uhci_hcd *uhci)
1832 unsigned int io_addr = uhci->io_addr;
1834 /* Global reset for 50ms */
1835 uhci->state = UHCI_RESET;
1836 outw(USBCMD_GRESET, io_addr + USBCMD);
1837 set_current_state(TASK_UNINTERRUPTIBLE);
1838 schedule_timeout((HZ*50+999) / 1000);
1839 outw(0, io_addr + USBCMD);
1841 /* Another 10ms delay */
1842 set_current_state(TASK_UNINTERRUPTIBLE);
1843 schedule_timeout((HZ*10+999) / 1000);
1844 uhci->resume_detect = 0;
/*
 * Put the controller into global suspend by writing USBCMD_EGSM (Enter
 * Global Suspend Mode) and marking our software state UHCI_SUSPENDED.
 */
1847 static void suspend_hc(struct uhci_hcd *uhci)
1849 unsigned int io_addr = uhci->io_addr;
1851 dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
1852 uhci->state = UHCI_SUSPENDED;
1853 uhci->resume_detect = 0;
1854 outw(USBCMD_EGSM, io_addr + USBCMD);
/*
 * One step of the resume sequence, driven repeatedly by the state
 * machine: SUSPENDED -> signal global resume (FGR) for >= 20 ms ->
 * RESUMING_1 -> end resume signalling -> RESUMING_2 -> wait for the
 * hardware to clear FGR (EOP sent) -> restart the HC and run for a
 * 1-second grace period before suspend is allowed again.
 */
1857 static void wakeup_hc(struct uhci_hcd *uhci)
1859 unsigned int io_addr = uhci->io_addr;
1861 switch (uhci->state) {
1862 case UHCI_SUSPENDED: /* Start the resume */
1863 dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
1865 /* Global resume for >= 20ms */
1866 outw(USBCMD_FGR | USBCMD_EGSM, io_addr + USBCMD);
1867 uhci->state = UHCI_RESUMING_1;
1868 uhci->state_end = jiffies + (20*HZ+999) / 1000;
1871 case UHCI_RESUMING_1: /* End global resume */
1872 uhci->state = UHCI_RESUMING_2;
1873 outw(0, io_addr + USBCMD);
1876 case UHCI_RESUMING_2: /* Wait for EOP to be sent */
1877 if (inw(io_addr + USBCMD) & USBCMD_FGR)
1880 /* Run for at least 1 second, and
1881 * mark it configured with a 64-byte max packet */
1882 uhci->state = UHCI_RUNNING_GRACE;
1883 uhci->state_end = jiffies + HZ;
1884 outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP,
1888 case UHCI_RUNNING_GRACE: /* Now allowed to suspend */
1889 uhci->state = UHCI_RUNNING;
/*
 * Return nonzero if any root-hub port reports Current Connect Status
 * (a device is attached); used to decide whether to auto-suspend.
 */
1897 static int ports_active(struct uhci_hcd *uhci)
1899 unsigned int io_addr = uhci->io_addr;
1903 for (i = 0; i < uhci->rh_numports; i++)
1904 connection |= (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_CCS);
/*
 * Decide whether global suspend is safe on this hardware.  Non-Intel
 * controllers are always allowed; on Intel parts an over-current
 * indication on any port disallows suspend, working around the false
 * resume-detect erratum described in the comment below.
 */
1909 static int suspend_allowed(struct uhci_hcd *uhci)
1911 unsigned int io_addr = uhci->io_addr;
1914 if (to_pci_dev(uhci_dev(uhci))->vendor != PCI_VENDOR_ID_INTEL)
1917 /* Some of Intel's USB controllers have a bug that causes false
1918 * resume indications if any port has an over current condition.
1919 * To prevent problems, we will not allow a global suspend if
1922 * Some motherboards using Intel's chipsets (but not using all
1923 * the USB ports) appear to hardwire the over current inputs active
1924 * to disable the USB ports.
1927 /* check for over current condition on any port */
1928 for (i = 0; i < uhci->rh_numports; i++) {
1929 if (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_OC)
/*
 * Poll-driven suspend/resume state machine, invoked from the watchdog
 * (stall_callback).  RUNNING with no connected ports starts a 1-second
 * suspend grace period; a reconnect during the grace period cancels it.
 * SUSPENDED wakes on a device-requested resume; the RESUMING_* and
 * RUNNING_GRACE states advance when their deadline (state_end) passes.
 */
1936 static void hc_state_transitions(struct uhci_hcd *uhci)
1938 switch (uhci->state) {
1941 /* global suspend if nothing connected for 1 second */
1942 if (!ports_active(uhci) && suspend_allowed(uhci)) {
1943 uhci->state = UHCI_SUSPENDING_GRACE;
1944 uhci->state_end = jiffies + HZ;
1948 case UHCI_SUSPENDING_GRACE:
1949 if (ports_active(uhci))
1950 uhci->state = UHCI_RUNNING;
1951 else if (time_after_eq(jiffies, uhci->state_end))
1955 case UHCI_SUSPENDED:
1957 /* wakeup if requested by a device */
1958 if (uhci->resume_detect)
1962 case UHCI_RESUMING_1:
1963 case UHCI_RESUMING_2:
1964 case UHCI_RUNNING_GRACE:
1965 if (time_after_eq(jiffies, uhci->state_end))
/*
 * Bring the controller out of reset and into the running state:
 * HCRESET (forcing a virtual disconnect so attached devices re-announce),
 * enable all four interrupt sources, program frame 0 and the frame-list
 * base address, then set Run/Stop + Configure + 64-byte max packet.
 */
1974 static void start_hc(struct uhci_hcd *uhci)
1976 unsigned int io_addr = uhci->io_addr;
1980 * Reset the HC - this will force us to get a
1981 * new notification of any already connected
1982 * ports due to the virtual disconnect that it
1985 outw(USBCMD_HCRESET, io_addr + USBCMD);
1986 while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
1988 dev_err(uhci_dev(uhci), "USBCMD_HCRESET timed out!\n");
1993 /* Turn on all interrupts */
1994 outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
1997 /* Start at frame 0 */
1998 outw(0, io_addr + USBFRNUM);
1999 outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);
2001 /* Run and mark it configured with a 64-byte max packet */
2002 uhci->state = UHCI_RUNNING_GRACE;
2003 uhci->state_end = jiffies + HZ;
2004 outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
2006 uhci->hcd.state = USB_STATE_RUNNING;
2010 * De-allocate all resources..
2012 static void release_uhci(struct uhci_hcd *uhci)
2016 for (i = 0; i < UHCI_NUM_SKELQH; i++)
2017 if (uhci->skelqh[i]) {
2018 uhci_free_qh(uhci, uhci->skelqh[i]);
2019 uhci->skelqh[i] = NULL;
2022 if (uhci->term_td) {
2023 uhci_free_td(uhci, uhci->term_td);
2024 uhci->term_td = NULL;
2027 if (uhci->qh_pool) {
2028 dma_pool_destroy(uhci->qh_pool);
2029 uhci->qh_pool = NULL;
2032 if (uhci->td_pool) {
2033 dma_pool_destroy(uhci->td_pool);
2034 uhci->td_pool = NULL;
2038 dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2039 uhci->fl, uhci->fl->dma_handle);
2043 #ifdef CONFIG_PROC_FS
2044 if (uhci->proc_entry) {
2045 remove_proc_entry(uhci->hcd.self.bus_name, uhci_proc_root);
2046 uhci->proc_entry = NULL;
/*
 * hc_driver .reset: record the I/O base, mask all interrupts, and write
 * the PCI legacy-support register to take the hardware away from any
 * BIOS/SMM driver so no stale interrupts arrive during setup.
 */
2051 static int uhci_reset(struct usb_hcd *hcd)
2053 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2055 uhci->io_addr = (unsigned long) hcd->regs;
2057 /* Turn off all interrupts */
2058 outw(0, uhci->io_addr + USBINTR);
2060 /* Maybe kick BIOS off this hardware. Then reset, so we won't get
2061 * interrupts from any previous setup.
2064 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
2070 * Allocate a frame list, and then setup the skeleton
2072 * The hardware doesn't really know any difference
2073 * in the queues, but the order does matter for the
2074 * protocols higher up. The order is:
2076 * - any isochronous events handled before any
2077 * of the queues. We don't do that here, because
2078 * we'll create the actual TD entries on demand.
2079 * - The first queue is the interrupt queue.
2080 * - The second queue is the control queue, split into low- and full-speed
2081 * - The third queue is bulk queue.
2082 * - The fourth queue is the bandwidth reclamation queue, which loops back
2083 * to the full-speed control queue.
/*
 * hc_driver .start: full controller bring-up.  Creates the proc entry,
 * initializes all driver lists/locks, allocates the DMA-coherent frame
 * list and the TD/QH pools, probes the root-hub port count, allocates
 * the root hub device, builds the skeleton QH chain and the PIIX
 * workaround terminating TD, interleaves the interrupt QHs across the
 * 1024 frame-list entries, starts the watchdog timer, and registers the
 * root hub.  The error paths (after the success return) unwind each
 * step in reverse.  NOTE(review): several closing braces, the start_hc()
 * call, and goto labels are not visible in this extraction.
 */
2085 static int uhci_start(struct usb_hcd *hcd)
2087 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2088 int retval = -EBUSY;
2091 dma_addr_t dma_handle;
2092 struct usb_device *udev;
2093 #ifdef CONFIG_PROC_FS
2094 struct proc_dir_entry *ent;
2097 io_size = pci_resource_len(to_pci_dev(uhci_dev(uhci)), hcd->region);
2099 #ifdef CONFIG_PROC_FS
2100 ent = create_proc_entry(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
2102 dev_err(uhci_dev(uhci), "couldn't create uhci proc entry\n");
2104 goto err_create_proc_entry;
2108 ent->proc_fops = &uhci_proc_operations;
2110 uhci->proc_entry = ent;
2114 uhci->fsbrtimeout = 0;
2116 spin_lock_init(&uhci->schedule_lock);
2117 INIT_LIST_HEAD(&uhci->qh_remove_list);
2119 INIT_LIST_HEAD(&uhci->td_remove_list);
2121 INIT_LIST_HEAD(&uhci->urb_remove_list);
2123 INIT_LIST_HEAD(&uhci->urb_list);
2125 INIT_LIST_HEAD(&uhci->complete_list);
2127 init_waitqueue_head(&uhci->waitqh);
2129 uhci->fl = dma_alloc_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2132 dev_err(uhci_dev(uhci), "unable to allocate "
2133 "consistent memory for frame list\n");
2137 memset((void *)uhci->fl, 0, sizeof(*uhci->fl));
2139 uhci->fl->dma_handle = dma_handle;
2141 uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
2142 sizeof(struct uhci_td), 16, 0);
2143 if (!uhci->td_pool) {
2144 dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
2145 goto err_create_td_pool;
2148 uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
2149 sizeof(struct uhci_qh), 16, 0);
2150 if (!uhci->qh_pool) {
2151 dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
2152 goto err_create_qh_pool;
2155 /* Initialize the root hub */
2157 /* UHCI specs says devices must have 2 ports, but goes on to say */
2158 /* they may have more but give no way to determine how many they */
2159 /* have. However, according to the UHCI spec, Bit 7 is always set */
2160 /* to 1. So we try to use this to our advantage */
2161 for (port = 0; port < (io_size - 0x10) / 2; port++) {
2162 unsigned int portstatus;
2164 portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
2165 if (!(portstatus & 0x0080))
2169 dev_info(uhci_dev(uhci), "detected %d ports\n", port);
2171 /* This is experimental so anything less than 2 or greater than 8 is */
2172 /* something weird and we'll ignore it */
2173 if (port < 2 || port > UHCI_RH_MAXCHILD) {
2174 dev_info(uhci_dev(uhci), "port count misdetected? "
2175 "forcing to 2 ports\n");
2179 uhci->rh_numports = port;
2181 hcd->self.root_hub = udev = usb_alloc_dev(NULL, &hcd->self, 0);
2183 dev_err(uhci_dev(uhci), "unable to allocate root hub\n");
2184 goto err_alloc_root_hub;
2187 uhci->term_td = uhci_alloc_td(uhci, udev);
2188 if (!uhci->term_td) {
2189 dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
2190 goto err_alloc_term_td;
2193 for (i = 0; i < UHCI_NUM_SKELQH; i++) {
2194 uhci->skelqh[i] = uhci_alloc_qh(uhci, udev);
2195 if (!uhci->skelqh[i]) {
2196 dev_err(uhci_dev(uhci), "unable to allocate QH\n");
2197 goto err_alloc_skelqh;
2202 * 8 Interrupt queues; link all higher int queues to int1,
2203 * then link int1 to control and control to bulk
2205 uhci->skel_int128_qh->link =
2206 uhci->skel_int64_qh->link =
2207 uhci->skel_int32_qh->link =
2208 uhci->skel_int16_qh->link =
2209 uhci->skel_int8_qh->link =
2210 uhci->skel_int4_qh->link =
2211 uhci->skel_int2_qh->link =
2212 cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
2213 uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;
2215 uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
2216 uhci->skel_fs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
2217 uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;
2219 /* This dummy TD is to work around a bug in Intel PIIX controllers */
2220 uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
2221 (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
2222 uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);
2224 uhci->skel_term_qh->link = UHCI_PTR_TERM;
2225 uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);
2228 * Fill the frame list: make all entries point to the proper
2231 * The interrupt queues will be interleaved as evenly as possible.
2232 * There's not much to be done about period-1 interrupts; they have
2233 * to occur in every frame. But we can schedule period-2 interrupts
2234 * in odd-numbered frames, period-4 interrupts in frames congruent
2235 * to 2 (mod 4), and so on. This way each frame only has two
2236 * interrupt QHs, which will help spread out bandwidth utilization.
2238 for (i = 0; i < UHCI_NUMFRAMES; i++) {
2242 * ffs (Find First bit Set) does exactly what we need:
2243 * 1,3,5,... => ffs = 0 => use skel_int2_qh = skelqh[6],
2244 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc.
2245 * ffs > 6 => not on any high-period queue, so use
2246 * skel_int1_qh = skelqh[7].
2247 * Add UHCI_NUMFRAMES to insure at least one bit is set.
2249 irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES);
2253 /* Only place we don't use the frame list routines */
2254 uhci->fl->frame[i] = cpu_to_le32(uhci->skelqh[irq]->dma_handle);
2259 init_stall_timer(hcd);
2261 udev->speed = USB_SPEED_FULL;
2263 if (usb_register_root_hub(udev, uhci_dev(uhci)) != 0) {
2264 dev_err(uhci_dev(uhci), "unable to start root hub\n");
2266 goto err_start_root_hub;
/* ---- error unwind: reverse of the allocation order above ---- */
2277 del_timer_sync(&uhci->stall_timer);
2280 for (i = 0; i < UHCI_NUM_SKELQH; i++)
2281 if (uhci->skelqh[i]) {
2282 uhci_free_qh(uhci, uhci->skelqh[i]);
2283 uhci->skelqh[i] = NULL;
2286 uhci_free_td(uhci, uhci->term_td);
2287 uhci->term_td = NULL;
2291 hcd->self.root_hub = NULL;
2294 dma_pool_destroy(uhci->qh_pool);
2295 uhci->qh_pool = NULL;
2298 dma_pool_destroy(uhci->td_pool);
2299 uhci->td_pool = NULL;
2302 dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2303 uhci->fl, uhci->fl->dma_handle);
2307 #ifdef CONFIG_PROC_FS
2308 remove_proc_entry(hcd->self.bus_name, uhci_proc_root);
2309 uhci->proc_entry = NULL;
2311 err_create_proc_entry:
/*
 * hc_driver .stop: shut the controller down.  Kills the watchdog timer,
 * then under schedule_lock flushes all pending removals and completes
 * every outstanding URB.  The QH/TD free passes run a second time after
 * uhci_finish_completion() because completion handlers (which run with
 * the lock dropped) may have queued more removals.  Finally wakes any
 * endpoint-disable waiters.
 */
2319 static void uhci_stop(struct usb_hcd *hcd)
2321 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2323 del_timer_sync(&uhci->stall_timer);
2324 * At this point, we're guaranteed that no new connects can be made
2325 * to this bus since there are no more parents
2330 spin_lock_irq(&uhci->schedule_lock);
2331 uhci_free_pending_qhs(uhci);
2332 uhci_free_pending_tds(uhci);
2333 uhci_remove_pending_urbps(uhci);
2334 uhci_finish_completion(hcd, NULL);
2336 uhci_free_pending_qhs(uhci);
2337 uhci_free_pending_tds(uhci);
2338 spin_unlock_irq(&uhci->schedule_lock);
2340 /* Wake up anyone waiting for an URB to complete */
2341 wake_up_all(&uhci->waitqh);
/*
 * hc_driver .suspend (PM): save the current frame number (10 valid bits)
 * and suspend the HC — but only on hardware where suspend_allowed()
 * says it is safe; broken boards are reset instead (per the comment).
 * NOTE(review): the else/reset branch is not visible in this extraction.
 */
2347 static int uhci_suspend(struct usb_hcd *hcd, u32 state)
2349 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2351 /* Don't try to suspend broken motherboards, reset instead */
2352 if (suspend_allowed(uhci)) {
2354 uhci->saved_framenumber =
2355 inw(uhci->io_addr + USBFRNUM) & 0x3ff;
/*
 * hc_driver .resume (PM): re-enable PCI bus mastering, and if we went
 * down in the SUSPENDED state, reprogram the registers that some
 * systems lose across a PM cycle (frame number, frame-list base,
 * interrupt enables), then fake a resume-detect so the state machine
 * walks the controller back to running.
 */
2361 static int uhci_resume(struct usb_hcd *hcd)
2363 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2365 pci_set_master(to_pci_dev(uhci_dev(uhci)));
2367 if (uhci->state == UHCI_SUSPENDED) {
2370 * Some systems don't maintain the UHCI register values
2371 * during a PM suspend/resume cycle, so reinitialize
2372 * the Frame Number, the Framelist Base Address, and the
2373 * Interrupt Enable registers.
2375 outw(uhci->saved_framenumber, uhci->io_addr + USBFRNUM);
2376 outl(uhci->fl->dma_handle, uhci->io_addr + USBFLBASEADD);
2377 outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC |
2378 USBINTR_SP, uhci->io_addr + USBINTR);
2379 uhci->resume_detect = 1;
2384 uhci->hcd.state = USB_STATE_RUNNING;
/*
 * hc_driver .hcd_alloc: kmalloc + zero a uhci_hcd and set the product
 * description.  The embedded struct usb_hcd is returned to the core.
 */
2389 static struct usb_hcd *uhci_hcd_alloc(void)
2391 struct uhci_hcd *uhci;
2393 uhci = (struct uhci_hcd *)kmalloc(sizeof(*uhci), GFP_KERNEL);
2397 memset(uhci, 0, sizeof(*uhci));
2398 uhci->hcd.product_desc = "UHCI Host Controller";
/* hc_driver .hcd_free: release the container allocated by uhci_hcd_alloc(). */
2402 static void uhci_hcd_free(struct usb_hcd *hcd)
2404 kfree(hcd_to_uhci(hcd));
2407 /* Are there any URBs for a particular device/endpoint on a given list? */
/*
 * @ep encodes endpoint number | direction bit, matching the
 * usb_pipeendpoint()|usb_pipein() composition below.  Caller must hold
 * schedule_lock (see urbs_for_ep()).
 */
2408 static int urbs_for_ep_list(struct list_head *head,
2409 struct hcd_dev *hdev, int ep)
2411 struct urb_priv *urbp;
2413 list_for_each_entry(urbp, head, urb_list) {
2414 struct urb *urb = urbp->urb;
2416 if (hdev == urb->dev->hcpriv && ep ==
2417 (usb_pipeendpoint(urb->pipe) |
2418 usb_pipein(urb->pipe)))
2424 /* Are there any URBs for a particular device/endpoint? */
/*
 * Checks all three lists an URB can live on (active, complete, pending
 * removal) under schedule_lock; used as the wait condition for
 * uhci_hcd_endpoint_disable().
 */
2425 static int urbs_for_ep(struct uhci_hcd *uhci, struct hcd_dev *hdev, int ep)
2429 spin_lock_irq(&uhci->schedule_lock);
2430 rc = (urbs_for_ep_list(&uhci->urb_list, hdev, ep) ||
2431 urbs_for_ep_list(&uhci->complete_list, hdev, ep) ||
2432 urbs_for_ep_list(&uhci->urb_remove_list, hdev, ep));
2433 spin_unlock_irq(&uhci->schedule_lock);
2437 /* Wait until all the URBs for a particular device/endpoint are gone */
/*
 * Sleeps on uhci->waitqh (interruptible) until no URB for this
 * device/endpoint remains on any driver list; the IRQ handler and
 * uhci_stop() wake the queue after completing URBs.
 */
2438 static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
2439 struct hcd_dev *hdev, int endpoint)
2441 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2443 wait_event_interruptible(uhci->waitqh,
2444 !urbs_for_ep(uhci, hdev, endpoint));
/* hc_driver .get_frame_number: thin wrapper over the register read. */
2447 static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
2449 return uhci_get_current_frame_number(hcd_to_uhci(hcd));
2452 static const char hcd_name[] = "uhci_hcd";
/*
 * hc_driver operations table binding this driver's entry points into the
 * generic USB HCD core.
 */
2454 static const struct hc_driver uhci_driver = {
2455 .description = hcd_name,
2457 /* Generic hardware linkage */
2461 /* Basic lifecycle operations */
2462 .reset = uhci_reset,
2463 .start = uhci_start,
2465 .suspend = uhci_suspend,
2466 .resume = uhci_resume,
2470 .hcd_alloc = uhci_hcd_alloc,
2471 .hcd_free = uhci_hcd_free,
2473 .urb_enqueue = uhci_urb_enqueue,
2474 .urb_dequeue = uhci_urb_dequeue,
2476 .endpoint_disable = uhci_hcd_endpoint_disable,
2477 .get_frame_number = uhci_hcd_get_frame_number,
2479 .hub_status_data = uhci_hub_status_data,
2480 .hub_control = uhci_hub_control,
/*
 * Match every PCI device with class Serial-USB / UHCI programming
 * interface (0x00); driver_data carries the hc_driver ops for the
 * generic usb_hcd_pci_probe().
 */
2483 static const struct pci_device_id uhci_pci_ids[] = { {
2484 /* handle any USB UHCI controller */
2485 PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
2486 .driver_data = (unsigned long) &uhci_driver,
2487 }, { /* end: all zeroes */ }
2490 MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
/*
 * PCI driver glue: probe/remove and PM hooks are delegated to the
 * generic usb-hcd-pci helpers, which call back through uhci_driver.
 */
2492 static struct pci_driver uhci_pci_driver = {
2493 .name = (char *)hcd_name,
2494 .id_table = uhci_pci_ids,
2496 .probe = usb_hcd_pci_probe,
2497 .remove = usb_hcd_pci_remove,
2500 .suspend = usb_hcd_pci_suspend,
2501 .resume = usb_hcd_pci_resume,
/*
 * Module init: allocate the debug error buffer, create the
 * /proc/driver/uhci directory (when procfs is enabled), create the
 * urb_priv slab cache, then register the PCI driver.  The tail lines
 * visible here are the error-unwind path, torn down in reverse order.
 */
2505 static int __init uhci_hcd_init(void)
2507 int retval = -ENOMEM;
2509 printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");
2515 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
2520 #ifdef CONFIG_PROC_FS
2521 uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, 0);
2522 if (!uhci_proc_root)
2526 uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
2527 sizeof(struct urb_priv), 0, 0, NULL, NULL);
2528 if (!uhci_up_cachep)
2531 retval = pci_module_init(&uhci_pci_driver);
2538 if (kmem_cache_destroy(uhci_up_cachep))
2539 warn("not all urb_priv's were freed!");
2543 #ifdef CONFIG_PROC_FS
2544 remove_proc_entry("driver/uhci", 0);
/*
 * Module exit: unregister the PCI driver, destroy the urb_priv slab
 * cache (warning if any objects leaked), and remove the proc directory.
 */
2556 static void __exit uhci_hcd_cleanup(void)
2558 pci_unregister_driver(&uhci_pci_driver);
2560 if (kmem_cache_destroy(uhci_up_cachep))
2561 warn("not all urb_priv's were freed!");
2563 #ifdef CONFIG_PROC_FS
2564 remove_proc_entry("driver/uhci", 0);
2571 module_init(uhci_hcd_init);
2572 module_exit(uhci_hcd_cleanup);
2574 MODULE_AUTHOR(DRIVER_AUTHOR);
2575 MODULE_DESCRIPTION(DRIVER_DESC);
2576 MODULE_LICENSE("GPL");