2 * Universal Host Controller Interface driver for USB.
4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
6 * (C) Copyright 1999 Linus Torvalds
7 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8 * (C) Copyright 1999 Randy Dunlap
9 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14 * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
16 * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
18 * Intel documents this fairly well, and as far as I know there
19 * are no royalties or anything like that, but even so there are
20 * people who decided that they want to do the same thing in a
21 * completely different way.
23 * WARNING! The USB documentation is downright evil. Most of it
24 * is just crap, written by a committee. You're better off ignoring
25 * most of it, the important stuff is:
26 * - the low-level protocol (fairly simple but lots of small details)
27 * - working around the horridness of the rest
30 #include <linux/config.h>
31 #ifdef CONFIG_USB_DEBUG
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/kernel.h>
39 #include <linux/init.h>
40 #include <linux/delay.h>
41 #include <linux/ioport.h>
42 #include <linux/sched.h>
43 #include <linux/slab.h>
44 #include <linux/smp_lock.h>
45 #include <linux/errno.h>
46 #include <linux/unistd.h>
47 #include <linux/interrupt.h>
48 #include <linux/spinlock.h>
49 #include <linux/proc_fs.h>
51 #include <linux/dmapool.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/usb.h>
54 #include <linux/bitops.h>
56 #include <asm/uaccess.h>
59 #include <asm/system.h>
61 #include "../core/hcd.h"
67 #define DRIVER_VERSION "v2.2"
68 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
69 Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
71 #define DRIVER_DESC "USB Universal Host Controller Interface driver"
74 * debug = 0, no debugging messages
75 * debug = 1, dump failed URB's except for stalls
76 * debug = 2, dump all failed URB's (including stalls)
77 * show all queues in /proc/driver/uhci/[pci_addr]
78 * debug = 3, show all TD's in URB's when dumping
85 module_param(debug, int, S_IRUGO | S_IWUSR);
86 MODULE_PARM_DESC(debug, "Debug level");
88 #define ERRBUF_LEN (32 * 1024)
91 #include "uhci-debug.c"
/* Slab cache for per-URB private state (struct urb_priv). */
93 static kmem_cache_t *uhci_up_cachep;	/* urb_priv */
/* Forward declarations for routines defined later in this file. */
95 static unsigned int uhci_get_current_frame_number(struct uhci_hcd *uhci);
96 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
97 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
98 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
99 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs);
100 static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
101 static void uhci_free_pending_tds(struct uhci_hcd *uhci);
103 static void hc_state_transitions(struct uhci_hcd *uhci);
105 /* If a transfer is still active after this much time, turn off FSBR */
106 #define IDLE_TIMEOUT msecs_to_jiffies(50)
107 #define FSBR_DELAY msecs_to_jiffies(50)
109 /* When we timeout an idle transfer for FSBR, we'll switch it over to */
110 /* depth first traversal. We'll do it in groups of this number of TD's */
111 /* to make sure it doesn't hog all of the bandwidth */
112 #define DEPTH_INTERVAL 5
115 * Technically, updating td->status here is a race, but it's not really a
116 * problem. The worst that can happen is that we set the IOC bit again
117 * generating a spurious interrupt. We could fix this by creating another
118 * QH and leaving the IOC bit always set, but then we would have to play
119 * games with the FSBR code to make sure we get the correct order in all
120 * the cases. I don't think it's worth the effort
122 static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
124 uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
127 static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
129 uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
132 static inline void uhci_moveto_complete(struct uhci_hcd *uhci,
133 struct urb_priv *urbp)
135 list_move_tail(&urbp->urb_list, &uhci->complete_list);
138 static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
140 dma_addr_t dma_handle;
143 td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
147 td->dma_handle = dma_handle;
149 td->link = UHCI_PTR_TERM;
155 INIT_LIST_HEAD(&td->list);
156 INIT_LIST_HEAD(&td->remove_list);
157 INIT_LIST_HEAD(&td->fl_list);
164 static inline void uhci_fill_td(struct uhci_td *td, u32 status,
165 u32 token, u32 buffer)
167 td->status = cpu_to_le32(status);
168 td->token = cpu_to_le32(token);
169 td->buffer = cpu_to_le32(buffer);
173 * We insert Isochronous URB's directly into the frame list at the beginning
175 static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
177 framenum &= (UHCI_NUMFRAMES - 1);
179 td->frame = framenum;
181 /* Is there a TD already mapped there? */
182 if (uhci->fl->frame_cpu[framenum]) {
183 struct uhci_td *ftd, *ltd;
185 ftd = uhci->fl->frame_cpu[framenum];
186 ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
188 list_add_tail(&td->fl_list, &ftd->fl_list);
190 td->link = ltd->link;
192 ltd->link = cpu_to_le32(td->dma_handle);
194 td->link = uhci->fl->frame[framenum];
196 uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
197 uhci->fl->frame_cpu[framenum] = td;
201 static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
203 /* If it's not inserted, don't remove it */
204 if (td->frame == -1 && list_empty(&td->fl_list))
207 if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
208 if (list_empty(&td->fl_list)) {
209 uhci->fl->frame[td->frame] = td->link;
210 uhci->fl->frame_cpu[td->frame] = NULL;
214 ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
215 uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
216 uhci->fl->frame_cpu[td->frame] = ntd;
221 ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
222 ptd->link = td->link;
226 td->link = UHCI_PTR_TERM;
228 list_del_init(&td->fl_list);
233 * Inserts a td list into qh.
235 static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, __le32 breadth)
237 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
241 /* Ordering isn't important here yet since the QH hasn't been */
242 /* inserted into the schedule yet */
243 plink = &qh->element;
244 list_for_each_entry(td, &urbp->td_list, list) {
245 *plink = cpu_to_le32(td->dma_handle) | breadth;
248 *plink = UHCI_PTR_TERM;
251 static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
253 if (!list_empty(&td->list))
254 dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
255 if (!list_empty(&td->remove_list))
256 dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
257 if (!list_empty(&td->fl_list))
258 dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);
261 usb_put_dev(td->dev);
263 dma_pool_free(uhci->td_pool, td, td->dma_handle);
266 static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
268 dma_addr_t dma_handle;
271 qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
275 qh->dma_handle = dma_handle;
277 qh->element = UHCI_PTR_TERM;
278 qh->link = UHCI_PTR_TERM;
283 INIT_LIST_HEAD(&qh->list);
284 INIT_LIST_HEAD(&qh->remove_list);
291 static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
293 if (!list_empty(&qh->list))
294 dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
295 if (!list_empty(&qh->remove_list))
296 dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);
299 usb_put_dev(qh->dev);
301 dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
305 * Append this urb's qh after the last qh in skelqh->list
307 * Note that urb_priv.queue_list doesn't have a separate queue head;
308 * it's a ring with every element "live".
310 static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
312 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
313 struct urb_priv *turbp;
316 /* Grab the last QH */
317 lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);
319 /* Point to the next skelqh */
320 urbp->qh->link = lqh->link;
321 wmb(); /* Ordering is important */
324 * Patch QHs for previous endpoint's queued URBs? HC goes
325 * here next, not to the next skelqh it now points to.
327 * lqh --> td ... --> qh ... --> td --> qh ... --> td
330 * +<----------------+-----------------+
332 * newqh --> td ... --> td
337 * The HC could see (and use!) any of these as we write them.
339 lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
341 list_for_each_entry(turbp, &lqh->urbp->queue_list, queue_list)
342 turbp->qh->link = lqh->link;
345 list_add_tail(&urbp->qh->list, &skelqh->list);
/* NOTE(review): this extraction is missing lines (gaps in the embedded
 * numbering) -- locals, braces and at least one else-arm are absent.
 * Tokens left untouched; comments only.  Compare against the canonical
 * uhci-hcd source before relying on this text. */
349 * Start removal of QH from schedule; it finishes next frame.
350 * TDs should be unlinked before this is called.
352 static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
362 	 * Only go through the hoops if it's actually linked in
364 if (!list_empty(&qh->list)) {
366 /* If our queue is nonempty, make the next URB the head */
367 if (!list_empty(&qh->urbp->queue_list)) {
368 struct urb_priv *nurbp;
370 nurbp = list_entry(qh->urbp->queue_list.next,
371 struct urb_priv, queue_list);
/* The successor's QH takes this QH's place in the skeleton list */
373 list_add(&nurbp->qh->list, &qh->list);
374 newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
378 /* Fix up the previous QH's queue to link to either
379 * the new head of this queue or the start of the
380 * next endpoint's queue. */
381 pqh = list_entry(qh->list.prev, struct uhci_qh, list);
384 struct urb_priv *turbp;
386 list_for_each_entry(turbp, &pqh->urbp->queue_list,
388 turbp->qh->link = newlink;
392 /* Leave qh->link in case the HC is on the QH now, it will */
393 /* continue the rest of the schedule */
394 qh->element = UHCI_PTR_TERM;
396 list_del_init(&qh->list);
399 list_del_init(&qh->urbp->queue_list);
/* Reclaim QHs queued for removal in an earlier frame, if any */
402 age = uhci_get_current_frame_number(uhci);
403 if (age != uhci->qh_remove_age) {
404 uhci_free_pending_qhs(uhci);
405 uhci->qh_remove_age = age;
408 /* Check to see if the remove list is empty. Set the IOC bit */
409 /* to force an interrupt so we can remove the QH */
410 if (list_empty(&uhci->qh_remove_list))
411 uhci_set_next_interrupt(uhci);
413 list_add(&qh->remove_list, &uhci->qh_remove_list);
416 static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
418 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
421 list_for_each_entry(td, &urbp->td_list, list) {
423 td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
425 td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
/* NOTE(review): lines are missing from this extraction (braces, locals,
 * and part of the "find first URB" loop).  Tokens left untouched. */
433 /* This function will append one URB's QH to another URB's QH. This is for */
434 /* queuing interrupt, control or bulk transfers */
435 static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
437 struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
438 struct uhci_td *lltd;
440 eurbp = eurb->hcpriv;
443 /* Find the first URB in the queue */
446 list_for_each_entry(furbp, &eurbp->queue_list, queue_list)
/* lurbp: last URB currently in the queue; lltd: its last TD */
451 lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);
453 lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);
455 /* Control transfers always start with toggle 0 */
456 if (!usb_pipecontrol(urb->pipe))
457 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
458 usb_pipeout(urb->pipe),
459 uhci_fixup_toggle(urb,
460 uhci_toggle(td_token(lltd)) ^ 1));
462 /* All qh's in the queue need to link to the next queue */
463 urbp->qh->link = eurbp->qh->link;
465 wmb(); /* Make sure we flush everything */
/* Hook the new QH after the last TD of the previous URB */
467 lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
469 list_add_tail(&urbp->queue_list, &furbp->queue_list);
/* Remove one URB from the middle or end of a queue of URBs sharing an
 * endpoint, fixing up data toggles and hardware links.
 * NOTE(review): this extraction is missing lines (branch structure and
 * braces are incomplete).  Tokens left untouched. */
474 static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
476 struct urb_priv *urbp, *nurbp, *purbp, *turbp;
477 struct uhci_td *pltd;
482 if (list_empty(&urbp->queue_list))
485 nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);
488 * Fix up the toggle for the following URBs in the queue.
489 * Only needed for bulk and interrupt: control and isochronous
490 * endpoints don't propagate toggles between messages.
492 if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
494 /* We just set the toggle in uhci_unlink_generic */
495 toggle = usb_gettoggle(urb->dev,
496 usb_pipeendpoint(urb->pipe),
497 usb_pipeout(urb->pipe));
499 /* If we're in the middle of the queue, grab the */
500 /* toggle from the TD previous to us */
501 purbp = list_entry(urbp->queue_list.prev,
502 struct urb_priv, queue_list);
503 pltd = list_entry(purbp->td_list.prev,
504 struct uhci_td, list);
505 toggle = uhci_toggle(td_token(pltd)) ^ 1;
/* Propagate the corrected toggle through the rest of the ring */
508 list_for_each_entry(turbp, &urbp->queue_list, queue_list) {
511 toggle = uhci_fixup_toggle(turbp->urb, toggle);
514 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
515 usb_pipeout(urb->pipe), toggle);
519 /* We're somewhere in the middle (or end). The case where
520 * we're at the head is handled in uhci_remove_qh(). */
521 purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
524 pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
/* Bypass this URB's QH in the hardware chain */
526 pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
528 /* The next URB happens to be the beginning, so */
529 /* we're the last, end the chain */
530 pltd->link = UHCI_PTR_TERM;
533 /* urbp->queue_list is handled in uhci_remove_qh() */
536 static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
538 struct urb_priv *urbp;
540 urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
544 memset((void *)urbp, 0, sizeof(*urbp));
546 urbp->inserttime = jiffies;
547 urbp->fsbrtime = jiffies;
550 INIT_LIST_HEAD(&urbp->td_list);
551 INIT_LIST_HEAD(&urbp->queue_list);
552 INIT_LIST_HEAD(&urbp->urb_list);
554 list_add_tail(&urbp->urb_list, &uhci->urb_list);
561 static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
563 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
567 list_add_tail(&td->list, &urbp->td_list);
570 static void uhci_remove_td_from_urb(struct uhci_td *td)
572 if (list_empty(&td->list))
575 list_del_init(&td->list);
/* Tear down the per-URB private state: queue the URB's TDs for deferred
 * removal and free the urb_priv itself.
 * NOTE(review): lines are missing from this extraction (locals, braces,
 * the final urb->hcpriv reset).  Tokens left untouched. */
580 static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
582 struct uhci_td *td, *tmp;
583 struct urb_priv *urbp;
586 urbp = (struct urb_priv *)urb->hcpriv;
590 if (!list_empty(&urbp->urb_list))
591 dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
592 "or uhci->remove_list!\n", urb);
/* Reclaim TDs queued for removal in an earlier frame, if any */
594 age = uhci_get_current_frame_number(uhci);
595 if (age != uhci->td_remove_age) {
596 uhci_free_pending_tds(uhci);
597 uhci->td_remove_age = age;
600 /* Check to see if the remove list is empty. Set the IOC bit */
601 /* to force an interrupt so we can remove the TD's*/
602 if (list_empty(&uhci->td_remove_list))
603 uhci_set_next_interrupt(uhci);
/* Move every TD from the URB onto the deferred-removal list */
605 list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
606 uhci_remove_td_from_urb(td);
607 uhci_remove_td(uhci, td);
608 list_add(&td->remove_list, &uhci->td_remove_list);
612 kmem_cache_free(uhci_up_cachep, urbp);
615 static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
617 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
619 if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
621 if (!uhci->fsbr++ && !uhci->fsbrtimeout)
622 uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
626 static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
628 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
630 if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
633 uhci->fsbrtimeout = jiffies + FSBR_DELAY;
638 * Map status to standard result codes
640 * <status> is (td->status & 0xF60000) [a.k.a. uhci_status_bits(td->status)]
641 * Note: status does not include the TD_CTRL_NAK bit.
642 * <dir_out> is True for output TDs and False for input TDs.
644 static int uhci_map_status(int status, int dir_out)
648 if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */
650 if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */
656 if (status & TD_CTRL_BABBLE) /* Babble */
658 if (status & TD_CTRL_DBUFERR) /* Buffer error */
660 if (status & TD_CTRL_STALLED) /* Stalled */
662 WARN_ON(status & TD_CTRL_ACTIVE); /* Active */
/* Build and schedule the TD/QH chain for a control transfer:
 * SETUP TD, zero or more DATA TDs, and a final STATUS TD.
 * NOTE(review): this extraction is missing lines (error-handling paths,
 * the DATA loop header, braces).  Tokens left untouched. */
669 static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
671 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
673 struct uhci_qh *qh, *skelqh;
674 unsigned long destination, status;
675 int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
676 int len = urb->transfer_buffer_length;
677 dma_addr_t data = urb->transfer_dma;
679 /* The "pipe" thing contains the destination in bits 8--18 */
680 destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
683 status = TD_CTRL_ACTIVE | uhci_maxerr(3);
684 if (urb->dev->speed == USB_SPEED_LOW)
685 status |= TD_CTRL_LS;
688 * Build the TD for the control request setup packet
690 td = uhci_alloc_td(uhci, urb->dev);
/* SETUP stage: 8-byte request packet, explen is encoded as length-1 */
694 uhci_add_td_to_urb(urb, td);
695 uhci_fill_td(td, status, destination | uhci_explen(7),
699 * If direction is "send", change the packet ID from SETUP (0x2D)
700 * to OUT (0xE1). Else change it from SETUP to IN (0x69) and
701 * set Short Packet Detect (SPD) for all data packets.
703 if (usb_pipeout(urb->pipe))
704 destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
706 destination ^= (USB_PID_SETUP ^ USB_PID_IN);
707 status |= TD_CTRL_SPD;
711 * Build the DATA TD's
719 td = uhci_alloc_td(uhci, urb->dev);
723 /* Alternate Data0/1 (start with Data1) */
724 destination ^= TD_TOKEN_TOGGLE;
726 uhci_add_td_to_urb(urb, td);
727 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
735 * Build the final TD for control status
737 td = uhci_alloc_td(uhci, urb->dev);
742 * It's IN if the pipe is an output pipe or we're not expecting
745 destination &= ~TD_TOKEN_PID_MASK;
746 if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
747 destination |= USB_PID_IN;
749 destination |= USB_PID_OUT;
751 destination |= TD_TOKEN_TOGGLE; /* End in Data1 */
/* Status stage must not be cut short; IOC fires the completion IRQ */
753 status &= ~TD_CTRL_SPD;
755 uhci_add_td_to_urb(urb, td);
756 uhci_fill_td(td, status | TD_CTRL_IOC,
757 destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);
759 qh = uhci_alloc_qh(uhci, urb->dev);
766 uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
768 /* Low-speed transfers get a different queue, and won't hog the bus.
769 * Also, some devices enumerate better without FSBR; the easiest way
770 * to do that is to put URBs on the low-speed queue while the device
771 * is in the DEFAULT state. */
772 if (urb->dev->speed == USB_SPEED_LOW ||
773 urb->dev->state == USB_STATE_DEFAULT)
774 skelqh = uhci->skel_ls_control_qh;
776 skelqh = uhci->skel_fs_control_qh;
777 uhci_inc_fsbr(uhci, urb);
781 uhci_append_queued_urb(uhci, eurb, urb);
783 uhci_insert_qh(uhci, skelqh, urb);
789 * If control-IN transfer was short, the status packet wasn't sent.
790 * This routine changes the element pointer in the QH to point at the
791 * status TD. It's safe to do this even while the QH is live, because
792 * the hardware only updates the element pointer following a successful
793 * transfer. The inactive TD for the short packet won't cause an update,
794 * so the pointer won't get overwritten. The next time the controller
795 * sees this QH, it will send the status packet.
797 static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
799 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
802 urbp->short_control_packet = 1;
804 td = list_entry(urbp->td_list.prev, struct uhci_td, list);
805 urbp->qh->element = cpu_to_le32(td->dma_handle);
/* Walk a control URB's TD chain and derive its completion status:
 * SETUP stage, DATA stage(s), then the STATUS stage.
 * NOTE(review): lines are missing from this extraction (goto labels,
 * early returns, braces).  Tokens left untouched. */
811 static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
813 struct list_head *tmp, *head;
814 struct urb_priv *urbp = urb->hcpriv;
819 if (list_empty(&urbp->td_list))
822 head = &urbp->td_list;
/* If we already retriggered the status stage, only it remains to check */
824 if (urbp->short_control_packet) {
830 td = list_entry(tmp, struct uhci_td, list);
832 /* The first TD is the SETUP stage, check the status, but skip */
834 status = uhci_status_bits(td_status(td));
835 if (status & TD_CTRL_ACTIVE)
841 urb->actual_length = 0;
843 /* The rest of the TD's (but the last) are data */
845 while (tmp != head && tmp->next != head) {
846 td = list_entry(tmp, struct uhci_td, list);
850 status = uhci_status_bits(td_status(td));
851 if (status & TD_CTRL_ACTIVE)
854 urb->actual_length += uhci_actual_length(td_status(td));
859 /* Check to see if we received a short packet */
860 if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
861 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
/* Short IN data: skip straight to (re)sending the status stage */
866 if (uhci_packetid(td_token(td)) == USB_PID_IN)
867 return usb_control_retrigger_status(uhci, urb);
874 td = list_entry(tmp, struct uhci_td, list);
876 /* Control status stage */
877 status = td_status(td);
879 #ifdef I_HAVE_BUGGY_APC_BACKUPS
880 /* APC BackUPS Pro kludge */
881 /* It tries to send all of the descriptor instead of the amount */
883 if (status & TD_CTRL_IOC && /* IOC is masked out by uhci_status_bits */
884 status & TD_CTRL_ACTIVE &&
885 status & TD_CTRL_NAK)
889 status = uhci_status_bits(status);
890 if (status & TD_CTRL_ACTIVE)
899 ret = uhci_map_status(status, uhci_packetout(td_token(td)));
902 if ((debug == 1 && ret != -EPIPE) || debug > 1) {
903 /* Some debugging code */
904 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
905 __FUNCTION__, status);
908 /* Print the chain for debugging purposes */
909 uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
/* NOTE(review): lines are missing from this extraction (loop header,
 * error paths, braces).  Tokens left untouched. */
919 * Common submit for bulk and interrupt
921 static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
925 unsigned long destination, status;
926 int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
927 int len = urb->transfer_buffer_length;
928 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
929 dma_addr_t data = urb->transfer_dma;
934 /* The "pipe" thing contains the destination in bits 8--18 */
935 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
937 status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
938 if (urb->dev->speed == USB_SPEED_LOW)
939 status |= TD_CTRL_LS;
940 if (usb_pipein(urb->pipe))
941 status |= TD_CTRL_SPD;
944 * Build the DATA TD's
946 do { /* Allow zero length packets */
/* Last (possibly short) packet: honor URB_SHORT_NOT_OK via SPD */
951 if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
952 status &= ~TD_CTRL_SPD;
955 td = uhci_alloc_td(uhci, urb->dev);
959 uhci_add_td_to_urb(urb, td);
960 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
961 (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
962 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
968 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
969 usb_pipeout(urb->pipe));
973 * URB_ZERO_PACKET means adding a 0-length packet, if direction
974 * is OUT and the transfer_length was an exact multiple of maxsze,
975 * hence (len = transfer_length - N * maxsze) == 0
976 * however, if transfer_length == 0, the zero packet was already
979 if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
980 !len && urb->transfer_buffer_length) {
981 td = uhci_alloc_td(uhci, urb->dev);
985 uhci_add_td_to_urb(urb, td);
986 uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
987 (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
988 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
991 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
992 usb_pipeout(urb->pipe));
995 /* Set the interrupt-on-completion flag on the last packet.
996 * A more-or-less typical 4 KB URB (= size of one memory page)
997 * will require about 3 ms to transfer; that's a little on the
998 * fast side but not enough to justify delaying an interrupt
999 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
1001 td->status |= cpu_to_le32(TD_CTRL_IOC);
1003 qh = uhci_alloc_qh(uhci, urb->dev);
1010 /* Always breadth first */
1011 uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
1014 uhci_append_queued_urb(uhci, eurb, urb);
1016 uhci_insert_qh(uhci, skelqh, urb);
1018 return -EINPROGRESS;
/* NOTE(review): lines are missing from this extraction (goto labels and
 * early returns).  Tokens left untouched. */
1022 * Common result for bulk and interrupt
1024 static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
1026 struct urb_priv *urbp = urb->hcpriv;
1028 unsigned int status = 0;
1031 urb->actual_length = 0;
/* Walk the TD chain until an active TD (still in progress) or an error */
1033 list_for_each_entry(td, &urbp->td_list, list) {
1034 status = uhci_status_bits(td_status(td));
1035 if (status & TD_CTRL_ACTIVE)
1036 return -EINPROGRESS;
1038 urb->actual_length += uhci_actual_length(td_status(td));
1043 if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
1044 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
1055 ret = uhci_map_status(status, uhci_packetout(td_token(td)));
1059 * Enable this chunk of code if you want to see some more debugging.
1060 * But be careful, it has the tendancy to starve out khubd and prevent
1061 * disconnects from happening successfully if you have a slow debug
1062 * log interface (like a serial console.
1065 if ((debug == 1 && ret != -EPIPE) || debug > 1) {
1066 /* Some debugging code */
1067 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
1068 __FUNCTION__, status);
1071 /* Print the chain for debugging purposes */
1072 uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
1081 static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1085 /* Can't have low-speed bulk transfers */
1086 if (urb->dev->speed == USB_SPEED_LOW)
1089 ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
1090 if (ret == -EINPROGRESS)
1091 uhci_inc_fsbr(uhci, urb);
1096 static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1098 /* USB 1.1 interrupt transfers only involve one packet per interval;
1099 * that's the uhci_submit_common() "breadth first" policy. Drivers
1100 * can submit urbs of any length, but longer ones might need many
1101 * intervals to complete.
1103 return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
/* NOTE(review): lines are missing from this extraction (locals, braces,
 * the success-path return).  Tokens left untouched. */
1107 * Isochronous transfers
/* Scan pending isoc URBs on the same endpoint to find the frame span
 * [start, end) they already occupy; returns nonzero if none found. */
1109 static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
1111 struct urb *last_urb = NULL;
1112 struct urb_priv *up;
1115 list_for_each_entry(up, &uhci->urb_list, urb_list) {
1116 struct urb *u = up->urb;
1118 /* look for pending URB's with identical pipe handle */
1119 if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
1120 (u->status == -EINPROGRESS) && (u != urb)) {
1122 *start = u->start_frame;
1128 *end = (last_urb->start_frame + last_urb->number_of_packets *
1129 last_urb->interval) & (UHCI_NUMFRAMES-1);
1132 ret = -1; /* no previous urb found */
/* Pick the starting frame for a new isoc URB: either right after the
 * endpoint's last pending URB, or (for ASAP) a little ahead of the
 * current frame counter.
 * NOTE(review): lines are missing from this extraction (branch
 * structure and the return).  Tokens left untouched. */
1137 static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
1140 unsigned int start = 0, end = 0;
1142 if (urb->number_of_packets > 900) /* 900? Why? */
1145 limits = isochronous_find_limits(uhci, urb, &start, &end);
1147 if (urb->transfer_flags & URB_ISO_ASAP) {
/* No pending URBs: schedule 10 frames in the future for safety */
1150 (uhci_get_current_frame_number(uhci) +
1151 10) & (UHCI_NUMFRAMES - 1);
1153 urb->start_frame = end;
1155 urb->start_frame &= (UHCI_NUMFRAMES - 1);
1156 /* FIXME: Sanity check */
/* NOTE(review): lines are missing from this extraction (locals, error
 * paths, braces).  Tokens left untouched. */
1163 * Isochronous transfers
1165 static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1169 int status, destination;
1171 status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
1172 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
1174 ret = isochronous_find_start(uhci, urb);
/* One TD per packet, inserted directly into the frame list */
1178 frame = urb->start_frame;
1179 for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
1180 if (!urb->iso_frame_desc[i].length)
1183 td = uhci_alloc_td(uhci, urb->dev);
1187 uhci_add_td_to_urb(urb, td);
1188 uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
1189 urb->transfer_dma + urb->iso_frame_desc[i].offset);
/* Interrupt only on the final packet of the URB */
1191 if (i + 1 >= urb->number_of_packets)
1192 td->status |= cpu_to_le32(TD_CTRL_IOC);
1194 uhci_insert_td_frame_list(uhci, td, frame);
1197 return -EINPROGRESS;
/* Collect per-packet results for a completed (or in-progress) isoc URB.
 * NOTE(review): lines are missing from this extraction (locals, the
 * per-iteration index increment, the return).  Tokens left untouched. */
1200 static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1203 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1207 urb->actual_length = 0;
1210 list_for_each_entry(td, &urbp->td_list, list) {
/* Any still-active TD means the URB is not finished yet */
1213 if (td_status(td) & TD_CTRL_ACTIVE)
1214 return -EINPROGRESS;
1216 actlength = uhci_actual_length(td_status(td));
1217 urb->iso_frame_desc[i].actual_length = actlength;
1218 urb->actual_length += actlength;
1220 status = uhci_map_status(uhci_status_bits(td_status(td)),
1221 usb_pipeout(urb->pipe));
1222 urb->iso_frame_desc[i].status = status;
/* Find a pending URB on the same endpoint so the new URB can be queued
 * behind it; returns NULL (via missing fall-through) if none.
 * NOTE(review): lines are missing from this extraction (braces and
 * returns).  Tokens left untouched. */
1234 static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
1236 struct urb_priv *up;
1238 /* We don't match Isoc transfers since they are special */
1239 if (usb_pipeisoc(urb->pipe))
1242 list_for_each_entry(up, &uhci->urb_list, urb_list) {
1243 struct urb *u = up->urb;
1245 if (u->dev == urb->dev && u->status == -EINPROGRESS) {
1246 /* For control, ignore the direction */
1247 if (usb_pipecontrol(urb->pipe) &&
1248 (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
1250 else if (u->pipe == urb->pipe)
1258 static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, int mem_flags)
1258 static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, int mem_flags)
1261 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1262 unsigned long flags;
1266 spin_lock_irqsave(&uhci->schedule_lock, flags);
1269 if (ret != -EINPROGRESS) /* URB already unlinked! */
1272 eurb = uhci_find_urb_ep(uhci, urb);
1274 if (!uhci_alloc_urb_priv(uhci, urb)) {
1279 switch (usb_pipetype(urb->pipe)) {
1281 ret = uhci_submit_control(uhci, urb, eurb);
1283 case PIPE_INTERRUPT:
1285 bustime = usb_check_bandwidth(urb->dev, urb);
1289 ret = uhci_submit_interrupt(uhci, urb, eurb);
1290 if (ret == -EINPROGRESS)
1291 usb_claim_bandwidth(urb->dev, urb, bustime, 0);
1293 } else { /* inherit from parent */
1294 urb->bandwidth = eurb->bandwidth;
1295 ret = uhci_submit_interrupt(uhci, urb, eurb);
1299 ret = uhci_submit_bulk(uhci, urb, eurb);
1301 case PIPE_ISOCHRONOUS:
1302 bustime = usb_check_bandwidth(urb->dev, urb);
1308 ret = uhci_submit_isochronous(uhci, urb);
1309 if (ret == -EINPROGRESS)
1310 usb_claim_bandwidth(urb->dev, urb, bustime, 1);
1314 if (ret != -EINPROGRESS) {
1315 /* Submit failed, so delete it from the urb_list */
1316 struct urb_priv *urbp = urb->hcpriv;
1318 list_del_init(&urbp->urb_list);
1319 uhci_destroy_urb_priv(uhci, urb);
1324 spin_unlock_irqrestore(&uhci->schedule_lock, flags);
/* NOTE(review): lines are missing from this extraction (case labels,
 * goto labels, breaks).  Tokens left untouched. */
1329 * Return the result of a transfer
1331 static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
1333 int ret = -EINPROGRESS;
1334 struct urb_priv *urbp;
1336 spin_lock(&urb->lock);
1338 urbp = (struct urb_priv *)urb->hcpriv;
1340 if (urb->status != -EINPROGRESS) /* URB already dequeued */
/* First pass: collect the per-type completion status */
1343 switch (usb_pipetype(urb->pipe)) {
1345 ret = uhci_result_control(uhci, urb);
1348 case PIPE_INTERRUPT:
1349 ret = uhci_result_common(uhci, urb);
1351 case PIPE_ISOCHRONOUS:
1352 ret = uhci_result_isochronous(uhci, urb);
1356 if (ret == -EINPROGRESS)
/* Second pass: the URB is done -- release bandwidth and unlink */
1360 switch (usb_pipetype(urb->pipe)) {
1363 case PIPE_ISOCHRONOUS:
1364 /* Release bandwidth for Interrupt or Isoc. transfers */
1366 usb_release_bandwidth(urb->dev, urb, 1);
1367 uhci_unlink_generic(uhci, urb);
1369 case PIPE_INTERRUPT:
1370 /* Release bandwidth for Interrupt or Isoc. transfers */
1371 /* Make sure we don't release if we have a queued URB */
1372 if (list_empty(&urbp->queue_list) && urb->bandwidth)
1373 usb_release_bandwidth(urb->dev, urb, 0);
1375 /* bandwidth was passed on to queued URB, */
1376 /* so don't let usb_unlink_urb() release it */
1378 uhci_unlink_generic(uhci, urb);
1381 dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
1383 __FUNCTION__, usb_pipetype(urb->pipe), urb);
1386 /* Move it from uhci->urb_list to uhci->complete_list */
1387 uhci_moveto_complete(uhci, urbp);
1390 spin_unlock(&urb->lock);
/*
 * Common unlink path for all pipe types: drop any FSBR claim, recover the
 * endpoint's data toggle from the TD chain, detach queued URBs, and hand
 * the QH to the interrupt loop for reclamation.
 * Caller holds uhci->schedule_lock (per the locking used by its callers
 * uhci_transfer_result and uhci_urb_dequeue in this listing).
 */
1393 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
1395 struct list_head *head;
1397 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1400 uhci_dec_fsbr(uhci, urb); /* Safe since it checks */
1403 * Now we need to find out what the last successful toggle was
1404 * so we can update the local data toggle for the next transfer
1406 * There are 2 ways the last successful completed TD is found:
1408 * 1) The TD is NOT active and the actual length < expected length
1409 * 2) The TD is NOT active and it's the last TD in the chain
1411 * and a third way the first uncompleted TD is found:
1413 * 3) The TD is active and the previous TD is NOT active
1415 * Control and Isochronous ignore the toggle, so this is safe
1418 * FIXME: The toggle fixups won't be 100% reliable until we
1419 * change over to using a single queue for each endpoint and
1420 * stop the queue before unlinking.
1422 head = &urbp->td_list;
1423 list_for_each_entry(td, head, list) {
/* Case 1 or 2 above: last completed TD — next transfer uses its toggle^1 */
1424 if (!(td_status(td) & TD_CTRL_ACTIVE) &&
1425 (uhci_actual_length(td_status(td)) <
1426 uhci_expected_length(td_token(td)) ||
1427 td->list.next == head))
1428 usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1429 uhci_packetout(td_token(td)),
1430 uhci_toggle(td_token(td)) ^ 1);
/* Case 3: first uncompleted TD — next transfer reuses its toggle */
1431 else if ((td_status(td) & TD_CTRL_ACTIVE) && !prevactive)
1432 usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1433 uhci_packetout(td_token(td)),
1434 uhci_toggle(td_token(td)));
1436 prevactive = td_status(td) & TD_CTRL_ACTIVE;
1439 uhci_delete_queued_urb(uhci, urb);
1441 /* The interrupt loop will reclaim the QH's */
1442 uhci_remove_qh(uhci, urbp->qh);
/*
 * hc_driver entry point: cancel a submitted URB.
 * Removes the URB from the schedule and parks its urb_priv on
 * uhci->urb_remove_list; actual completion/giveback is deferred until the
 * controller has moved past the current frame (tracked via urb_remove_age),
 * so the HC cannot still be touching the descriptors when they're freed.
 */
1446 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
1448 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1449 unsigned long flags;
1450 struct urb_priv *urbp;
1453 spin_lock_irqsave(&uhci->schedule_lock, flags);
1455 if (!urbp) /* URB was never linked! */
1457 list_del_init(&urbp->urb_list);
1459 uhci_unlink_generic(uhci, urb);
/* New frame since the last batch of removals: flush the old batch first */
1461 age = uhci_get_current_frame_number(uhci);
1462 if (age != uhci->urb_remove_age) {
1463 uhci_remove_pending_urbps(uhci);
1464 uhci->urb_remove_age = age;
1467 /* If we're the first, set the next interrupt bit */
1468 if (list_empty(&uhci->urb_remove_list))
1469 uhci_set_next_interrupt(uhci);
1470 list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);
1473 spin_unlock_irqrestore(&uhci->schedule_lock, flags);
/*
 * An URB has held Full Speed Bandwidth Reclamation too long (IDLE_TIMEOUT):
 * give up FSBR for it and demote its TDs to breadth-first traversal so it
 * stops monopolizing the bus.  Called from stall_callback with the
 * schedule lock and urb->lock held.
 */
1477 static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
1479 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1480 struct list_head *head;
1484 uhci_dec_fsbr(uhci, urb);
/* Remember the timeout so we don't re-enable FSBR for this URB */
1486 urbp->fsbr_timeout = 1;
1489 * Ideally we would want to fix qh->element as well, but it's
1490 * read/write by the HC, so that can introduce a race. It's not
1491 * really worth the hassle
1494 head = &urbp->td_list;
1495 list_for_each_entry(td, head, list) {
1497 * Make sure we don't do the last one (since it'll have the
1498 * TERM bit set) as well as we skip every so many TD's to
1499 * make sure it doesn't hog the bandwidth
1501 if (td->list.next != head && (count % DEPTH_INTERVAL) ==
1502 (DEPTH_INTERVAL - 1))
1503 td->link |= UHCI_PTR_DEPTH;
1512 * uhci_get_current_frame_number()
1514 * returns the current frame number for a USB bus/controller.
/* Read the current frame number from the controller's FRNUM I/O register. */
1516 static unsigned int uhci_get_current_frame_number(struct uhci_hcd *uhci)
1518 return inw(uhci->io_addr + USBFRNUM);
1521 static int init_stall_timer(struct usb_hcd *hcd);
/*
 * Periodic (100ms, see init_stall_timer) housekeeping timer:
 *  - finishes URBs whose deferred removal has aged past the current frame,
 *  - times out URBs that have held FSBR longer than IDLE_TIMEOUT,
 *  - completes a pending global FSBR disable by terminating skel_term_qh,
 *  - polls the HC power-state machine and resume detection,
 * then re-arms itself.
 */
1523 static void stall_callback(unsigned long ptr)
1525 struct usb_hcd *hcd = (struct usb_hcd *)ptr;
1526 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1527 struct urb_priv *up;
1528 unsigned long flags;
1529 int called_uhci_finish_completion = 0;
1531 spin_lock_irqsave(&uhci->schedule_lock, flags);
/* Flush removals from an earlier frame: the HC is done with them now */
1532 if (!list_empty(&uhci->urb_remove_list) &&
1533 uhci_get_current_frame_number(uhci) != uhci->urb_remove_age) {
1534 uhci_remove_pending_urbps(uhci);
1535 uhci_finish_completion(hcd, NULL);
1536 called_uhci_finish_completion = 1;
1539 list_for_each_entry(up, &uhci->urb_list, urb_list) {
1540 struct urb *u = up->urb;
1542 spin_lock(&u->lock);
1544 /* Check if the FSBR timed out */
1545 if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
1546 uhci_fsbr_timeout(uhci, u);
1548 spin_unlock(&u->lock);
1550 spin_unlock_irqrestore(&uhci->schedule_lock, flags);
1552 /* Wake up anyone waiting for an URB to complete */
1553 if (called_uhci_finish_completion)
1554 wake_up_all(&uhci->waitqh);
1556 /* Really disable FSBR */
1557 if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
1558 uhci->fsbrtimeout = 0;
1559 uhci->skel_term_qh->link = UHCI_PTR_TERM;
1562 /* Poll for and perform state transitions */
1563 hc_state_transitions(uhci);
1564 if (unlikely(uhci->suspended_ports && uhci->state != UHCI_SUSPENDED))
1565 uhci_check_resume(uhci);
/* Re-arm the timer for the next poll */
1567 init_stall_timer(hcd);
/*
 * (Re)arm the housekeeping timer to fire stall_callback() in 100ms.
 * Also used by stall_callback itself to keep the timer periodic.
 */
1570 static int init_stall_timer(struct usb_hcd *hcd)
1572 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1574 init_timer(&uhci->stall_timer);
1575 uhci->stall_timer.function = stall_callback;
1576 uhci->stall_timer.data = (unsigned long)hcd;
1577 uhci->stall_timer.expires = jiffies + msecs_to_jiffies(100);
1578 add_timer(&uhci->stall_timer);
/*
 * Free every QH parked on uhci->qh_remove_list.  Callers only invoke this
 * once the frame number has advanced past qh_remove_age, so the HC can no
 * longer be referencing these QHs.  Called with schedule_lock held.
 */
1583 static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
1585 struct uhci_qh *qh, *tmp;
1587 list_for_each_entry_safe(qh, tmp, &uhci->qh_remove_list, remove_list) {
1588 list_del_init(&qh->remove_list);
1590 uhci_free_qh(uhci, qh);
/*
 * Free every TD parked on uhci->td_remove_list; the deferred-removal
 * counterpart of uhci_free_pending_qhs.  Called with schedule_lock held.
 */
1594 static void uhci_free_pending_tds(struct uhci_hcd *uhci)
1596 struct uhci_td *td, *tmp;
1598 list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
1599 list_del_init(&td->remove_list);
1601 uhci_free_td(uhci, td);
/*
 * Destroy an URB's private state and give it back to the USB core.
 * The schedule lock is dropped around usb_hcd_giveback_urb() because the
 * completion handler may resubmit and re-enter this driver (the sparse
 * __releases/__acquires annotations document this).
 */
1606 uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
1607 __releases(uhci->schedule_lock)
1608 __acquires(uhci->schedule_lock)
1610 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1612 uhci_destroy_urb_priv(uhci, urb);
1614 spin_unlock(&uhci->schedule_lock);
1615 usb_hcd_giveback_urb(hcd, urb, regs);
1616 spin_lock(&uhci->schedule_lock);
/*
 * Give back every URB on uhci->complete_list.  Uses the _safe iterator
 * because uhci_finish_urb temporarily drops the schedule lock, so the list
 * can change under us.  Called with schedule_lock held.
 */
1619 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs)
1621 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1622 struct urb_priv *urbp, *tmp;
1624 list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
1625 struct urb *urb = urbp->urb;
1627 list_del_init(&urbp->urb_list);
1628 uhci_finish_urb(hcd, urb, regs);
/*
 * Promote all deferred-removal URBs to the completion list; they will be
 * given back on the next uhci_finish_completion().  schedule_lock held.
 */
1632 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
1635 /* Splice the urb_remove_list onto the end of the complete_list */
1636 list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
/*
 * Interrupt handler.  Acknowledges the controller status, reports fatal
 * conditions, frees aged-out QHs/TDs/URBs, scans the pending URB list for
 * completions, and gives finished URBs back to the core.
 */
1639 static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
1641 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1642 unsigned long io_addr = uhci->io_addr;
1643 unsigned short status;
1644 struct urb_priv *urbp, *tmp;
1648 * Read the interrupt status, and write it back to clear the
1649 * interrupt cause. Contrary to the UHCI specification, the
1650 * "HC Halted" status bit is persistent: it is RO, not R/WC.
1652 status = inw(io_addr + USBSTS);
1653 if (!(status & ~USBSTS_HCH)) /* shared interrupt, not mine */
1655 outw(status, io_addr + USBSTS); /* Clear it */
/* Report abnormal conditions before doing normal completion work */
1657 if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
1658 if (status & USBSTS_HSE)
1659 dev_err(uhci_dev(uhci), "host system error, "
1661 if (status & USBSTS_HCPE)
1662 dev_err(uhci_dev(uhci), "host controller process "
1663 "error, something bad happened!\n");
1664 if ((status & USBSTS_HCH) && uhci->state > 0) {
1665 dev_err(uhci_dev(uhci), "host controller halted, "
1667 /* FIXME: Reset the controller, fix the offending TD */
/* Resume Detect: noted here, acted on by the state machine later */
1671 if (status & USBSTS_RD)
1672 uhci->resume_detect = 1;
1674 spin_lock(&uhci->schedule_lock);
/* Frame number advanced past each age => HC is done with those items */
1676 age = uhci_get_current_frame_number(uhci);
1677 if (age != uhci->qh_remove_age)
1678 uhci_free_pending_qhs(uhci);
1679 if (age != uhci->td_remove_age)
1680 uhci_free_pending_tds(uhci);
1681 if (age != uhci->urb_remove_age)
1682 uhci_remove_pending_urbps(uhci);
/* Keep the IOC-on-next-frame interrupt armed only while work remains */
1684 if (list_empty(&uhci->urb_remove_list) &&
1685 list_empty(&uhci->td_remove_list) &&
1686 list_empty(&uhci->qh_remove_list))
1687 uhci_clear_next_interrupt(uhci);
1689 uhci_set_next_interrupt(uhci);
1691 /* Walk the list of pending URBs to see which ones completed
1692 * (must be _safe because uhci_transfer_result() dequeues URBs) */
1693 list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
1694 struct urb *urb = urbp->urb;
1696 /* Checks the status and does all of the magic necessary */
1697 uhci_transfer_result(uhci, urb);
1699 uhci_finish_completion(hcd, regs);
1701 spin_unlock(&uhci->schedule_lock);
1703 /* Wake up anyone waiting for an URB to complete */
1704 wake_up_all(&uhci->waitqh);
/*
 * Hard-reset the controller: disable legacy-support/PIRQ/SMI and all
 * interrupts, then perform a global reset (the elided lines between the
 * two USBCMD writes presumably hold the 50ms delay the comment promises —
 * confirm against the full file).
 */
1709 static void reset_hc(struct uhci_hcd *uhci)
1711 unsigned long io_addr = uhci->io_addr;
1713 /* Turn off PIRQ, SMI, and all interrupts. This also turns off
1714 * the BIOS's USB Legacy Support.
1716 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0);
1717 outw(0, uhci->io_addr + USBINTR);
1719 /* Global reset for 50ms */
1720 uhci->state = UHCI_RESET;
1721 outw(USBCMD_GRESET, io_addr + USBCMD);
1723 outw(0, io_addr + USBCMD);
1725 /* Another 10ms delay */
1727 uhci->resume_detect = 0;
/* Put the controller into global suspend (Enter Global Suspend Mode). */
1730 static void suspend_hc(struct uhci_hcd *uhci)
1732 unsigned long io_addr = uhci->io_addr;
1734 dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
1735 uhci->state = UHCI_SUSPENDED;
1736 uhci->resume_detect = 0;
1737 outw(USBCMD_EGSM, io_addr + USBCMD);
/*
 * Advance the resume state machine one step.  Resume from global suspend
 * is multi-phase (drive global resume >= 20ms, end it, wait for the EOP
 * to go out, then run); each call handles the phase recorded in
 * uhci->state, with state_end holding the deadline for the current phase.
 */
1740 static void wakeup_hc(struct uhci_hcd *uhci)
1742 unsigned long io_addr = uhci->io_addr;
1744 switch (uhci->state) {
1745 case UHCI_SUSPENDED: /* Start the resume */
1746 dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
1748 /* Global resume for >= 20ms */
1749 outw(USBCMD_FGR | USBCMD_EGSM, io_addr + USBCMD);
1750 uhci->state = UHCI_RESUMING_1;
1751 uhci->state_end = jiffies + msecs_to_jiffies(20);
1754 case UHCI_RESUMING_1: /* End global resume */
1755 uhci->state = UHCI_RESUMING_2;
1756 outw(0, io_addr + USBCMD);
1759 case UHCI_RESUMING_2: /* Wait for EOP to be sent */
1760 if (inw(io_addr + USBCMD) & USBCMD_FGR)
1763 /* Run for at least 1 second, and
1764 * mark it configured with a 64-byte max packet */
1765 uhci->state = UHCI_RUNNING_GRACE;
1766 uhci->state_end = jiffies + HZ;
1767 outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP,
1771 case UHCI_RUNNING_GRACE: /* Now allowed to suspend */
1772 uhci->state = UHCI_RUNNING;
/*
 * Return nonzero if any root-hub port reports Current Connect Status.
 * (Return statement is elided from this listing.)
 */
1780 static int ports_active(struct uhci_hcd *uhci)
1782 unsigned long io_addr = uhci->io_addr;
1786 for (i = 0; i < uhci->rh_numports; i++)
1787 connection |= (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_CCS);
/*
 * Decide whether global suspend is safe on this hardware.  Non-Intel
 * controllers are always allowed; Intel parts are checked for the
 * over-current erratum described in the comment below.
 */
1792 static int suspend_allowed(struct uhci_hcd *uhci)
1794 unsigned long io_addr = uhci->io_addr;
1797 if (to_pci_dev(uhci_dev(uhci))->vendor != PCI_VENDOR_ID_INTEL)
1800 /* Some of Intel's USB controllers have a bug that causes false
1801 * resume indications if any port has an over current condition.
1802 * To prevent problems, we will not allow a global suspend if
1805 * Some motherboards using Intel's chipsets (but not using all
1806 * the USB ports) appear to hardwire the over current inputs active
1807 * to disable the USB ports.
1810 /* check for over current condition on any port */
1811 for (i = 0; i < uhci->rh_numports; i++) {
1812 if (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_OC)
/*
 * Controller power-state machine, polled from stall_callback().
 * Moves between RUNNING, SUSPENDING_GRACE, SUSPENDED and the RESUMING
 * states based on port activity, resume-detect, and state_end deadlines.
 */
1819 static void hc_state_transitions(struct uhci_hcd *uhci)
1821 switch (uhci->state) {
1824 /* global suspend if nothing connected for 1 second */
1825 if (!ports_active(uhci) && suspend_allowed(uhci)) {
1826 uhci->state = UHCI_SUSPENDING_GRACE;
1827 uhci->state_end = jiffies + HZ;
1831 case UHCI_SUSPENDING_GRACE:
/* A device appeared during the grace period: stay running */
1832 if (ports_active(uhci))
1833 uhci->state = UHCI_RUNNING;
1834 else if (time_after_eq(jiffies, uhci->state_end))
1838 case UHCI_SUSPENDED:
1840 /* wakeup if requested by a device */
1841 if (uhci->resume_detect)
1845 case UHCI_RESUMING_1:
1846 case UHCI_RESUMING_2:
1847 case UHCI_RUNNING_GRACE:
/* Let wakeup_hc() advance these timed phases once their deadline hits */
1848 if (time_after_eq(jiffies, uhci->state_end))
1857 static int start_hc(struct uhci_hcd *uhci)
1859 unsigned long io_addr = uhci->io_addr;
1863 * Reset the HC - this will force us to get a
1864 * new notification of any already connected
1865 * ports due to the virtual disconnect that it
1868 outw(USBCMD_HCRESET, io_addr + USBCMD);
1869 while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
1870 if (--timeout < 0) {
1871 dev_err(uhci_dev(uhci), "USBCMD_HCRESET timed out!\n");
1877 /* Turn on PIRQ and all interrupts */
1878 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
1880 outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
1883 /* Start at frame 0 */
1884 outw(0, io_addr + USBFRNUM);
1885 outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);
1887 /* Run and mark it configured with a 64-byte max packet */
1888 uhci->state = UHCI_RUNNING_GRACE;
1889 uhci->state_end = jiffies + HZ;
1890 outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
1892 uhci->hcd.state = USB_STATE_RUNNING;
1897 * De-allocate all resources..
/*
 * Free everything uhci_start() allocated: skeleton QHs, the terminating
 * TD, both DMA pools, the coherent frame list, and the proc entry.
 * Each pointer is NULLed after freeing so the function is safe against
 * partially-initialized state (it is used on error paths).
 */
1899 static void release_uhci(struct uhci_hcd *uhci)
1903 for (i = 0; i < UHCI_NUM_SKELQH; i++)
1904 if (uhci->skelqh[i]) {
1905 uhci_free_qh(uhci, uhci->skelqh[i]);
1906 uhci->skelqh[i] = NULL;
1909 if (uhci->term_td) {
1910 uhci_free_td(uhci, uhci->term_td);
1911 uhci->term_td = NULL;
1914 if (uhci->qh_pool) {
1915 dma_pool_destroy(uhci->qh_pool);
1916 uhci->qh_pool = NULL;
1919 if (uhci->td_pool) {
1920 dma_pool_destroy(uhci->td_pool);
1921 uhci->td_pool = NULL;
1925 dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
1926 uhci->fl, uhci->fl->dma_handle);
1930 #ifdef CONFIG_PROC_FS
1931 if (uhci->proc_entry) {
1932 remove_proc_entry(uhci->hcd.self.bus_name, uhci_proc_root);
1933 uhci->proc_entry = NULL;
/*
 * hc_driver .reset entry point: record the mapped I/O base and quiesce
 * the hardware before .start runs (the reset call itself is elided from
 * this listing).
 */
1938 static int uhci_reset(struct usb_hcd *hcd)
1940 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1942 uhci->io_addr = (unsigned long) hcd->regs;
1944 /* Kick BIOS off this hardware and reset, so we won't get
1945 * interrupts from any previous setup.
1952 * Allocate a frame list, and then setup the skeleton
1954 * The hardware doesn't really know any difference
1955 * in the queues, but the order does matter for the
1956 * protocols higher up. The order is:
1958 * - any isochronous events handled before any
1959 * of the queues. We don't do that here, because
1960 * we'll create the actual TD entries on demand.
1961 * - The first queue is the interrupt queue.
1962 * - The second queue is the control queue, split into low- and full-speed
1963 * - The third queue is bulk queue.
1964 * - The fourth queue is the bandwidth reclamation queue, which loops back
1965 * to the full-speed control queue.
/*
 * hc_driver .start entry point: allocate and wire up all schedule data
 * structures (frame list, TD/QH pools, skeleton QHs, terminating TD),
 * detect the number of root-hub ports, start the controller, and register
 * the root hub.  Error-path labels are elided from this listing; the
 * visible goto targets unwind allocations in reverse order.
 */
1967 static int uhci_start(struct usb_hcd *hcd)
1969 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1970 int retval = -EBUSY;
1973 dma_addr_t dma_handle;
1974 struct usb_device *udev;
1975 #ifdef CONFIG_PROC_FS
1976 struct proc_dir_entry *ent;
1979 io_size = pci_resource_len(to_pci_dev(uhci_dev(uhci)), hcd->region);
1981 #ifdef CONFIG_PROC_FS
1982 ent = create_proc_entry(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
1984 dev_err(uhci_dev(uhci), "couldn't create uhci proc entry\n");
1986 goto err_create_proc_entry;
1990 ent->proc_fops = &uhci_proc_operations;
1992 uhci->proc_entry = ent;
1996 uhci->fsbrtimeout = 0;
/* Initialize all bookkeeping before any hardware is touched */
1998 spin_lock_init(&uhci->schedule_lock);
1999 INIT_LIST_HEAD(&uhci->qh_remove_list);
2001 INIT_LIST_HEAD(&uhci->td_remove_list);
2003 INIT_LIST_HEAD(&uhci->urb_remove_list);
2005 INIT_LIST_HEAD(&uhci->urb_list);
2007 INIT_LIST_HEAD(&uhci->complete_list);
2009 init_waitqueue_head(&uhci->waitqh);
/* Frame list must be DMA-coherent: the HC reads it every frame */
2011 uhci->fl = dma_alloc_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2014 dev_err(uhci_dev(uhci), "unable to allocate "
2015 "consistent memory for frame list\n");
2019 memset((void *)uhci->fl, 0, sizeof(*uhci->fl));
2021 uhci->fl->dma_handle = dma_handle;
2023 uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
2024 sizeof(struct uhci_td), 16, 0);
2025 if (!uhci->td_pool) {
2026 dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
2027 goto err_create_td_pool;
2030 uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
2031 sizeof(struct uhci_qh), 16, 0);
2032 if (!uhci->qh_pool) {
2033 dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
2034 goto err_create_qh_pool;
2037 /* Initialize the root hub */
2039 /* UHCI specs says devices must have 2 ports, but goes on to say */
2040 /* they may have more but give no way to determine how many they */
2041 /* have. However, according to the UHCI spec, Bit 7 is always set */
2042 /* to 1. So we try to use this to our advantage */
2043 for (port = 0; port < (io_size - 0x10) / 2; port++) {
2044 unsigned int portstatus;
2046 portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
2047 if (!(portstatus & 0x0080))
2051 dev_info(uhci_dev(uhci), "detected %d ports\n", port);
2053 /* This is experimental so anything less than 2 or greater than 8 is */
2054 /* something weird and we'll ignore it */
2055 if (port < 2 || port > UHCI_RH_MAXCHILD) {
2056 dev_info(uhci_dev(uhci), "port count misdetected? "
2057 "forcing to 2 ports\n");
2061 uhci->rh_numports = port;
2063 udev = usb_alloc_dev(NULL, &hcd->self, 0);
2065 dev_err(uhci_dev(uhci), "unable to allocate root hub\n");
2066 goto err_alloc_root_hub;
2069 uhci->term_td = uhci_alloc_td(uhci, udev);
2070 if (!uhci->term_td) {
2071 dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
2072 goto err_alloc_term_td;
2075 for (i = 0; i < UHCI_NUM_SKELQH; i++) {
2076 uhci->skelqh[i] = uhci_alloc_qh(uhci, udev);
2077 if (!uhci->skelqh[i]) {
2078 dev_err(uhci_dev(uhci), "unable to allocate QH\n");
2079 goto err_alloc_skelqh;
2084 * 8 Interrupt queues; link all higher int queues to int1,
2085 * then link int1 to control and control to bulk
2087 uhci->skel_int128_qh->link =
2088 uhci->skel_int64_qh->link =
2089 uhci->skel_int32_qh->link =
2090 uhci->skel_int16_qh->link =
2091 uhci->skel_int8_qh->link =
2092 uhci->skel_int4_qh->link =
2093 uhci->skel_int2_qh->link =
2094 cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
2095 uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;
2097 uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
2098 uhci->skel_fs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
2099 uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;
2101 /* This dummy TD is to work around a bug in Intel PIIX controllers */
2102 uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
2103 (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
/* term_td links to itself; skel_term_qh terminates the schedule */
2104 uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);
2106 uhci->skel_term_qh->link = UHCI_PTR_TERM;
2107 uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);
2110 * Fill the frame list: make all entries point to the proper
2113 * The interrupt queues will be interleaved as evenly as possible.
2114 * There's not much to be done about period-1 interrupts; they have
2115 * to occur in every frame. But we can schedule period-2 interrupts
2116 * in odd-numbered frames, period-4 interrupts in frames congruent
2117 * to 2 (mod 4), and so on. This way each frame only has two
2118 * interrupt QHs, which will help spread out bandwidth utilization.
2120 for (i = 0; i < UHCI_NUMFRAMES; i++) {
2124 * ffs (Find First bit Set) does exactly what we need:
2125 * 1,3,5,... => ffs = 0 => use skel_int2_qh = skelqh[6],
2126 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc.
2127 * ffs > 6 => not on any high-period queue, so use
2128 * skel_int1_qh = skelqh[7].
2129 * Add UHCI_NUMFRAMES to insure at least one bit is set.
2131 irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES);
2135 /* Only place we don't use the frame list routines */
2136 uhci->fl->frame[i] = UHCI_PTR_QH |
2137 cpu_to_le32(uhci->skelqh[irq]->dma_handle);
2141 * Some architectures require a full mb() to enforce completion of
2142 * the memory writes above before the I/O transfers in start_hc().
2145 if ((retval = start_hc(uhci)) != 0)
2146 goto err_alloc_skelqh;
2148 init_stall_timer(hcd);
2150 udev->speed = USB_SPEED_FULL;
2152 if (hcd_register_root(udev, &uhci->hcd) != 0) {
2153 dev_err(uhci_dev(uhci), "unable to start root hub\n");
2155 goto err_start_root_hub;
/* Error unwinding below: free in reverse order of allocation */
2166 del_timer_sync(&uhci->stall_timer);
2169 for (i = 0; i < UHCI_NUM_SKELQH; i++)
2170 if (uhci->skelqh[i]) {
2171 uhci_free_qh(uhci, uhci->skelqh[i]);
2172 uhci->skelqh[i] = NULL;
2175 uhci_free_td(uhci, uhci->term_td);
2176 uhci->term_td = NULL;
2182 dma_pool_destroy(uhci->qh_pool);
2183 uhci->qh_pool = NULL;
2186 dma_pool_destroy(uhci->td_pool);
2187 uhci->td_pool = NULL;
2190 dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2191 uhci->fl, uhci->fl->dma_handle);
2195 #ifdef CONFIG_PROC_FS
2196 remove_proc_entry(hcd->self.bus_name, uhci_proc_root);
2197 uhci->proc_entry = NULL;
2199 err_create_proc_entry:
/*
 * hc_driver .stop entry point: stop the housekeeping timer, drain and
 * free all pending QHs/TDs/URBs under the schedule lock, and wake any
 * waiters.  The free-pending calls appear twice because
 * uhci_finish_completion can queue more removals.
 */
2205 static void uhci_stop(struct usb_hcd *hcd)
2207 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2209 del_timer_sync(&uhci->stall_timer);
2212 * At this point, we're guaranteed that no new connects can be made
2213 * to this bus since there are no more parents
2218 spin_lock_irq(&uhci->schedule_lock);
2219 uhci_free_pending_qhs(uhci);
2220 uhci_free_pending_tds(uhci);
2221 uhci_remove_pending_urbps(uhci);
2222 uhci_finish_completion(hcd, NULL);
2224 uhci_free_pending_qhs(uhci);
2225 uhci_free_pending_tds(uhci);
2226 spin_unlock_irq(&uhci->schedule_lock);
2228 /* Wake up anyone waiting for an URB to complete */
2229 wake_up_all(&uhci->waitqh);
/*
 * hc_driver .suspend entry point.  On hardware where global suspend is
 * safe, save the 10-bit frame number so uhci_resume can restore it.
 * (The suspend_hc/reset calls are elided from this listing.)
 */
2235 static int uhci_suspend(struct usb_hcd *hcd, u32 state)
2237 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2239 /* Don't try to suspend broken motherboards, reset instead */
2240 if (suspend_allowed(uhci)) {
2242 uhci->saved_framenumber =
2243 inw(uhci->io_addr + USBFRNUM) & 0x3ff;
/*
 * hc_driver .resume entry point: re-enable bus mastering, and if we had
 * genuinely suspended, reprogram the registers that some systems lose
 * across a PM cycle before restarting the controller.
 */
2249 static int uhci_resume(struct usb_hcd *hcd)
2251 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2254 pci_set_master(to_pci_dev(uhci_dev(uhci)));
2256 if (uhci->state == UHCI_SUSPENDED) {
2259 * Some systems don't maintain the UHCI register values
2260 * during a PM suspend/resume cycle, so reinitialize
2261 * the Frame Number, Framelist Base Address, Interrupt
2262 * Enable, and Legacy Support registers.
2264 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
2266 outw(uhci->saved_framenumber, uhci->io_addr + USBFRNUM);
2267 outl(uhci->fl->dma_handle, uhci->io_addr + USBFLBASEADD);
2268 outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC |
2269 USBINTR_SP, uhci->io_addr + USBINTR);
/* Force the wakeup path to run even without a device-initiated resume */
2270 uhci->resume_detect = 1;
2271 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
2275 if ((rc = start_hc(uhci)) != 0)
2278 uhci->hcd.state = USB_STATE_RUNNING;
/*
 * Allocate and zero a uhci_hcd (which embeds the usb_hcd returned to the
 * core) and set its product description.
 */
2283 static struct usb_hcd *uhci_hcd_alloc(void)
2285 struct uhci_hcd *uhci;
2287 uhci = (struct uhci_hcd *)kmalloc(sizeof(*uhci), GFP_KERNEL);
2291 memset(uhci, 0, sizeof(*uhci));
2292 uhci->hcd.product_desc = "UHCI Host Controller";
2296 /* Are there any URBs for a particular device/endpoint on a given list? */
/* Are there any URBs for a particular device/endpoint on a given list?
 * `ep` encodes endpoint number | direction bit, matching the
 * usb_pipeendpoint|usb_pipein combination tested below.
 * Caller must hold the lock protecting `head`. */
2297 static int urbs_for_ep_list(struct list_head *head,
2298 struct hcd_dev *hdev, int ep)
2300 struct urb_priv *urbp;
2302 list_for_each_entry(urbp, head, urb_list) {
2303 struct urb *urb = urbp->urb;
2305 if (hdev == urb->dev->hcpriv && ep ==
2306 (usb_pipeendpoint(urb->pipe) |
2307 usb_pipein(urb->pipe)))
2313 /* Are there any URBs for a particular device/endpoint? */
/* Check all three URB lists (pending, complete, deferred-remove) for any
 * URB belonging to the given device/endpoint, under the schedule lock. */
2314 static int urbs_for_ep(struct uhci_hcd *uhci, struct hcd_dev *hdev, int ep)
2318 spin_lock_irq(&uhci->schedule_lock);
2319 rc = (urbs_for_ep_list(&uhci->urb_list, hdev, ep) ||
2320 urbs_for_ep_list(&uhci->complete_list, hdev, ep) ||
2321 urbs_for_ep_list(&uhci->urb_remove_list, hdev, ep));
2322 spin_unlock_irq(&uhci->schedule_lock);
2326 /* Wait until all the URBs for a particular device/endpoint are gone */
/* hc_driver .endpoint_disable: sleep on uhci->waitqh (woken by the IRQ
 * handler and stall_callback) until every URB for this device/endpoint
 * has been given back. */
2327 static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
2328 struct hcd_dev *hdev, int endpoint)
2330 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2332 wait_event_interruptible(uhci->waitqh,
2333 !urbs_for_ep(uhci, hdev, endpoint));
/* hc_driver .get_frame_number: thin wrapper over the FRNUM register read. */
2336 static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
2338 return uhci_get_current_frame_number(hcd_to_uhci(hcd));
2341 static const char hcd_name[] = "uhci_hcd";
/* Operations table handed to the USB core; maps generic HCD callbacks to
 * the UHCI implementations defined above. */
2343 static const struct hc_driver uhci_driver = {
2344 .description = hcd_name,
2346 /* Generic hardware linkage */
2350 /* Basic lifecycle operations */
2351 .reset = uhci_reset,
2352 .start = uhci_start,
2354 .suspend = uhci_suspend,
2355 .resume = uhci_resume,
2359 .hcd_alloc = uhci_hcd_alloc,
2361 .urb_enqueue = uhci_urb_enqueue,
2362 .urb_dequeue = uhci_urb_dequeue,
2364 .endpoint_disable = uhci_hcd_endpoint_disable,
2365 .get_frame_number = uhci_hcd_get_frame_number,
2367 .hub_status_data = uhci_hub_status_data,
2368 .hub_control = uhci_hub_control,
/* Match every PCI device with the serial-USB class code and UHCI
 * programming interface (0x00); terminated by an all-zero entry. */
2371 static const struct pci_device_id uhci_pci_ids[] = { {
2372 /* handle any USB UHCI controller */
2373 PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
2374 .driver_data = (unsigned long) &uhci_driver,
2375 }, { /* end: all zeroes */ }
2378 MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
/* PCI glue: probe/remove/suspend/resume are the generic usb_hcd_pci_*
 * helpers, which in turn call through uhci_driver. */
2380 static struct pci_driver uhci_pci_driver = {
2381 .name = (char *)hcd_name,
2382 .id_table = uhci_pci_ids,
2384 .probe = usb_hcd_pci_probe,
2385 .remove = usb_hcd_pci_remove,
2388 .suspend = usb_hcd_pci_suspend,
2389 .resume = usb_hcd_pci_resume,
/*
 * Module init: allocate the debug error buffer, create the proc root,
 * create the urb_priv slab cache, then register the PCI driver.  The
 * visible tail lines are the error-unwind path (labels elided).
 */
2393 static int __init uhci_hcd_init(void)
2395 int retval = -ENOMEM;
2397 printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");
2403 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
2408 #ifdef CONFIG_PROC_FS
2409 uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, NULL);
2410 if (!uhci_proc_root)
2414 uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
2415 sizeof(struct urb_priv), 0, 0, NULL, NULL);
2416 if (!uhci_up_cachep)
2419 retval = pci_register_driver(&uhci_pci_driver);
/* Error unwinding: destroy cache, remove proc entry (labels elided) */
2426 if (kmem_cache_destroy(uhci_up_cachep))
2427 warn("not all urb_priv's were freed!");
2431 #ifdef CONFIG_PROC_FS
2432 remove_proc_entry("driver/uhci", NULL);
/* Module exit: unregister the PCI driver and release init-time resources
 * in reverse order of uhci_hcd_init. */
2444 static void __exit uhci_hcd_cleanup(void)
2446 pci_unregister_driver(&uhci_pci_driver);
2448 if (kmem_cache_destroy(uhci_up_cachep))
2449 warn("not all urb_priv's were freed!");
2451 #ifdef CONFIG_PROC_FS
2452 remove_proc_entry("driver/uhci", NULL);
/* Module entry/exit hookup and metadata */
2459 module_init(uhci_hcd_init);
2460 module_exit(uhci_hcd_cleanup);
2462 MODULE_AUTHOR(DRIVER_AUTHOR);
2463 MODULE_DESCRIPTION(DRIVER_DESC);
2464 MODULE_LICENSE("GPL");