2 * Universal Host Controller Interface driver for USB.
4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
6 * (C) Copyright 1999 Linus Torvalds
7 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8 * (C) Copyright 1999 Randy Dunlap
9 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14 * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
16 * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
18 * Intel documents this fairly well, and as far as I know there
19 * are no royalties or anything like that, but even so there are
20 * people who decided that they want to do the same thing in a
21 * completely different way.
23 * WARNING! The USB documentation is downright evil. Most of it
24 * is just crap, written by a committee. You're better off ignoring
25 * most of it, the important stuff is:
26 * - the low-level protocol (fairly simple but lots of small details)
27 * - working around the horridness of the rest
30 #include <linux/config.h>
31 #ifdef CONFIG_USB_DEBUG
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/kernel.h>
39 #include <linux/init.h>
40 #include <linux/delay.h>
41 #include <linux/ioport.h>
42 #include <linux/sched.h>
43 #include <linux/slab.h>
44 #include <linux/smp_lock.h>
45 #include <linux/errno.h>
46 #include <linux/unistd.h>
47 #include <linux/interrupt.h>
48 #include <linux/spinlock.h>
49 #include <linux/proc_fs.h>
51 #include <linux/dmapool.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/usb.h>
55 #include <asm/bitops.h>
56 #include <asm/uaccess.h>
59 #include <asm/system.h>
61 #include "../core/hcd.h"
67 #define DRIVER_VERSION "v2.2"
68 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
69 Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
71 #define DRIVER_DESC "USB Universal Host Controller Interface driver"
74 * debug = 0, no debugging messages
75 * debug = 1, dump failed URB's except for stalls
76 * debug = 2, dump all failed URB's (including stalls)
77 * show all queues in /proc/driver/uhci/[pci_addr]
78 * debug = 3, show all TD's in URB's when dumping
85 MODULE_PARM(debug, "i");
86 MODULE_PARM_DESC(debug, "Debug level");
88 #define ERRBUF_LEN (32 * 1024)
91 #include "uhci-debug.c"
93 static kmem_cache_t *uhci_up_cachep; /* urb_priv */
95 static int uhci_get_current_frame_number(struct uhci_hcd *uhci);
96 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
97 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
98 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
99 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs);
100 static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
101 static void uhci_free_pending_tds(struct uhci_hcd *uhci);
103 static void hc_state_transitions(struct uhci_hcd *uhci);
105 /* If a transfer is still active after this much time, turn off FSBR */
106 #define IDLE_TIMEOUT msecs_to_jiffies(50)
107 #define FSBR_DELAY msecs_to_jiffies(50)
109 /* When we timeout an idle transfer for FSBR, we'll switch it over to */
110 /* depth first traversal. We'll do it in groups of this number of TD's */
111 /* to make sure it doesn't hog all of the bandwidth */
112 #define DEPTH_INTERVAL 5
115 * Technically, updating td->status here is a race, but it's not really a
116 * problem. The worst that can happen is that we set the IOC bit again
117 * generating a spurious interrupt. We could fix this by creating another
118 * QH and leaving the IOC bit always set, but then we would have to play
119 * games with the FSBR code to make sure we get the correct order in all
120 * the cases. I don't think it's worth the effort
/* Ask the HC for an interrupt at the next frame boundary: set the IOC
 * bit in the terminating TD's status word (see the race note above). */
122 static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
124 uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
/* Cancel the frame-boundary interrupt request: clear the IOC bit in
 * the terminating TD's status word. */
127 static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
129 uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
/* Move an URB's private data from whatever list it is on (normally
 * uhci->urb_list) to the tail of uhci->complete_list for later
 * completion processing. */
132 static inline void uhci_moveto_complete(struct uhci_hcd *uhci,
133 struct urb_priv *urbp)
135 list_move_tail(&urbp->urb_list, &uhci->complete_list);
/* Allocate a transfer descriptor from the HC-visible DMA pool and
 * initialize it: record its bus address, terminate its hardware link,
 * and set up the three list heads it can be queued on.
 * Called in atomic context (GFP_ATOMIC). Returns NULL on pool
 * exhaustion — presumably checked by the (not visible here) early
 * return after dma_pool_alloc(). */
138 static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
140 dma_addr_t dma_handle;
143 td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
147 td->dma_handle = dma_handle;
/* Hardware link pointer starts terminated; it is patched when the TD
 * is inserted into a QH or the frame list. */
149 td->link = UHCI_PTR_TERM;
155 INIT_LIST_HEAD(&td->list);
156 INIT_LIST_HEAD(&td->remove_list);
157 INIT_LIST_HEAD(&td->fl_list);
/* Fill in the three hardware-visible words of a TD, converting each
 * from CPU byte order to the little-endian format the HC reads. */
164 static inline void uhci_fill_td(struct uhci_td *td, u32 status,
165 u32 token, u32 buffer)
167 td->status = cpu_to_le32(status);
168 td->token = cpu_to_le32(token);
169 td->buffer = cpu_to_le32(buffer);
173 * We insert Isochronous URB's directly into the frame list at the beginning
175 static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
/* Wrap into the schedule's frame range and remember which frame this
 * TD was placed in (used later by uhci_remove_td). */
177 framenum %= UHCI_NUMFRAMES;
179 td->frame = framenum;
181 /* Is there a TD already mapped there? */
182 if (uhci->fl->frame_cpu[framenum]) {
183 struct uhci_td *ftd, *ltd;
185 ftd = uhci->fl->frame_cpu[framenum];
186 ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
/* Append after the current tail: CPU-side list first, then point our
 * hardware link at whatever the old tail pointed to, and only then
 * patch the tail's link to us so the HC never sees a broken chain. */
188 list_add_tail(&td->fl_list, &ftd->fl_list);
190 td->link = ltd->link;
192 ltd->link = cpu_to_le32(td->dma_handle);
/* Empty slot: inherit the frame's old link and become the new head in
 * both the hardware frame list and the CPU shadow array. */
194 td->link = uhci->fl->frame[framenum];
196 uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
197 uhci->fl->frame_cpu[framenum] = td;
/* Remove a TD from the frame list, fixing up the hardware links and
 * the CPU shadow (frame_cpu) around it. Safe to call on a TD that was
 * never inserted (frame == -1 and an empty fl_list). */
201 static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
203 /* If it's not inserted, don't remove it */
204 if (td->frame == -1 && list_empty(&td->fl_list))
/* TD is the head of its frame's chain */
207 if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
208 if (list_empty(&td->fl_list)) {
/* Sole TD in the frame: the frame inherits our link and the
 * shadow entry is cleared. */
209 uhci->fl->frame[td->frame] = td->link;
210 uhci->fl->frame_cpu[td->frame] = NULL;
/* Otherwise promote the next TD in the chain to frame head. */
214 ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
215 uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
216 uhci->fl->frame_cpu[td->frame] = ntd;
/* Mid-chain: splice the predecessor's hardware link past us. */
221 ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
222 ptd->link = td->link;
/* Re-terminate our own link and drop off the CPU-side chain. */
226 td->link = UHCI_PTR_TERM;
228 list_del_init(&td->fl_list);
233 * Inserts a td into qh list at the top.
235 static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, u32 breadth)
237 struct list_head *tmp, *head;
238 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
239 struct uhci_td *td, *ptd;
/* Nothing to do for an URB with no TDs. */
241 if (list_empty(&urbp->td_list))
244 head = &urbp->td_list;
247 /* Ordering isn't important here yet since the QH hasn't been */
248 /* inserted into the schedule yet */
249 td = list_entry(tmp, struct uhci_td, list);
251 /* Add the first TD to the QH element pointer */
/* 'breadth' is OR-ed into the link (UHCI_PTR_BREADTH/UHCI_PTR_DEPTH
 * select the HC's traversal mode). */
252 qh->element = cpu_to_le32(td->dma_handle) | breadth;
256 /* Then link the rest of the TD's */
258 while (tmp != head) {
259 td = list_entry(tmp, struct uhci_td, list);
/* Chain each previous TD's hardware link to the current TD. */
263 ptd->link = cpu_to_le32(td->dma_handle) | breadth;
/* Terminate the chain after the last TD. */
268 ptd->link = UHCI_PTR_TERM;
/* Return a TD to the DMA pool. Warns (but proceeds) if the TD is
 * still linked on any of its three lists — that would indicate a
 * lifetime bug elsewhere. Drops the usb_device reference taken when
 * the TD was set up. */
271 static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
273 if (!list_empty(&td->list))
274 dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
275 if (!list_empty(&td->remove_list))
276 dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
277 if (!list_empty(&td->fl_list))
278 dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);
281 usb_put_dev(td->dev);
283 dma_pool_free(uhci->td_pool, td, td->dma_handle);
/* Allocate a queue head from the HC-visible DMA pool and initialize
 * it: record its bus address, terminate both hardware pointers, and
 * set up its list heads. Atomic context; returns NULL on failure. */
286 static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
288 dma_addr_t dma_handle;
291 qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
295 qh->dma_handle = dma_handle;
/* Both the element (first TD) and link (next QH) pointers start
 * terminated; they are patched when the QH is scheduled. */
297 qh->element = UHCI_PTR_TERM;
298 qh->link = UHCI_PTR_TERM;
303 INIT_LIST_HEAD(&qh->list);
304 INIT_LIST_HEAD(&qh->remove_list);
/* Return a QH to the DMA pool. Warns if it is still on a list (it
 * should have been unlinked first), and drops the device reference. */
311 static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
313 if (!list_empty(&qh->list))
314 dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
315 if (!list_empty(&qh->remove_list))
316 dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);
319 usb_put_dev(qh->dev);
321 dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
325 * Append this urb's qh after the last qh in skelqh->list
327 * Note that urb_priv.queue_list doesn't have a separate queue head;
328 * it's a ring with every element "live".
330 static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
332 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
333 struct list_head *tmp;
336 /* Grab the last QH */
337 lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);
339 /* Point to the next skelqh */
/* Our QH inherits the old tail's link before the tail is patched to
 * point at us; the wmb() keeps the HC from seeing us half-linked. */
340 urbp->qh->link = lqh->link;
341 wmb(); /* Ordering is important */
344 * Patch QHs for previous endpoint's queued URBs? HC goes
345 * here next, not to the next skelqh it now points to.
347 * lqh --> td ... --> qh ... --> td --> qh ... --> td
350 * +<----------------+-----------------+
352 * newqh --> td ... --> td
357 * The HC could see (and use!) any of these as we write them.
359 lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
/* Every queued URB behind the old tail must also point at us now. */
361 list_for_each (tmp, &lqh->urbp->queue_list) {
362 struct urb_priv *turbp =
363 list_entry(tmp, struct urb_priv, queue_list);
365 turbp->qh->link = lqh->link;
/* Finally record the QH on the skeleton's CPU-side list. */
369 list_add_tail(&urbp->qh->list, &skelqh->list);
373 * Start removal of QH from schedule; it finishes next frame.
374 * TDs should be unlinked before this is called.
376 static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
386 * Only go through the hoops if it's actually linked in
388 if (!list_empty(&qh->list)) {
390 /* If our queue is nonempty, make the next URB the head */
391 if (!list_empty(&qh->urbp->queue_list)) {
392 struct urb_priv *nurbp;
394 nurbp = list_entry(qh->urbp->queue_list.next,
395 struct urb_priv, queue_list);
/* Successor's QH takes our slot; 'newlink' is what the
 * predecessor(s) must now point at. */
397 list_add(&nurbp->qh->list, &qh->list);
398 newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
402 /* Fix up the previous QH's queue to link to either
403 * the new head of this queue or the start of the
404 * next endpoint's queue. */
405 pqh = list_entry(qh->list.prev, struct uhci_qh, list);
408 struct list_head *head, *tmp;
410 head = &pqh->urbp->queue_list;
/* All URBs queued behind the previous QH need the same
 * redirected link. */
412 while (head != tmp) {
413 struct urb_priv *turbp =
414 list_entry(tmp, struct urb_priv, queue_list);
418 turbp->qh->link = newlink;
423 /* Leave qh->link in case the HC is on the QH now, it will */
424 /* continue the rest of the schedule */
425 qh->element = UHCI_PTR_TERM;
427 list_del_init(&qh->list);
430 list_del_init(&qh->urbp->queue_list);
/* Batch pending QH frees: flush the remove list once per frame. */
433 age = uhci_get_current_frame_number(uhci);
434 if (age != uhci->qh_remove_age) {
435 uhci_free_pending_qhs(uhci);
436 uhci->qh_remove_age = age;
439 /* Check to see if the remove list is empty. Set the IOC bit */
440 /* to force an interrupt so we can remove the QH */
441 if (list_empty(&uhci->qh_remove_list))
442 uhci_set_next_interrupt(uhci);
444 list_add(&qh->remove_list, &uhci->qh_remove_list);
/* Walk an URB's TD chain and rewrite each TD's data-toggle bit,
 * starting from 'toggle' and (presumably) alternating per TD —
 * the flip between iterations is not visible in this sampled view.
 * Returns the toggle value to continue with (NOTE(review): return
 * statement not visible here — confirm against full source). */
447 static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
449 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
450 struct list_head *head, *tmp;
452 head = &urbp->td_list;
454 while (head != tmp) {
455 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
460 td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
462 td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
471 /* This function will append one URB's QH to another URB's QH. This is for */
472 /* queuing interrupt, control or bulk transfers */
473 static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
475 struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
476 struct list_head *tmp;
477 struct uhci_td *lltd;
479 eurbp = eurb->hcpriv;
482 /* Find the first URB in the queue */
484 struct list_head *head = &eurbp->queue_list;
487 while (tmp != head) {
488 struct urb_priv *turbp =
489 list_entry(tmp, struct urb_priv, queue_list);
497 tmp = &eurbp->queue_list;
/* furbp/lurbp = first and last URBs currently in the queue ring. */
499 furbp = list_entry(tmp, struct urb_priv, queue_list);
500 lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);
/* lltd = last TD of the last queued URB; we chain after it. */
502 lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);
504 /* Control transfers always start with toggle 0 */
/* For bulk/interrupt, continue the toggle sequence from the last
 * queued TD's toggle, flipped. */
505 if (!usb_pipecontrol(urb->pipe))
506 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
507 usb_pipeout(urb->pipe),
508 uhci_fixup_toggle(urb,
509 uhci_toggle(td_token(lltd)) ^ 1));
511 /* All qh's in the queue need to link to the next queue */
512 urbp->qh->link = eurbp->qh->link;
514 wmb(); /* Make sure we flush everything */
/* Hardware hand-off: the last TD of the previous URB now points at
 * our QH, making the new URB visible to the HC. */
516 lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
518 list_add_tail(&urbp->queue_list, &furbp->queue_list);
/* Remove one URB from the middle or end of an endpoint's queue,
 * fixing up both the data toggles of the URBs behind it and the
 * hardware link of the URB in front of it. The head-of-queue case is
 * handled by uhci_remove_qh() instead. */
523 static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
525 struct urb_priv *urbp, *nurbp;
526 struct list_head *head, *tmp;
527 struct urb_priv *purbp;
528 struct uhci_td *pltd;
/* Not queued behind anything — nothing to fix up. */
533 if (list_empty(&urbp->queue_list))
536 nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);
539 * Fix up the toggle for the following URBs in the queue.
540 * Only needed for bulk and interrupt: control and isochronous
541 * endpoints don't propagate toggles between messages.
543 if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
545 /* We just set the toggle in uhci_unlink_generic */
546 toggle = usb_gettoggle(urb->dev,
547 usb_pipeendpoint(urb->pipe),
548 usb_pipeout(urb->pipe));
550 /* If we're in the middle of the queue, grab the */
551 /* toggle from the TD previous to us */
552 purbp = list_entry(urbp->queue_list.prev,
553 struct urb_priv, queue_list);
554 pltd = list_entry(purbp->td_list.prev,
555 struct uhci_td, list);
556 toggle = uhci_toggle(td_token(pltd)) ^ 1;
/* Re-run the toggle sequence through every following URB. */
559 head = &urbp->queue_list;
561 while (head != tmp) {
562 struct urb_priv *turbp;
564 turbp = list_entry(tmp, struct urb_priv, queue_list);
569 toggle = uhci_fixup_toggle(turbp->urb, toggle);
/* Record the final toggle for the next submission on this endpoint. */
572 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
573 usb_pipeout(urb->pipe), toggle);
577 /* We're somewhere in the middle (or end). The case where
578 * we're at the head is handled in uhci_remove_qh(). */
579 purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
582 pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
/* Splice the predecessor's last TD past us, to the next URB's QH. */
584 pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
586 /* The next URB happens to be the beginning, so */
587 /* we're the last, end the chain */
588 pltd->link = UHCI_PTR_TERM;
591 /* urbp->queue_list is handled in uhci_remove_qh() */
/* Allocate and zero per-URB private state from the slab cache, stamp
 * the submit/FSBR timestamps, initialize its list heads, and add it
 * to the controller's URB list. Atomic context; returns NULL on
 * allocation failure. */
594 static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
596 struct urb_priv *urbp;
598 urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
602 memset((void *)urbp, 0, sizeof(*urbp));
/* Used later for IDLE_TIMEOUT / FSBR_DELAY bookkeeping. */
604 urbp->inserttime = jiffies;
605 urbp->fsbrtime = jiffies;
608 INIT_LIST_HEAD(&urbp->td_list);
609 INIT_LIST_HEAD(&urbp->queue_list);
610 INIT_LIST_HEAD(&urbp->urb_list);
612 list_add_tail(&urbp->urb_list, &uhci->urb_list);
/* Append a TD to the tail of its URB's TD list. */
619 static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
621 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
625 list_add_tail(&td->list, &urbp->td_list);
/* Detach a TD from its URB's TD list; no-op if it isn't on one. */
628 static void uhci_remove_td_from_urb(struct uhci_td *td)
630 if (list_empty(&td->list))
633 list_del_init(&td->list);
/* Tear down an URB's private state: move all its TDs onto the
 * controller's deferred-free list (they cannot be freed until the HC
 * is guaranteed not to be using them) and free the urb_priv itself. */
638 static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
640 struct list_head *head, *tmp;
641 struct urb_priv *urbp;
644 urbp = (struct urb_priv *)urb->hcpriv;
/* Caller should already have unlinked the URB. */
648 if (!list_empty(&urbp->urb_list))
649 dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
650 "or uhci->remove_list!\n", urb);
/* Batch pending TD frees: flush the remove list once per frame. */
652 age = uhci_get_current_frame_number(uhci);
653 if (age != uhci->td_remove_age) {
654 uhci_free_pending_tds(uhci);
655 uhci->td_remove_age = age;
658 /* Check to see if the remove list is empty. Set the IOC bit */
659 /* to force an interrupt so we can remove the TD's*/
660 if (list_empty(&uhci->td_remove_list))
661 uhci_set_next_interrupt(uhci);
663 head = &urbp->td_list;
665 while (tmp != head) {
666 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
/* Pull each TD off the URB and the frame list, then queue it
 * for deferred freeing. */
670 uhci_remove_td_from_urb(td);
671 uhci_remove_td(uhci, td);
672 list_add(&td->remove_list, &uhci->td_remove_list);
676 kmem_cache_free(uhci_up_cachep, urbp);
/* Enable Full Speed Bandwidth Reclamation for this URB (unless the
 * URB opted out via URB_NO_FSBR). When the controller-wide FSBR count
 * goes 0 -> 1 and no FSBR-off delay is pending, loop the terminating
 * QH back to the FS control queue so the HC keeps polling. */
679 static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
681 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
683 if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
685 if (!uhci->fsbr++ && !uhci->fsbrtimeout)
686 uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
/* Drop this URB's FSBR participation. Rather than breaking the loop
 * immediately, arm a delayed turn-off (FSBR_DELAY) — presumably so
 * back-to-back transfers don't thrash the loop on/off. */
690 static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
692 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
694 if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
697 uhci->fsbrtimeout = jiffies + FSBR_DELAY;
702 * Map status to standard result codes
704 * <status> is (td->status & 0xF60000) [a.k.a. uhci_status_bits(td->status)]
705 * Note: status does not include the TD_CTRL_NAK bit.
706 * <dir_out> is True for output TDs and False for input TDs.
708 static int uhci_map_status(int status, int dir_out)
/* Each hardware error bit maps to one USB error code; the returned
 * codes themselves are on lines not visible in this sampled view. */
712 if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */
714 if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */
720 if (status & TD_CTRL_BABBLE) /* Babble */
722 if (status & TD_CTRL_DBUFERR) /* Buffer error */
724 if (status & TD_CTRL_STALLED) /* Stalled */
/* An active TD should never reach status mapping. */
726 WARN_ON(status & TD_CTRL_ACTIVE); /* Active */
/* Build and schedule a control transfer: one SETUP TD, zero or more
 * DATA TDs (maxpacket-sized, alternating toggle starting at DATA1),
 * and a final opposite-direction status TD. The TD chain is wrapped
 * in a fresh QH, which is either appended to an existing queue for
 * the same endpoint (eurb) or inserted on a skeleton QH. */
733 static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
735 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
737 struct uhci_qh *qh, *skelqh;
738 unsigned long destination, status;
739 int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
740 int len = urb->transfer_buffer_length;
741 dma_addr_t data = urb->transfer_dma;
743 /* The "pipe" thing contains the destination in bits 8--18 */
744 destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
/* 3 retry attempts per TD; low-speed devices need the LS bit. */
747 status = TD_CTRL_ACTIVE | uhci_maxerr(3);
748 if (urb->dev->speed == USB_SPEED_LOW)
749 status |= TD_CTRL_LS;
752 * Build the TD for the control request setup packet
754 td = uhci_alloc_td(uhci, urb->dev);
/* explen(7) == the 8-byte setup packet (length is encoded as n-1). */
758 uhci_add_td_to_urb(urb, td);
759 uhci_fill_td(td, status, destination | uhci_explen(7),
763 * If direction is "send", change the packet ID from SETUP (0x2D)
764 * to OUT (0xE1). Else change it from SETUP to IN (0x69) and
765 * set Short Packet Detect (SPD) for all data packets.
767 if (usb_pipeout(urb->pipe))
768 destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
770 destination ^= (USB_PID_SETUP ^ USB_PID_IN);
771 status |= TD_CTRL_SPD;
775 * Build the DATA TD's
783 td = uhci_alloc_td(uhci, urb->dev);
787 /* Alternate Data0/1 (start with Data1) */
788 destination ^= TD_TOKEN_TOGGLE;
790 uhci_add_td_to_urb(urb, td);
791 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
799 * Build the final TD for control status
801 td = uhci_alloc_td(uhci, urb->dev);
806 * It's IN if the pipe is an output pipe or we're not expecting
809 destination &= ~TD_TOKEN_PID_MASK;
810 if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
811 destination |= USB_PID_IN;
813 destination |= USB_PID_OUT;
815 destination |= TD_TOKEN_TOGGLE; /* End in Data1 */
/* No SPD on the zero-length status packet; IOC so completion fires. */
817 status &= ~TD_CTRL_SPD;
819 uhci_add_td_to_urb(urb, td);
820 uhci_fill_td(td, status | TD_CTRL_IOC,
821 destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);
823 qh = uhci_alloc_qh(uhci, urb->dev);
830 uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
832 /* Low-speed transfers get a different queue, and won't hog the bus */
833 if (urb->dev->speed == USB_SPEED_LOW)
834 skelqh = uhci->skel_ls_control_qh;
836 skelqh = uhci->skel_fs_control_qh;
/* Full-speed control transfers participate in FSBR. */
837 uhci_inc_fsbr(uhci, urb);
841 uhci_append_queued_urb(uhci, eurb, urb);
843 uhci_insert_qh(uhci, skelqh, urb);
849 * If control-IN transfer was short, the status packet wasn't sent.
850 * This routine changes the element pointer in the QH to point at the
851 * status TD. It's safe to do this even while the QH is live, because
852 * the hardware only updates the element pointer following a successful
853 * transfer. The inactive TD for the short packet won't cause an update,
854 * so the pointer won't get overwritten. The next time the controller
855 * sees this QH, it will send the status packet.
857 static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
859 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
/* Remember we skipped ahead, so uhci_result_control() knows to go
 * straight to checking the status stage next time. */
862 urbp->short_control_packet = 1;
/* The status TD is always the last TD on the URB's chain. */
864 td = list_entry(urbp->td_list.prev, struct uhci_td, list);
865 urbp->qh->element = td->dma_handle;
/* Examine a control URB's TD chain after the interrupt: check the
 * SETUP stage, accumulate actual_length over the data TDs, handle
 * short packets (retriggering the status stage for control-IN), then
 * check the status stage. Returns -EINPROGRESS while any stage is
 * still active, 0 on success, or a mapped error code. */
871 static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
873 struct list_head *tmp, *head;
874 struct urb_priv *urbp = urb->hcpriv;
879 if (list_empty(&urbp->td_list))
882 head = &urbp->td_list;
/* After a short-packet retrigger, skip straight to the status TD. */
884 if (urbp->short_control_packet) {
890 td = list_entry(tmp, struct uhci_td, list);
892 /* The first TD is the SETUP stage, check the status, but skip */
894 status = uhci_status_bits(td_status(td));
895 if (status & TD_CTRL_ACTIVE)
901 urb->actual_length = 0;
903 /* The rest of the TD's (but the last) are data */
905 while (tmp != head && tmp->next != head) {
906 td = list_entry(tmp, struct uhci_td, list);
910 status = uhci_status_bits(td_status(td));
911 if (status & TD_CTRL_ACTIVE)
914 urb->actual_length += uhci_actual_length(td_status(td));
919 /* Check to see if we received a short packet */
920 if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
/* Short reads are an error only when the URB asked for that. */
921 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
/* Control-IN short packet: the status stage was skipped by the HC;
 * point the QH at the status TD so it still gets sent. */
926 if (uhci_packetid(td_token(td)) == USB_PID_IN)
927 return usb_control_retrigger_status(uhci, urb);
934 td = list_entry(tmp, struct uhci_td, list);
936 /* Control status stage */
937 status = td_status(td);
939 #ifdef I_HAVE_BUGGY_APC_BACKUPS
940 /* APC BackUPS Pro kludge */
941 /* It tries to send all of the descriptor instead of the amount */
943 if (status & TD_CTRL_IOC && /* IOC is masked out by uhci_status_bits */
944 status & TD_CTRL_ACTIVE &&
945 status & TD_CTRL_NAK)
949 status = uhci_status_bits(status);
950 if (status & TD_CTRL_ACTIVE)
959 ret = uhci_map_status(status, uhci_packetout(td_token(td)));
/* On failure, optionally dump the QH chain per the debug level. */
962 if ((debug == 1 && ret != -EPIPE) || debug > 1) {
963 /* Some debugging code */
964 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
965 __FUNCTION__, status);
968 /* Print the chain for debugging purposes */
969 uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
979 * Common submit for bulk and interrupt
981 static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
985 unsigned long destination, status;
986 int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
987 int len = urb->transfer_buffer_length;
988 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
989 dma_addr_t data = urb->transfer_dma;
994 /* The "pipe" thing contains the destination in bits 8--18 */
995 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
/* 3 retries per TD; LS bit for low-speed; SPD on IN so the HC stops
 * the queue on a short read. */
997 status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
998 if (urb->dev->speed == USB_SPEED_LOW)
999 status |= TD_CTRL_LS;
1000 if (usb_pipein(urb->pipe))
1001 status |= TD_CTRL_SPD;
1004 * Build the DATA TD's
1006 do { /* Allow zero length packets */
1007 int pktsze = maxsze;
/* Last (possibly partial) packet: short-packet detect is only kept
 * if the URB considers a short transfer an error. */
1009 if (pktsze >= len) {
1011 if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
1012 status &= ~TD_CTRL_SPD;
1015 td = uhci_alloc_td(uhci, urb->dev);
/* Toggle comes from the endpoint's saved state and is advanced
 * (usb_dotoggle) after each packet. */
1019 uhci_add_td_to_urb(urb, td);
1020 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
1021 (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1022 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
1028 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1029 usb_pipeout(urb->pipe));
1033 * URB_ZERO_PACKET means adding a 0-length packet, if direction
1034 * is OUT and the transfer_length was an exact multiple of maxsze,
1035 * hence (len = transfer_length - N * maxsze) == 0
1036 * however, if transfer_length == 0, the zero packet was already
1039 if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
1040 !len && urb->transfer_buffer_length) {
1041 td = uhci_alloc_td(uhci, urb->dev);
1045 uhci_add_td_to_urb(urb, td);
1046 uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
1047 (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1048 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
1051 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1052 usb_pipeout(urb->pipe));
1055 /* Set the interrupt-on-completion flag on the last packet.
1056 * A more-or-less typical 4 KB URB (= size of one memory page)
1057 * will require about 3 ms to transfer; that's a little on the
1058 * fast side but not enough to justify delaying an interrupt
1059 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
1061 td->status |= cpu_to_le32(TD_CTRL_IOC);
1063 qh = uhci_alloc_qh(uhci, urb->dev);
1070 /* Always breadth first */
1071 uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
/* Append to an existing queue for the endpoint, or schedule a new QH
 * on the caller-chosen skeleton. */
1074 uhci_append_queued_urb(uhci, eurb, urb);
1076 uhci_insert_qh(uhci, skelqh, urb);
1078 return -EINPROGRESS;
1082 * Common result for bulk and interrupt
1084 static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
1086 struct list_head *tmp, *head;
1087 struct urb_priv *urbp = urb->hcpriv;
1089 unsigned int status = 0;
1092 urb->actual_length = 0;
1094 head = &urbp->td_list;
/* Walk the TD chain, accumulating transferred bytes; bail out with
 * -EINPROGRESS as soon as an active (not yet executed) TD is found. */
1096 while (tmp != head) {
1097 td = list_entry(tmp, struct uhci_td, list);
1101 status = uhci_status_bits(td_status(td));
1102 if (status & TD_CTRL_ACTIVE)
1103 return -EINPROGRESS;
1105 urb->actual_length += uhci_actual_length(td_status(td));
/* Short packet: only an error if the URB asked for that. */
1110 if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
1111 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
1122 ret = uhci_map_status(status, uhci_packetout(td_token(td)));
1124 /* endpoint has stalled - mark it halted */
1125 usb_endpoint_halt(urb->dev, uhci_endpoint(td_token(td)),
1126 uhci_packetout(td_token(td)));
1130 * Enable this chunk of code if you want to see some more debugging.
1131 * But be careful, it has the tendency to starve out khubd and prevent
1132 * disconnects from happening successfully if you have a slow debug
1133 * log interface (like a serial console).
1136 if ((debug == 1 && ret != -EPIPE) || debug > 1) {
1137 /* Some debugging code */
1138 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
1139 __FUNCTION__, status);
1142 /* Print the chain for debugging purposes */
1143 uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
/* Submit a bulk URB on the bulk skeleton QH; rejects low-speed
 * devices (the USB spec has no low-speed bulk transfers). Enables
 * FSBR once the transfer is in flight. */
1152 static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1156 /* Can't have low-speed bulk transfers */
1157 if (urb->dev->speed == USB_SPEED_LOW)
1160 ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
1161 if (ret == -EINPROGRESS)
1162 uhci_inc_fsbr(uhci, urb);
/* Submit an interrupt URB on the skeleton QH matching its polling
 * interval. */
1167 static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1169 /* USB 1.1 interrupt transfers only involve one packet per interval;
1170 * that's the uhci_submit_common() "breadth first" policy. Drivers
1171 * can submit urbs of any length, but longer ones might need many
1172 * intervals to complete.
1174 return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
1178 * Isochronous transfers
/* Scan the pending URB list for in-progress URBs on the same pipe and
 * device, and report the frame range they occupy: *start = the first
 * such URB's start frame, *end = one past the last scheduled slot of
 * the last matching URB (mod UHCI_NUMFRAMES). Returns -1 when no
 * previous URB exists for this endpoint. */
1180 static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
1182 struct urb *last_urb = NULL;
1183 struct list_head *tmp, *head;
1186 head = &uhci->urb_list;
1188 while (tmp != head) {
1189 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1190 struct urb *u = up->urb;
1194 /* look for pending URB's with identical pipe handle */
1195 if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
1196 (u->status == -EINPROGRESS) && (u != urb)) {
1198 *start = u->start_frame;
1204 *end = (last_urb->start_frame + last_urb->number_of_packets *
1205 last_urb->interval) & (UHCI_NUMFRAMES-1);
1208 ret = -1; /* no previous urb found */
/* Choose urb->start_frame for an isochronous URB: ISO_ASAP URBs with
 * no predecessor start 10 frames after the current frame (scheduling
 * margin); otherwise continue where the previous URB on this endpoint
 * leaves off. Caps number_of_packets at 900 (historical limit — the
 * original comment questions it too). */
1213 static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
1216 unsigned int start = 0, end = 0;
1218 if (urb->number_of_packets > 900) /* 900? Why? */
1221 limits = isochronous_find_limits(uhci, urb, &start, &end);
1223 if (urb->transfer_flags & URB_ISO_ASAP) {
1227 curframe = uhci_get_current_frame_number(uhci) % UHCI_NUMFRAMES;
1228 urb->start_frame = (curframe + 10) % UHCI_NUMFRAMES;
1230 urb->start_frame = end;
1232 urb->start_frame %= UHCI_NUMFRAMES;
1233 /* FIXME: Sanity check */
1240 * Isochronous transfers
1242 static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1246 int status, destination;
/* IOS marks each TD isochronous: no retries, executed exactly once
 * in its scheduled frame. */
1248 status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
1249 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
1251 ret = isochronous_find_start(uhci, urb);
/* One TD per packet, placed directly into the frame list at
 * start_frame + i * interval; zero-length packets are skipped. */
1255 frame = urb->start_frame;
1256 for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
1257 if (!urb->iso_frame_desc[i].length)
1260 td = uhci_alloc_td(uhci, urb->dev);
1264 uhci_add_td_to_urb(urb, td);
1265 uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
1266 urb->transfer_dma + urb->iso_frame_desc[i].offset);
/* Interrupt only after the final packet completes. */
1268 if (i + 1 >= urb->number_of_packets)
1269 td->status |= cpu_to_le32(TD_CTRL_IOC);
1271 uhci_insert_td_frame_list(uhci, td, frame);
1274 return -EINPROGRESS;
/* Collect per-packet results for an isochronous URB: fill in each
 * iso_frame_desc's actual_length and status, and total up
 * urb->actual_length. Returns -EINPROGRESS while any TD is still
 * active. Per-packet errors are recorded but do not abort the walk. */
1277 static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1279 struct list_head *tmp, *head;
1280 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1284 urb->actual_length = 0;
1287 head = &urbp->td_list;
1289 while (tmp != head) {
1290 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1295 if (td_status(td) & TD_CTRL_ACTIVE)
1296 return -EINPROGRESS;
1298 actlength = uhci_actual_length(td_status(td));
1299 urb->iso_frame_desc[i].actual_length = actlength;
1300 urb->actual_length += actlength;
1302 status = uhci_map_status(uhci_status_bits(td_status(td)),
1303 usb_pipeout(urb->pipe));
1304 urb->iso_frame_desc[i].status = status;
/* Find an in-progress URB already queued for the same endpoint, so a
 * new submission can be appended to its queue. Isochronous URBs are
 * never matched (they are scheduled directly into the frame list).
 * For control pipes the direction bit is ignored, since the same
 * endpoint handles both directions. */
1316 static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
1318 struct list_head *tmp, *head;
1320 /* We don't match Isoc transfers since they are special */
1321 if (usb_pipeisoc(urb->pipe))
1324 head = &uhci->urb_list;
1326 while (tmp != head) {
1327 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1328 struct urb *u = up->urb;
1332 if (u->dev == urb->dev && u->status == -EINPROGRESS) {
1333 /* For control, ignore the direction */
1334 if (usb_pipecontrol(urb->pipe) &&
1335 (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
1337 else if (u->pipe == urb->pipe)
/* usb_hcd entry point: enqueue an URB. Under the schedule lock,
 * locate any URB already queued on the same endpoint, allocate the
 * per-URB state, then dispatch to the per-pipe-type submit routine.
 * Interrupt and isochronous submissions also reserve bus bandwidth
 * (or inherit it from the queue head URB). On any submit failure the
 * urb_priv is torn down before returning. */
1345 static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, int mem_flags)
1348 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1349 unsigned long flags;
1353 spin_lock_irqsave(&uhci->schedule_lock, flags);
1355 if (urb->status != -EINPROGRESS) /* URB already unlinked! */
1358 eurb = uhci_find_urb_ep(uhci, urb);
1360 if (!uhci_alloc_urb_priv(uhci, urb)) {
1365 switch (usb_pipetype(urb->pipe)) {
1367 ret = uhci_submit_control(uhci, urb, eurb);
1369 case PIPE_INTERRUPT:
/* New interrupt queue: must reserve bandwidth first. */
1371 bustime = usb_check_bandwidth(urb->dev, urb);
1375 ret = uhci_submit_interrupt(uhci, urb, eurb);
1376 if (ret == -EINPROGRESS)
1377 usb_claim_bandwidth(urb->dev, urb, bustime, 0);
1379 } else { /* inherit from parent */
1380 urb->bandwidth = eurb->bandwidth;
1381 ret = uhci_submit_interrupt(uhci, urb, eurb);
1385 ret = uhci_submit_bulk(uhci, urb, eurb);
1387 case PIPE_ISOCHRONOUS:
1388 bustime = usb_check_bandwidth(urb->dev, urb);
1394 ret = uhci_submit_isochronous(uhci, urb);
1395 if (ret == -EINPROGRESS)
1396 usb_claim_bandwidth(urb->dev, urb, bustime, 1);
1400 if (ret != -EINPROGRESS) {
1401 /* Submit failed, so delete it from the urb_list */
1402 struct urb_priv *urbp = urb->hcpriv;
1404 list_del_init(&urbp->urb_list);
1405 uhci_destroy_urb_priv(uhci, urb);
1410 spin_unlock_irqrestore(&uhci->schedule_lock, flags);
1415 * Return the result of a transfer
/* Poll one URB for completion (called with the schedule lock held;
 * takes urb->lock itself). Dispatches to the per-type result routine;
 * if the URB is done, releases any claimed bandwidth, unlinks its
 * hardware structures, and moves it to the completion list. */
1417 static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
1419 int ret = -EINPROGRESS;
1420 struct urb_priv *urbp;
1422 spin_lock(&urb->lock);
1424 urbp = (struct urb_priv *)urb->hcpriv;
1426 if (urb->status != -EINPROGRESS) /* URB already dequeued */
1429 switch (usb_pipetype(urb->pipe)) {
1431 ret = uhci_result_control(uhci, urb);
1434 case PIPE_INTERRUPT:
/* Bulk and interrupt share the common result path. */
1435 ret = uhci_result_common(uhci, urb);
1437 case PIPE_ISOCHRONOUS:
1438 ret = uhci_result_isochronous(uhci, urb);
/* Still running: leave it on the schedule. */
1442 if (ret == -EINPROGRESS)
1446 switch (usb_pipetype(urb->pipe)) {
1449 case PIPE_ISOCHRONOUS:
1450 /* Release bandwidth for Interrupt or Isoc. transfers */
1452 usb_release_bandwidth(urb->dev, urb, 1);
1453 uhci_unlink_generic(uhci, urb);
1455 case PIPE_INTERRUPT:
1456 /* Release bandwidth for Interrupt or Isoc. transfers */
1457 /* Make sure we don't release if we have a queued URB */
1458 if (list_empty(&urbp->queue_list) && urb->bandwidth)
1459 usb_release_bandwidth(urb->dev, urb, 0);
1461 /* bandwidth was passed on to queued URB, */
1462 /* so don't let usb_unlink_urb() release it */
1464 uhci_unlink_generic(uhci, urb);
1467 dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
1469 __FUNCTION__, usb_pipetype(urb->pipe), urb);
1472 /* Move it from uhci->urb_list to uhci->complete_list */
1473 uhci_moveto_complete(uhci, urbp);
1476 spin_unlock(&urb->lock);
/*
 * Common unlink path: drop FSBR, recover the endpoint's data toggle from
 * the TD chain, detach queued URBs, and schedule the QH for removal.
 */
1479 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
1481 struct list_head *head, *tmp;
1482 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1485 uhci_dec_fsbr(uhci, urb); /* Safe since it checks */
1488 * Now we need to find out what the last successful toggle was
1489 * so we can update the local data toggle for the next transfer
1491 * There's 3 way's the last successful completed TD is found:
1493 * 1) The TD is NOT active and the actual length < expected length
1494 * 2) The TD is NOT active and it's the last TD in the chain
1495 * 3) The TD is active and the previous TD is NOT active
1497 * Control and Isochronous ignore the toggle, so this is safe
1500 head = &urbp->td_list;
1502 while (tmp != head) {
1503 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
 /* Case 1/2: TD finished — next toggle is this TD's toggle flipped */
1507 if (!(td_status(td) & TD_CTRL_ACTIVE) &&
1508 (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td)) ||
1510 usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1511 uhci_packetout(td_token(td)),
1512 uhci_toggle(td_token(td)) ^ 1);
 /* Case 3: this TD never ran — its own toggle is the next one to use */
1513 else if ((td_status(td) & TD_CTRL_ACTIVE) && !prevactive)
1514 usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1515 uhci_packetout(td_token(td)),
1516 uhci_toggle(td_token(td)));
1518 prevactive = td_status(td) & TD_CTRL_ACTIVE;
1521 uhci_delete_queued_urb(uhci, urb);
1523 /* The interrupt loop will reclaim the QH's */
1524 uhci_remove_qh(uhci, urbp->qh);
/*
 * hc_driver.urb_dequeue entry point: cancel a previously queued URB.
 * The URB is unlinked from the schedule and parked on urb_remove_list;
 * actual reclamation happens later (after the HC has moved past it,
 * tracked via the frame-number "age" check).
 */
1528 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
1530 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1531 unsigned long flags;
1532 struct urb_priv *urbp;
1535 spin_lock_irqsave(&uhci->schedule_lock, flags);
1537 if (!urbp) /* URB was never linked! */
1539 list_del_init(&urbp->urb_list);
1541 uhci_unlink_generic(uhci, urb);
 /* If the frame number advanced, earlier pending removals are now safe */
1543 age = uhci_get_current_frame_number(uhci);
1544 if (age != uhci->urb_remove_age) {
1545 uhci_remove_pending_urbps(uhci);
1546 uhci->urb_remove_age = age;
1549 /* If we're the first, set the next interrupt bit */
1550 if (list_empty(&uhci->urb_remove_list))
1551 uhci_set_next_interrupt(uhci);
1552 list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);
1555 spin_unlock_irqrestore(&uhci->schedule_lock, flags);
/*
 * Full-Speed Bandwidth Reclamation timed out for @urb: disable FSBR for
 * it and demote most of its TDs to depth-first traversal so the transfer
 * stops hogging the bus.
 */
1559 static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
1561 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1562 struct list_head *head, *tmp;
1565 uhci_dec_fsbr(uhci, urb);
 /* Remember we already timed out so we don't do this twice */
1567 urbp->fsbr_timeout = 1;
1570 * Ideally we would want to fix qh->element as well, but it's
1571 * read/write by the HC, so that can introduce a race. It's not
1572 * really worth the hassle
1575 head = &urbp->td_list;
1577 while (tmp != head) {
1578 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1583 * Make sure we don't do the last one (since it'll have the
1584 * TERM bit set) as well as we skip every so many TD's to
1585 * make sure it doesn't hog the bandwidth
1587 if (tmp != head && (count % DEPTH_INTERVAL) == (DEPTH_INTERVAL - 1))
1588 td->link |= UHCI_PTR_DEPTH;
1597 * uhci_get_current_frame_number()
1599 * returns the current frame number for a USB bus/controller.
/* Read the HC's current frame number from the USBFRNUM I/O register. */
1601 static int uhci_get_current_frame_number(struct uhci_hcd *uhci)
1603 return inw(uhci->io_addr + USBFRNUM);
1606 static int init_stall_timer(struct usb_hcd *hcd);
/*
 * Periodic (re-armed ~every 100ms via init_stall_timer) housekeeping
 * timer: reclaims pending removals, enforces FSBR and per-URB timeouts,
 * disables FSBR when idle, and drives HC power-state transitions.
 */
1608 static void stall_callback(unsigned long ptr)
1610 struct usb_hcd *hcd = (struct usb_hcd *)ptr;
1611 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1612 struct list_head list, *tmp, *head;
1613 unsigned long flags;
1614 int called_uhci_finish_completion = 0;
 /* Local list collecting URBs that timed out, dequeued after unlock */
1616 INIT_LIST_HEAD(&list);
1618 spin_lock_irqsave(&uhci->schedule_lock, flags);
1619 if (!list_empty(&uhci->urb_remove_list) &&
1620 uhci_get_current_frame_number(uhci) != uhci->urb_remove_age) {
1621 uhci_remove_pending_urbps(uhci);
1622 uhci_finish_completion(hcd, NULL);
1623 called_uhci_finish_completion = 1;
 /* Scan all in-flight URBs for FSBR or submission timeouts */
1626 head = &uhci->urb_list;
1628 while (tmp != head) {
1629 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1630 struct urb *u = up->urb;
1634 spin_lock(&u->lock);
1636 /* Check if the FSBR timed out */
1637 if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
1638 uhci_fsbr_timeout(uhci, u);
1640 /* Check if the URB timed out */
1641 if (u->timeout && u->status == -EINPROGRESS &&
1642 time_after_eq(jiffies, up->inserttime + u->timeout)) {
1643 u->status = -ETIMEDOUT;
1644 list_move_tail(&up->urb_list, &list);
1647 spin_unlock(&u->lock);
1649 spin_unlock_irqrestore(&uhci->schedule_lock, flags);
1651 /* Wake up anyone waiting for an URB to complete */
1652 if (called_uhci_finish_completion)
1653 wake_up_all(&uhci->waitqh);
 /* Now dequeue the timed-out URBs, outside schedule_lock */
1657 while (tmp != head) {
1658 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1659 struct urb *u = up->urb;
1663 uhci_urb_dequeue(hcd, u);
1666 /* Really disable FSBR */
1667 if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
1668 uhci->fsbrtimeout = 0;
1669 uhci->skel_term_qh->link = UHCI_PTR_TERM;
1672 /* Poll for and perform state transitions */
1673 hc_state_transitions(uhci);
 /* Re-arm ourselves for the next poll interval */
1675 init_stall_timer(hcd);
/* (Re)arm the housekeeping timer to fire stall_callback in ~100ms. */
1678 static int init_stall_timer(struct usb_hcd *hcd)
1680 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1682 init_timer(&uhci->stall_timer);
1683 uhci->stall_timer.function = stall_callback;
1684 uhci->stall_timer.data = (unsigned long)hcd;
1685 uhci->stall_timer.expires = jiffies + msecs_to_jiffies(100);
1686 add_timer(&uhci->stall_timer);
/* Free every QH parked on qh_remove_list (HC is done with them). */
1691 static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
1693 struct list_head *tmp, *head;
1695 head = &uhci->qh_remove_list;
1697 while (tmp != head) {
1698 struct uhci_qh *qh = list_entry(tmp, struct uhci_qh, remove_list);
1702 list_del_init(&qh->remove_list);
1704 uhci_free_qh(uhci, qh);
/* Free every TD parked on td_remove_list (HC is done with them). */
1708 static void uhci_free_pending_tds(struct uhci_hcd *uhci)
1710 struct list_head *tmp, *head;
1712 head = &uhci->td_remove_list;
1714 while (tmp != head) {
1715 struct uhci_td *td = list_entry(tmp, struct uhci_td, remove_list);
1719 list_del_init(&td->remove_list);
1721 uhci_free_td(uhci, td);
/*
 * Tear down the URB's private state and give it back to the USB core.
 * schedule_lock is dropped around usb_hcd_giveback_urb() because the
 * completion handler may resubmit and re-enter this driver.
 */
1725 static void uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
1727 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1729 uhci_destroy_urb_priv(uhci, urb);
1731 spin_unlock(&uhci->schedule_lock);
1732 usb_hcd_giveback_urb(hcd, urb, regs);
1733 spin_lock(&uhci->schedule_lock);
/*
 * Give back every URB on complete_list.  The head pointer is re-read
 * each iteration (line 1750) because uhci_finish_urb drops the lock,
 * allowing the list to change underneath us.
 */
1736 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs)
1738 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1739 struct list_head *tmp, *head;
1741 head = &uhci->complete_list;
1743 while (tmp != head) {
1744 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
1745 struct urb *urb = urbp->urb;
1747 list_del_init(&urbp->urb_list);
1748 uhci_finish_urb(hcd, urb, regs);
1750 head = &uhci->complete_list;
/* Move all URBs awaiting removal onto the tail of the completion list. */
1755 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
1758 /* Splice the urb_remove_list onto the end of the complete_list */
1759 list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
/*
 * Interrupt handler: acknowledge the HC status, report fatal conditions,
 * reclaim deferred QHs/TDs/URBs once the frame number has advanced past
 * them, then poll every in-flight URB for completion and give back the
 * finished ones.
 */
1762 static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
1764 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1765 unsigned int io_addr = uhci->io_addr;
1766 unsigned short status;
1767 struct list_head *tmp, *head;
1771 * Read the interrupt status, and write it back to clear the
1772 * interrupt cause. Contrary to the UHCI specification, the
1773 * "HC Halted" status bit is persistent: it is RO, not R/WC.
1775 status = inw(io_addr + USBSTS);
1776 if (!(status & ~USBSTS_HCH)) /* shared interrupt, not mine */
1778 outw(status, io_addr + USBSTS); /* Clear it */
 /* Any bit besides "transfer done"/"error"/"resume" is trouble */
1780 if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
1781 if (status & USBSTS_HSE)
1782 dev_err(uhci_dev(uhci), "host system error, "
1784 if (status & USBSTS_HCPE)
1785 dev_err(uhci_dev(uhci), "host controller process "
1786 "error, something bad happened!\n");
1787 if ((status & USBSTS_HCH) && uhci->state > 0) {
1788 dev_err(uhci_dev(uhci), "host controller halted, "
1790 /* FIXME: Reset the controller, fix the offending TD */
 /* Resume Detect: note it; the state machine acts on it later */
1794 if (status & USBSTS_RD)
1795 uhci->resume_detect = 1;
1797 spin_lock(&uhci->schedule_lock);
 /* Frame advanced past the deferred removals => safe to free them now */
1799 age = uhci_get_current_frame_number(uhci);
1800 if (age != uhci->qh_remove_age)
1801 uhci_free_pending_qhs(uhci);
1802 if (age != uhci->td_remove_age)
1803 uhci_free_pending_tds(uhci);
1804 if (age != uhci->urb_remove_age)
1805 uhci_remove_pending_urbps(uhci);
1807 if (list_empty(&uhci->urb_remove_list) &&
1808 list_empty(&uhci->td_remove_list) &&
1809 list_empty(&uhci->qh_remove_list))
1810 uhci_clear_next_interrupt(uhci);
1812 uhci_set_next_interrupt(uhci);
1814 /* Walk the list of pending URB's to see which ones completed */
1815 head = &uhci->urb_list;
1817 while (tmp != head) {
1818 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
1819 struct urb *urb = urbp->urb;
1823 /* Checks the status and does all of the magic necessary */
1824 uhci_transfer_result(uhci, urb);
1826 uhci_finish_completion(hcd, regs);
1828 spin_unlock(&uhci->schedule_lock);
1830 /* Wake up anyone waiting for an URB to complete */
1831 wake_up_all(&uhci->waitqh);
/*
 * Hard-reset the controller: kill BIOS legacy support and interrupts,
 * then perform a global reset (spec calls for ~10ms; 50ms for safety).
 */
1836 static void reset_hc(struct uhci_hcd *uhci)
1838 unsigned int io_addr = uhci->io_addr;
1840 /* Turn off PIRQ, SMI, and all interrupts. This also turns off
1841 * the BIOS's USB Legacy Support.
1843 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0);
1844 outw(0, uhci->io_addr + USBINTR);
1846 /* Global reset for 50ms */
1847 uhci->state = UHCI_RESET;
1848 outw(USBCMD_GRESET, io_addr + USBCMD);
1850 outw(0, io_addr + USBCMD);
1852 /* Another 10ms delay */
1854 uhci->resume_detect = 0;
/* Put the HC into global suspend (Enter Global Suspend Mode bit). */
1857 static void suspend_hc(struct uhci_hcd *uhci)
1859 unsigned int io_addr = uhci->io_addr;
1861 dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
1862 uhci->state = UHCI_SUSPENDED;
1863 uhci->resume_detect = 0;
1864 outw(USBCMD_EGSM, io_addr + USBCMD);
/*
 * Advance the multi-step resume sequence, one step per call:
 * SUSPENDED -> signal global resume -> RESUMING_1 -> end resume ->
 * RESUMING_2 -> wait for EOP -> run (RUNNING_GRACE for 1s) -> RUNNING.
 */
1867 static void wakeup_hc(struct uhci_hcd *uhci)
1869 unsigned int io_addr = uhci->io_addr;
1871 switch (uhci->state) {
1872 case UHCI_SUSPENDED: /* Start the resume */
1873 dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
1875 /* Global resume for >= 20ms */
1876 outw(USBCMD_FGR | USBCMD_EGSM, io_addr + USBCMD);
1877 uhci->state = UHCI_RESUMING_1;
1878 uhci->state_end = jiffies + msecs_to_jiffies(20);
1881 case UHCI_RESUMING_1: /* End global resume */
1882 uhci->state = UHCI_RESUMING_2;
1883 outw(0, io_addr + USBCMD);
1886 case UHCI_RESUMING_2: /* Wait for EOP to be sent */
1887 if (inw(io_addr + USBCMD) & USBCMD_FGR)
1890 /* Run for at least 1 second, and
1891 * mark it configured with a 64-byte max packet */
1892 uhci->state = UHCI_RUNNING_GRACE;
1893 uhci->state_end = jiffies + HZ;
1894 outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP,
1898 case UHCI_RUNNING_GRACE: /* Now allowed to suspend */
1899 uhci->state = UHCI_RUNNING;
/* Return nonzero if any root-hub port has a device connected (CCS set). */
1907 static int ports_active(struct uhci_hcd *uhci)
1909 unsigned int io_addr = uhci->io_addr;
1913 for (i = 0; i < uhci->rh_numports; i++)
1914 connection |= (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_CCS);
/*
 * Decide whether global suspend is safe.  Non-Intel parts are always
 * allowed; on Intel HCs, refuse if any port reports over-current,
 * working around a false-resume hardware bug.
 */
1919 static int suspend_allowed(struct uhci_hcd *uhci)
1921 unsigned int io_addr = uhci->io_addr;
 /* Only Intel controllers exhibit the bug checked below */
1924 if (to_pci_dev(uhci_dev(uhci))->vendor != PCI_VENDOR_ID_INTEL)
1927 /* Some of Intel's USB controllers have a bug that causes false
1928 * resume indications if any port has an over current condition.
1929 * To prevent problems, we will not allow a global suspend if
1932 * Some motherboards using Intel's chipsets (but not using all
1933 * the USB ports) appear to hardwire the over current inputs active
1934 * to disable the USB ports.
1937 /* check for over current condition on any port */
1938 for (i = 0; i < uhci->rh_numports; i++) {
1939 if (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_OC)
/*
 * Power-management state machine, polled from the stall timer:
 * auto-suspend when idle, resume on activity/resume-detect, and let
 * timed transitional states expire via state_end.
 */
1946 static void hc_state_transitions(struct uhci_hcd *uhci)
1948 switch (uhci->state) {
1951 /* global suspend if nothing connected for 1 second */
1952 if (!ports_active(uhci) && suspend_allowed(uhci)) {
1953 uhci->state = UHCI_SUSPENDING_GRACE;
1954 uhci->state_end = jiffies + HZ;
1958 case UHCI_SUSPENDING_GRACE:
1959 if (ports_active(uhci))
1960 uhci->state = UHCI_RUNNING;
1961 else if (time_after_eq(jiffies, uhci->state_end))
1965 case UHCI_SUSPENDED:
1967 /* wakeup if requested by a device */
1968 if (uhci->resume_detect)
 /* Transitional resume states: advance once their deadline passes */
1972 case UHCI_RESUMING_1:
1973 case UHCI_RESUMING_2:
1974 case UHCI_RUNNING_GRACE:
1975 if (time_after_eq(jiffies, uhci->state_end))
/*
 * Bring the controller out of reset and into the running state:
 * HCRESET, re-enable PIRQ and all interrupt sources, program the frame
 * list base, and set Run/Stop with 64-byte max-packet configuration.
 */
1984 static void start_hc(struct uhci_hcd *uhci)
1986 unsigned int io_addr = uhci->io_addr;
1990 * Reset the HC - this will force us to get a
1991 * new notification of any already connected
1992 * ports due to the virtual disconnect that it
1995 outw(USBCMD_HCRESET, io_addr + USBCMD);
 /* Poll until the HC clears HCRESET itself (bounded — timeout below) */
1996 while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
1998 dev_err(uhci_dev(uhci), "USBCMD_HCRESET timed out!\n");
2003 /* Turn on PIRQ and all interrupts */
2004 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
2006 outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
2009 /* Start at frame 0 */
2010 outw(0, io_addr + USBFRNUM);
2011 outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);
2013 /* Run and mark it configured with a 64-byte max packet */
2014 uhci->state = UHCI_RUNNING_GRACE;
2015 uhci->state_end = jiffies + HZ;
2016 outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
2018 uhci->hcd.state = USB_STATE_RUNNING;
2022 * De-allocate all resources..
/*
 * Free everything uhci_start allocated: skeleton QHs, the terminating
 * TD, both DMA pools, the frame list, and the procfs entry.  Each
 * pointer is NULLed after freeing so the function is safe to call on a
 * partially initialized uhci.
 */
2024 static void release_uhci(struct uhci_hcd *uhci)
2028 for (i = 0; i < UHCI_NUM_SKELQH; i++)
2029 if (uhci->skelqh[i]) {
2030 uhci_free_qh(uhci, uhci->skelqh[i]);
2031 uhci->skelqh[i] = NULL;
2034 if (uhci->term_td) {
2035 uhci_free_td(uhci, uhci->term_td);
2036 uhci->term_td = NULL;
2039 if (uhci->qh_pool) {
2040 dma_pool_destroy(uhci->qh_pool);
2041 uhci->qh_pool = NULL;
2044 if (uhci->td_pool) {
2045 dma_pool_destroy(uhci->td_pool);
2046 uhci->td_pool = NULL;
2050 dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2051 uhci->fl, uhci->fl->dma_handle);
2055 #ifdef CONFIG_PROC_FS
2056 if (uhci->proc_entry) {
2057 remove_proc_entry(uhci->hcd.self.bus_name, uhci_proc_root);
2058 uhci->proc_entry = NULL;
/*
 * hc_driver.reset entry point: record the I/O base and quiesce the
 * hardware so stale BIOS setup can't interrupt us.
 */
2063 static int uhci_reset(struct usb_hcd *hcd)
2065 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2067 uhci->io_addr = (unsigned long) hcd->regs;
2069 /* Kick BIOS off this hardware and reset, so we won't get
2070 * interrupts from any previous setup.
2077 * Allocate a frame list, and then setup the skeleton
2079 * The hardware doesn't really know any difference
2080 * in the queues, but the order does matter for the
2081 * protocols higher up. The order is:
2083 * - any isochronous events handled before any
2084 * of the queues. We don't do that here, because
2085 * we'll create the actual TD entries on demand.
2086 * - The first queue is the interrupt queue.
2087 * - The second queue is the control queue, split into low- and full-speed
2088 * - The third queue is bulk queue.
2089 * - The fourth queue is the bandwidth reclamation queue, which loops back
2090 * to the full-speed control queue.
/*
 * hc_driver.start entry point: allocate and wire up the whole schedule
 * (frame list, DMA pools, skeleton QHs, terminating TD), probe the
 * root-hub port count, register the root hub, and start the HC.
 * Error paths unwind in reverse allocation order via goto labels.
 */
2092 static int uhci_start(struct usb_hcd *hcd)
2094 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2095 int retval = -EBUSY;
2098 dma_addr_t dma_handle;
2099 struct usb_device *udev;
2100 #ifdef CONFIG_PROC_FS
2101 struct proc_dir_entry *ent;
2104 io_size = pci_resource_len(to_pci_dev(uhci_dev(uhci)), hcd->region);
2106 #ifdef CONFIG_PROC_FS
2107 ent = create_proc_entry(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
2109 dev_err(uhci_dev(uhci), "couldn't create uhci proc entry\n");
2111 goto err_create_proc_entry;
2115 ent->proc_fops = &uhci_proc_operations;
2117 uhci->proc_entry = ent;
2121 uhci->fsbrtimeout = 0;
2123 spin_lock_init(&uhci->schedule_lock);
2124 INIT_LIST_HEAD(&uhci->qh_remove_list);
2126 INIT_LIST_HEAD(&uhci->td_remove_list);
2128 INIT_LIST_HEAD(&uhci->urb_remove_list);
2130 INIT_LIST_HEAD(&uhci->urb_list);
2132 INIT_LIST_HEAD(&uhci->complete_list);
2134 init_waitqueue_head(&uhci->waitqh);
 /* The frame list must be DMA-coherent: the HC reads it every frame */
2136 uhci->fl = dma_alloc_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2139 dev_err(uhci_dev(uhci), "unable to allocate "
2140 "consistent memory for frame list\n");
2144 memset((void *)uhci->fl, 0, sizeof(*uhci->fl));
2146 uhci->fl->dma_handle = dma_handle;
 /* 16-byte alignment is required by the UHCI spec for TDs and QHs */
2148 uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
2149 sizeof(struct uhci_td), 16, 0);
2150 if (!uhci->td_pool) {
2151 dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
2152 goto err_create_td_pool;
2155 uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
2156 sizeof(struct uhci_qh), 16, 0);
2157 if (!uhci->qh_pool) {
2158 dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
2159 goto err_create_qh_pool;
2162 /* Initialize the root hub */
2164 /* UHCI specs says devices must have 2 ports, but goes on to say */
2165 /* they may have more but give no way to determine how many they */
2166 /* have. However, according to the UHCI spec, Bit 7 is always set */
2167 /* to 1. So we try to use this to our advantage */
2168 for (port = 0; port < (io_size - 0x10) / 2; port++) {
2169 unsigned int portstatus;
2171 portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
2172 if (!(portstatus & 0x0080))
2176 dev_info(uhci_dev(uhci), "detected %d ports\n", port);
2178 /* This is experimental so anything less than 2 or greater than 8 is */
2179 /* something weird and we'll ignore it */
2180 if (port < 2 || port > UHCI_RH_MAXCHILD) {
2181 dev_info(uhci_dev(uhci), "port count misdetected? "
2182 "forcing to 2 ports\n");
2186 uhci->rh_numports = port;
2188 udev = usb_alloc_dev(NULL, &hcd->self, 0);
2190 dev_err(uhci_dev(uhci), "unable to allocate root hub\n");
2191 goto err_alloc_root_hub;
2194 uhci->term_td = uhci_alloc_td(uhci, udev);
2195 if (!uhci->term_td) {
2196 dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
2197 goto err_alloc_term_td;
2200 for (i = 0; i < UHCI_NUM_SKELQH; i++) {
2201 uhci->skelqh[i] = uhci_alloc_qh(uhci, udev);
2202 if (!uhci->skelqh[i]) {
2203 dev_err(uhci_dev(uhci), "unable to allocate QH\n");
2204 goto err_alloc_skelqh;
2209 * 8 Interrupt queues; link all higher int queues to int1,
2210 * then link int1 to control and control to bulk
2212 uhci->skel_int128_qh->link =
2213 uhci->skel_int64_qh->link =
2214 uhci->skel_int32_qh->link =
2215 uhci->skel_int16_qh->link =
2216 uhci->skel_int8_qh->link =
2217 uhci->skel_int4_qh->link =
2218 uhci->skel_int2_qh->link =
2219 cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
2220 uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;
2222 uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
2223 uhci->skel_fs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
2224 uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;
2226 /* This dummy TD is to work around a bug in Intel PIIX controllers */
2227 uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
2228 (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
2229 uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);
2231 uhci->skel_term_qh->link = UHCI_PTR_TERM;
2232 uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);
2235 * Fill the frame list: make all entries point to the proper
2238 * The interrupt queues will be interleaved as evenly as possible.
2239 * There's not much to be done about period-1 interrupts; they have
2240 * to occur in every frame. But we can schedule period-2 interrupts
2241 * in odd-numbered frames, period-4 interrupts in frames congruent
2242 * to 2 (mod 4), and so on. This way each frame only has two
2243 * interrupt QHs, which will help spread out bandwidth utilization.
2245 for (i = 0; i < UHCI_NUMFRAMES; i++) {
2249 * ffs (Find First bit Set) does exactly what we need:
2250 * 1,3,5,... => ffs = 0 => use skel_int2_qh = skelqh[6],
2251 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc.
2252 * ffs > 6 => not on any high-period queue, so use
2253 * skel_int1_qh = skelqh[7].
2254 * Add UHCI_NUMFRAMES to insure at least one bit is set.
2256 irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES);
2260 /* Only place we don't use the frame list routines */
2261 uhci->fl->frame[i] = cpu_to_le32(uhci->skelqh[irq]->dma_handle);
2266 init_stall_timer(hcd);
2268 udev->speed = USB_SPEED_FULL;
2270 if (hcd_register_root(udev, &uhci->hcd) != 0) {
2271 dev_err(uhci_dev(uhci), "unable to start root hub\n");
2273 goto err_start_root_hub;
 /* Error unwinding: release resources in reverse allocation order */
2284 del_timer_sync(&uhci->stall_timer);
2287 for (i = 0; i < UHCI_NUM_SKELQH; i++)
2288 if (uhci->skelqh[i]) {
2289 uhci_free_qh(uhci, uhci->skelqh[i]);
2290 uhci->skelqh[i] = NULL;
2293 uhci_free_td(uhci, uhci->term_td);
2294 uhci->term_td = NULL;
2300 dma_pool_destroy(uhci->qh_pool);
2301 uhci->qh_pool = NULL;
2304 dma_pool_destroy(uhci->td_pool);
2305 uhci->td_pool = NULL;
2308 dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2309 uhci->fl, uhci->fl->dma_handle);
2313 #ifdef CONFIG_PROC_FS
2314 remove_proc_entry(hcd->self.bus_name, uhci_proc_root);
2315 uhci->proc_entry = NULL;
2317 err_create_proc_entry:
/*
 * hc_driver.stop entry point: cancel the housekeeping timer and flush
 * all remaining URBs/TDs/QHs.  Pending QHs/TDs are freed twice because
 * uhci_finish_completion (giveback) can queue new removals.
 */
2323 static void uhci_stop(struct usb_hcd *hcd)
2325 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2327 del_timer_sync(&uhci->stall_timer);
2330 * At this point, we're guaranteed that no new connects can be made
2331 * to this bus since there are no more parents
2336 spin_lock_irq(&uhci->schedule_lock);
2337 uhci_free_pending_qhs(uhci);
2338 uhci_free_pending_tds(uhci);
2339 uhci_remove_pending_urbps(uhci);
2340 uhci_finish_completion(hcd, NULL);
2342 uhci_free_pending_qhs(uhci);
2343 uhci_free_pending_tds(uhci);
2344 spin_unlock_irq(&uhci->schedule_lock);
2346 /* Wake up anyone waiting for an URB to complete */
2347 wake_up_all(&uhci->waitqh);
/*
 * hc_driver.suspend entry point.  Saves the 10-bit frame number so
 * uhci_resume can restore it; boards where suspend is unsafe are
 * handled differently (reset path — elided lines, TODO confirm).
 */
2353 static int uhci_suspend(struct usb_hcd *hcd, u32 state)
2355 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2357 /* Don't try to suspend broken motherboards, reset instead */
2358 if (suspend_allowed(uhci)) {
2360 uhci->saved_framenumber =
2361 inw(uhci->io_addr + USBFRNUM) & 0x3ff;
/*
 * hc_driver.resume entry point: restore bus mastering and, because some
 * systems lose UHCI register state across PM cycles, reprogram the
 * frame number, frame-list base, interrupt enables, and legacy support.
 */
2367 static int uhci_resume(struct usb_hcd *hcd)
2369 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2371 pci_set_master(to_pci_dev(uhci_dev(uhci)));
2373 if (uhci->state == UHCI_SUSPENDED) {
2376 * Some systems don't maintain the UHCI register values
2377 * during a PM suspend/resume cycle, so reinitialize
2378 * the Frame Number, Framelist Base Address, Interrupt
2379 * Enable, and Legacy Support registers.
2381 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
2383 outw(uhci->saved_framenumber, uhci->io_addr + USBFRNUM);
2384 outl(uhci->fl->dma_handle, uhci->io_addr + USBFLBASEADD);
2385 outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC |
2386 USBINTR_SP, uhci->io_addr + USBINTR);
 /* Force the resume state machine to run via the stall timer */
2387 uhci->resume_detect = 1;
2388 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
2394 uhci->hcd.state = USB_STATE_RUNNING;
/* Allocate and zero a uhci_hcd, returning its embedded usb_hcd. */
2399 static struct usb_hcd *uhci_hcd_alloc(void)
2401 struct uhci_hcd *uhci;
2403 uhci = (struct uhci_hcd *)kmalloc(sizeof(*uhci), GFP_KERNEL);
2407 memset(uhci, 0, sizeof(*uhci));
2408 uhci->hcd.product_desc = "UHCI Host Controller";
/* Counterpart to uhci_hcd_alloc: free the containing uhci_hcd. */
2412 static void uhci_hcd_free(struct usb_hcd *hcd)
2414 kfree(hcd_to_uhci(hcd));
2417 /* Are there any URBs for a particular device/endpoint on a given list? */
/*
 * Scan one URB list for any URB belonging to device @hdev on endpoint
 * @ep (endpoint number with the direction bit folded in, matching how
 * callers encode it).  Returns truthy if found — elided lines, TODO confirm.
 */
2418 static int urbs_for_ep_list(struct list_head *head,
2419 struct hcd_dev *hdev, int ep)
2421 struct urb_priv *urbp;
2423 list_for_each_entry(urbp, head, urb_list) {
2424 struct urb *urb = urbp->urb;
2426 if (hdev == urb->dev->hcpriv && ep ==
2427 (usb_pipeendpoint(urb->pipe) |
2428 usb_pipein(urb->pipe)))
2434 /* Are there any URBs for a particular device/endpoint? */
2435 static int urbs_for_ep(struct uhci_hcd *uhci, struct hcd_dev *hdev, int ep)
2439 spin_lock_irq(&uhci->schedule_lock);
2440 rc = (urbs_for_ep_list(&uhci->urb_list, hdev, ep) ||
2441 urbs_for_ep_list(&uhci->complete_list, hdev, ep) ||
2442 urbs_for_ep_list(&uhci->urb_remove_list, hdev, ep));
2443 spin_unlock_irq(&uhci->schedule_lock);
2447 /* Wait until all the URBs for a particular device/endpoint are gone */
2448 static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
2449 struct hcd_dev *hdev, int endpoint)
2451 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2453 wait_event_interruptible(uhci->waitqh,
2454 !urbs_for_ep(uhci, hdev, endpoint));
/* hc_driver.get_frame_number: thin wrapper over the register read. */
2457 static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
2459 return uhci_get_current_frame_number(hcd_to_uhci(hcd));
2462 static const char hcd_name[] = "uhci_hcd";
2464 static const struct hc_driver uhci_driver = {
2465 .description = hcd_name,
2467 /* Generic hardware linkage */
2471 /* Basic lifecycle operations */
2472 .reset = uhci_reset,
2473 .start = uhci_start,
2475 .suspend = uhci_suspend,
2476 .resume = uhci_resume,
2480 .hcd_alloc = uhci_hcd_alloc,
2481 .hcd_free = uhci_hcd_free,
2483 .urb_enqueue = uhci_urb_enqueue,
2484 .urb_dequeue = uhci_urb_dequeue,
2486 .endpoint_disable = uhci_hcd_endpoint_disable,
2487 .get_frame_number = uhci_hcd_get_frame_number,
2489 .hub_status_data = uhci_hub_status_data,
2490 .hub_control = uhci_hub_control,
2493 static const struct pci_device_id uhci_pci_ids[] = { {
2494 /* handle any USB UHCI controller */
2495 PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
2496 .driver_data = (unsigned long) &uhci_driver,
2497 }, { /* end: all zeroes */ }
2500 MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
2502 static struct pci_driver uhci_pci_driver = {
2503 .name = (char *)hcd_name,
2504 .id_table = uhci_pci_ids,
2506 .probe = usb_hcd_pci_probe,
2507 .remove = usb_hcd_pci_remove,
2510 .suspend = usb_hcd_pci_suspend,
2511 .resume = usb_hcd_pci_resume,
/*
 * Module init: allocate the debug error buffer, create the procfs root,
 * set up the urb_priv slab cache, and register the PCI driver.  Error
 * labels below unwind those steps in reverse order.
 */
2515 static int __init uhci_hcd_init(void)
2517 int retval = -ENOMEM;
2519 printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");
2525 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
2530 #ifdef CONFIG_PROC_FS
2531 uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, 0);
2532 if (!uhci_proc_root)
2536 uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
2537 sizeof(struct urb_priv), 0, 0, NULL, NULL);
2538 if (!uhci_up_cachep)
2541 retval = pci_module_init(&uhci_pci_driver);
 /* Unwind path on registration failure */
2548 if (kmem_cache_destroy(uhci_up_cachep))
2549 warn("not all urb_priv's were freed!");
2553 #ifdef CONFIG_PROC_FS
2554 remove_proc_entry("driver/uhci", 0);
/* Module exit: unregister the PCI driver and tear down init's resources. */
2566 static void __exit uhci_hcd_cleanup(void)
2568 pci_unregister_driver(&uhci_pci_driver);
2570 if (kmem_cache_destroy(uhci_up_cachep))
2571 warn("not all urb_priv's were freed!");
2573 #ifdef CONFIG_PROC_FS
2574 remove_proc_entry("driver/uhci", 0);
2581 module_init(uhci_hcd_init);
2582 module_exit(uhci_hcd_cleanup);
2584 MODULE_AUTHOR(DRIVER_AUTHOR);
2585 MODULE_DESCRIPTION(DRIVER_DESC);
2586 MODULE_LICENSE("GPL");