2 * Universal Host Controller Interface driver for USB.
4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
6 * (C) Copyright 1999 Linus Torvalds
7 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8 * (C) Copyright 1999 Randy Dunlap
9 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14 * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
16 * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
18 * Intel documents this fairly well, and as far as I know there
19 * are no royalties or anything like that, but even so there are
20 * people who decided that they want to do the same thing in a
21 * completely different way.
23 * WARNING! The USB documentation is downright evil. Most of it
24 * is just crap, written by a committee. You're better off ignoring
25 * most of it, the important stuff is:
26 * - the low-level protocol (fairly simple but lots of small details)
27 * - working around the horridness of the rest
30 #include <linux/config.h>
31 #ifdef CONFIG_USB_DEBUG
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/kernel.h>
39 #include <linux/init.h>
40 #include <linux/delay.h>
41 #include <linux/ioport.h>
42 #include <linux/sched.h>
43 #include <linux/slab.h>
44 #include <linux/smp_lock.h>
45 #include <linux/errno.h>
46 #include <linux/unistd.h>
47 #include <linux/interrupt.h>
48 #include <linux/spinlock.h>
49 #include <linux/debugfs.h>
51 #include <linux/dmapool.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/usb.h>
54 #include <linux/bitops.h>
56 #include <asm/uaccess.h>
59 #include <asm/system.h>
61 #include "../core/hcd.h"
67 #define DRIVER_VERSION "v2.2"
68 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
69 Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
71 #define DRIVER_DESC "USB Universal Host Controller Interface driver"
74 * debug = 0, no debugging messages
75 * debug = 1, dump failed URB's except for stalls
76 * debug = 2, dump all failed URB's (including stalls)
77 * show all queues in /debug/uhci/[pci_addr]
78 * debug = 3, show all TD's in URB's when dumping
85 module_param(debug, int, S_IRUGO | S_IWUSR);
86 MODULE_PARM_DESC(debug, "Debug level");
88 #define ERRBUF_LEN (32 * 1024)
91 #include "uhci-debug.c"
93 static kmem_cache_t *uhci_up_cachep; /* urb_priv */
95 static unsigned int uhci_get_current_frame_number(struct uhci_hcd *uhci);
96 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
97 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
98 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
99 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs);
100 static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
101 static void uhci_free_pending_tds(struct uhci_hcd *uhci);
103 static void hc_state_transitions(struct uhci_hcd *uhci);
105 /* If a transfer is still active after this much time, turn off FSBR */
106 #define IDLE_TIMEOUT msecs_to_jiffies(50)
107 #define FSBR_DELAY msecs_to_jiffies(50)
109 /* When we timeout an idle transfer for FSBR, we'll switch it over to */
110 /* depth first traversal. We'll do it in groups of this number of TD's */
111 /* to make sure it doesn't hog all of the bandwidth */
112 #define DEPTH_INTERVAL 5
115 * Technically, updating td->status here is a race, but it's not really a
116 * problem. The worst that can happen is that we set the IOC bit again
117 * generating a spurious interrupt. We could fix this by creating another
118 * QH and leaving the IOC bit always set, but then we would have to play
119 * games with the FSBR code to make sure we get the correct order in all
120 * the cases. I don't think it's worth the effort
122 static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
124 uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
127 static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
129 uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
132 static inline void uhci_moveto_complete(struct uhci_hcd *uhci,
133 struct urb_priv *urbp)
135 list_move_tail(&urbp->urb_list, &uhci->complete_list);
138 static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
140 dma_addr_t dma_handle;
143 td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
147 td->dma_handle = dma_handle;
149 td->link = UHCI_PTR_TERM;
155 INIT_LIST_HEAD(&td->list);
156 INIT_LIST_HEAD(&td->remove_list);
157 INIT_LIST_HEAD(&td->fl_list);
164 static inline void uhci_fill_td(struct uhci_td *td, u32 status,
165 u32 token, u32 buffer)
167 td->status = cpu_to_le32(status);
168 td->token = cpu_to_le32(token);
169 td->buffer = cpu_to_le32(buffer);
173 * We insert Isochronous URB's directly into the frame list at the beginning
175 static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
177 framenum &= (UHCI_NUMFRAMES - 1);
179 td->frame = framenum;
181 /* Is there a TD already mapped there? */
182 if (uhci->fl->frame_cpu[framenum]) {
183 struct uhci_td *ftd, *ltd;
185 ftd = uhci->fl->frame_cpu[framenum];
186 ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
188 list_add_tail(&td->fl_list, &ftd->fl_list);
190 td->link = ltd->link;
192 ltd->link = cpu_to_le32(td->dma_handle);
194 td->link = uhci->fl->frame[framenum];
196 uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
197 uhci->fl->frame_cpu[framenum] = td;
201 static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
203 /* If it's not inserted, don't remove it */
204 if (td->frame == -1 && list_empty(&td->fl_list))
207 if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
208 if (list_empty(&td->fl_list)) {
209 uhci->fl->frame[td->frame] = td->link;
210 uhci->fl->frame_cpu[td->frame] = NULL;
214 ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
215 uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
216 uhci->fl->frame_cpu[td->frame] = ntd;
221 ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
222 ptd->link = td->link;
226 td->link = UHCI_PTR_TERM;
228 list_del_init(&td->fl_list);
233 * Inserts a td list into qh.
235 static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, __le32 breadth)
237 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
241 /* Ordering isn't important here yet since the QH hasn't been */
242 /* inserted into the schedule yet */
243 plink = &qh->element;
244 list_for_each_entry(td, &urbp->td_list, list) {
245 *plink = cpu_to_le32(td->dma_handle) | breadth;
248 *plink = UHCI_PTR_TERM;
251 static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
253 if (!list_empty(&td->list))
254 dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
255 if (!list_empty(&td->remove_list))
256 dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
257 if (!list_empty(&td->fl_list))
258 dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);
261 usb_put_dev(td->dev);
263 dma_pool_free(uhci->td_pool, td, td->dma_handle);
266 static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
268 dma_addr_t dma_handle;
271 qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
275 qh->dma_handle = dma_handle;
277 qh->element = UHCI_PTR_TERM;
278 qh->link = UHCI_PTR_TERM;
283 INIT_LIST_HEAD(&qh->list);
284 INIT_LIST_HEAD(&qh->remove_list);
291 static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
293 if (!list_empty(&qh->list))
294 dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
295 if (!list_empty(&qh->remove_list))
296 dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);
299 usb_put_dev(qh->dev);
301 dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
305 * Append this urb's qh after the last qh in skelqh->list
307 * Note that urb_priv.queue_list doesn't have a separate queue head;
308 * it's a ring with every element "live".
310 static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
312 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
313 struct urb_priv *turbp;
316 /* Grab the last QH */
317 lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);
319 /* Point to the next skelqh */
320 urbp->qh->link = lqh->link;
321 wmb(); /* Ordering is important */
324 * Patch QHs for previous endpoint's queued URBs? HC goes
325 * here next, not to the next skelqh it now points to.
327 * lqh --> td ... --> qh ... --> td --> qh ... --> td
330 * +<----------------+-----------------+
332 * newqh --> td ... --> td
337 * The HC could see (and use!) any of these as we write them.
339 lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
341 list_for_each_entry(turbp, &lqh->urbp->queue_list, queue_list)
342 turbp->qh->link = lqh->link;
345 list_add_tail(&urbp->qh->list, &skelqh->list);
349 * Start removal of QH from schedule; it finishes next frame.
350 * TDs should be unlinked before this is called.
352 static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
362 * Only go through the hoops if it's actually linked in
364 if (!list_empty(&qh->list)) {
366 /* If our queue is nonempty, make the next URB the head */
367 if (!list_empty(&qh->urbp->queue_list)) {
368 struct urb_priv *nurbp;
370 nurbp = list_entry(qh->urbp->queue_list.next,
371 struct urb_priv, queue_list);
373 list_add(&nurbp->qh->list, &qh->list);
374 newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
378 /* Fix up the previous QH's queue to link to either
379 * the new head of this queue or the start of the
380 * next endpoint's queue. */
381 pqh = list_entry(qh->list.prev, struct uhci_qh, list);
384 struct urb_priv *turbp;
386 list_for_each_entry(turbp, &pqh->urbp->queue_list,
388 turbp->qh->link = newlink;
392 /* Leave qh->link in case the HC is on the QH now, it will */
393 /* continue the rest of the schedule */
394 qh->element = UHCI_PTR_TERM;
396 list_del_init(&qh->list);
399 list_del_init(&qh->urbp->queue_list);
402 age = uhci_get_current_frame_number(uhci);
403 if (age != uhci->qh_remove_age) {
404 uhci_free_pending_qhs(uhci);
405 uhci->qh_remove_age = age;
408 /* Check to see if the remove list is empty. Set the IOC bit */
409 /* to force an interrupt so we can remove the QH */
410 if (list_empty(&uhci->qh_remove_list))
411 uhci_set_next_interrupt(uhci);
413 list_add(&qh->remove_list, &uhci->qh_remove_list);
416 static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
418 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
421 list_for_each_entry(td, &urbp->td_list, list) {
423 td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
425 td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
433 /* This function will append one URB's QH to another URB's QH. This is for */
434 /* queuing interrupt, control or bulk transfers */
435 static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
437 struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
438 struct uhci_td *lltd;
440 eurbp = eurb->hcpriv;
443 /* Find the first URB in the queue */
446 list_for_each_entry(furbp, &eurbp->queue_list, queue_list)
451 lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);
453 lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);
455 /* Control transfers always start with toggle 0 */
456 if (!usb_pipecontrol(urb->pipe))
457 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
458 usb_pipeout(urb->pipe),
459 uhci_fixup_toggle(urb,
460 uhci_toggle(td_token(lltd)) ^ 1));
462 /* All qh's in the queue need to link to the next queue */
463 urbp->qh->link = eurbp->qh->link;
465 wmb(); /* Make sure we flush everything */
467 lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
469 list_add_tail(&urbp->queue_list, &furbp->queue_list);
474 static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
476 struct urb_priv *urbp, *nurbp, *purbp, *turbp;
477 struct uhci_td *pltd;
482 if (list_empty(&urbp->queue_list))
485 nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);
488 * Fix up the toggle for the following URBs in the queue.
489 * Only needed for bulk and interrupt: control and isochronous
490 * endpoints don't propagate toggles between messages.
492 if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
494 /* We just set the toggle in uhci_unlink_generic */
495 toggle = usb_gettoggle(urb->dev,
496 usb_pipeendpoint(urb->pipe),
497 usb_pipeout(urb->pipe));
499 /* If we're in the middle of the queue, grab the */
500 /* toggle from the TD previous to us */
501 purbp = list_entry(urbp->queue_list.prev,
502 struct urb_priv, queue_list);
503 pltd = list_entry(purbp->td_list.prev,
504 struct uhci_td, list);
505 toggle = uhci_toggle(td_token(pltd)) ^ 1;
508 list_for_each_entry(turbp, &urbp->queue_list, queue_list) {
511 toggle = uhci_fixup_toggle(turbp->urb, toggle);
514 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
515 usb_pipeout(urb->pipe), toggle);
519 /* We're somewhere in the middle (or end). The case where
520 * we're at the head is handled in uhci_remove_qh(). */
521 purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
524 pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
526 pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
528 /* The next URB happens to be the beginning, so */
529 /* we're the last, end the chain */
530 pltd->link = UHCI_PTR_TERM;
533 /* urbp->queue_list is handled in uhci_remove_qh() */
536 static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
538 struct urb_priv *urbp;
540 urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
544 memset((void *)urbp, 0, sizeof(*urbp));
546 urbp->inserttime = jiffies;
547 urbp->fsbrtime = jiffies;
550 INIT_LIST_HEAD(&urbp->td_list);
551 INIT_LIST_HEAD(&urbp->queue_list);
552 INIT_LIST_HEAD(&urbp->urb_list);
554 list_add_tail(&urbp->urb_list, &uhci->urb_list);
561 static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
563 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
567 list_add_tail(&td->list, &urbp->td_list);
570 static void uhci_remove_td_from_urb(struct uhci_td *td)
572 if (list_empty(&td->list))
575 list_del_init(&td->list);
580 static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
582 struct uhci_td *td, *tmp;
583 struct urb_priv *urbp;
586 urbp = (struct urb_priv *)urb->hcpriv;
590 if (!list_empty(&urbp->urb_list))
591 dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
592 "or uhci->remove_list!\n", urb);
594 age = uhci_get_current_frame_number(uhci);
595 if (age != uhci->td_remove_age) {
596 uhci_free_pending_tds(uhci);
597 uhci->td_remove_age = age;
600 /* Check to see if the remove list is empty. Set the IOC bit */
601 /* to force an interrupt so we can remove the TD's*/
602 if (list_empty(&uhci->td_remove_list))
603 uhci_set_next_interrupt(uhci);
605 list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
606 uhci_remove_td_from_urb(td);
607 uhci_remove_td(uhci, td);
608 list_add(&td->remove_list, &uhci->td_remove_list);
612 kmem_cache_free(uhci_up_cachep, urbp);
615 static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
617 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
619 if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
621 if (!uhci->fsbr++ && !uhci->fsbrtimeout)
622 uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
626 static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
628 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
630 if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
633 uhci->fsbrtimeout = jiffies + FSBR_DELAY;
638 * Map status to standard result codes
640 * <status> is (td_status(td) & 0xF60000), a.k.a.
641 * uhci_status_bits(td_status(td)).
642 * Note: <status> does not include the TD_CTRL_NAK bit.
643 * <dir_out> is True for output TDs and False for input TDs.
645 static int uhci_map_status(int status, int dir_out)
649 if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */
651 if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */
657 if (status & TD_CTRL_BABBLE) /* Babble */
659 if (status & TD_CTRL_DBUFERR) /* Buffer error */
661 if (status & TD_CTRL_STALLED) /* Stalled */
663 WARN_ON(status & TD_CTRL_ACTIVE); /* Active */
670 static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
672 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
674 struct uhci_qh *qh, *skelqh;
675 unsigned long destination, status;
676 int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
677 int len = urb->transfer_buffer_length;
678 dma_addr_t data = urb->transfer_dma;
680 /* The "pipe" thing contains the destination in bits 8--18 */
681 destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
684 status = TD_CTRL_ACTIVE | uhci_maxerr(3);
685 if (urb->dev->speed == USB_SPEED_LOW)
686 status |= TD_CTRL_LS;
689 * Build the TD for the control request setup packet
691 td = uhci_alloc_td(uhci, urb->dev);
695 uhci_add_td_to_urb(urb, td);
696 uhci_fill_td(td, status, destination | uhci_explen(7),
700 * If direction is "send", change the packet ID from SETUP (0x2D)
701 * to OUT (0xE1). Else change it from SETUP to IN (0x69) and
702 * set Short Packet Detect (SPD) for all data packets.
704 if (usb_pipeout(urb->pipe))
705 destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
707 destination ^= (USB_PID_SETUP ^ USB_PID_IN);
708 status |= TD_CTRL_SPD;
712 * Build the DATA TD's
720 td = uhci_alloc_td(uhci, urb->dev);
724 /* Alternate Data0/1 (start with Data1) */
725 destination ^= TD_TOKEN_TOGGLE;
727 uhci_add_td_to_urb(urb, td);
728 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
736 * Build the final TD for control status
738 td = uhci_alloc_td(uhci, urb->dev);
743 * It's IN if the pipe is an output pipe or we're not expecting
746 destination &= ~TD_TOKEN_PID_MASK;
747 if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
748 destination |= USB_PID_IN;
750 destination |= USB_PID_OUT;
752 destination |= TD_TOKEN_TOGGLE; /* End in Data1 */
754 status &= ~TD_CTRL_SPD;
756 uhci_add_td_to_urb(urb, td);
757 uhci_fill_td(td, status | TD_CTRL_IOC,
758 destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);
760 qh = uhci_alloc_qh(uhci, urb->dev);
767 uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
769 /* Low-speed transfers get a different queue, and won't hog the bus.
770 * Also, some devices enumerate better without FSBR; the easiest way
771 * to do that is to put URBs on the low-speed queue while the device
772 * is in the DEFAULT state. */
773 if (urb->dev->speed == USB_SPEED_LOW ||
774 urb->dev->state == USB_STATE_DEFAULT)
775 skelqh = uhci->skel_ls_control_qh;
777 skelqh = uhci->skel_fs_control_qh;
778 uhci_inc_fsbr(uhci, urb);
782 uhci_append_queued_urb(uhci, eurb, urb);
784 uhci_insert_qh(uhci, skelqh, urb);
790 * If control-IN transfer was short, the status packet wasn't sent.
791 * This routine changes the element pointer in the QH to point at the
792 * status TD. It's safe to do this even while the QH is live, because
793 * the hardware only updates the element pointer following a successful
794 * transfer. The inactive TD for the short packet won't cause an update,
795 * so the pointer won't get overwritten. The next time the controller
796 * sees this QH, it will send the status packet.
798 static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
800 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
803 urbp->short_control_packet = 1;
805 td = list_entry(urbp->td_list.prev, struct uhci_td, list);
806 urbp->qh->element = cpu_to_le32(td->dma_handle);
812 static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
814 struct list_head *tmp, *head;
815 struct urb_priv *urbp = urb->hcpriv;
820 if (list_empty(&urbp->td_list))
823 head = &urbp->td_list;
825 if (urbp->short_control_packet) {
831 td = list_entry(tmp, struct uhci_td, list);
833 /* The first TD is the SETUP stage, check the status, but skip */
835 status = uhci_status_bits(td_status(td));
836 if (status & TD_CTRL_ACTIVE)
842 urb->actual_length = 0;
844 /* The rest of the TD's (but the last) are data */
846 while (tmp != head && tmp->next != head) {
847 unsigned int ctrlstat;
849 td = list_entry(tmp, struct uhci_td, list);
852 ctrlstat = td_status(td);
853 status = uhci_status_bits(ctrlstat);
854 if (status & TD_CTRL_ACTIVE)
857 urb->actual_length += uhci_actual_length(ctrlstat);
862 /* Check to see if we received a short packet */
863 if (uhci_actual_length(ctrlstat) <
864 uhci_expected_length(td_token(td))) {
865 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
870 if (uhci_packetid(td_token(td)) == USB_PID_IN)
871 return usb_control_retrigger_status(uhci, urb);
878 td = list_entry(tmp, struct uhci_td, list);
880 /* Control status stage */
881 status = td_status(td);
883 #ifdef I_HAVE_BUGGY_APC_BACKUPS
884 /* APC BackUPS Pro kludge */
885 /* It tries to send all of the descriptor instead of the amount */
887 if (status & TD_CTRL_IOC && /* IOC is masked out by uhci_status_bits */
888 status & TD_CTRL_ACTIVE &&
889 status & TD_CTRL_NAK)
893 status = uhci_status_bits(status);
894 if (status & TD_CTRL_ACTIVE)
903 ret = uhci_map_status(status, uhci_packetout(td_token(td)));
906 if ((debug == 1 && ret != -EPIPE) || debug > 1) {
907 /* Some debugging code */
908 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
909 __FUNCTION__, status);
912 /* Print the chain for debugging purposes */
913 uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
923 * Common submit for bulk and interrupt
925 static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
929 unsigned long destination, status;
930 int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
931 int len = urb->transfer_buffer_length;
932 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
933 dma_addr_t data = urb->transfer_dma;
938 /* The "pipe" thing contains the destination in bits 8--18 */
939 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
941 status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
942 if (urb->dev->speed == USB_SPEED_LOW)
943 status |= TD_CTRL_LS;
944 if (usb_pipein(urb->pipe))
945 status |= TD_CTRL_SPD;
948 * Build the DATA TD's
950 do { /* Allow zero length packets */
955 if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
956 status &= ~TD_CTRL_SPD;
959 td = uhci_alloc_td(uhci, urb->dev);
963 uhci_add_td_to_urb(urb, td);
964 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
965 (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
966 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
972 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
973 usb_pipeout(urb->pipe));
977 * URB_ZERO_PACKET means adding a 0-length packet, if direction
978 * is OUT and the transfer_length was an exact multiple of maxsze,
979 * hence (len = transfer_length - N * maxsze) == 0
980 * however, if transfer_length == 0, the zero packet was already
983 if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
984 !len && urb->transfer_buffer_length) {
985 td = uhci_alloc_td(uhci, urb->dev);
989 uhci_add_td_to_urb(urb, td);
990 uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
991 (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
992 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
995 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
996 usb_pipeout(urb->pipe));
999 /* Set the interrupt-on-completion flag on the last packet.
1000 * A more-or-less typical 4 KB URB (= size of one memory page)
1001 * will require about 3 ms to transfer; that's a little on the
1002 * fast side but not enough to justify delaying an interrupt
1003 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
1005 td->status |= cpu_to_le32(TD_CTRL_IOC);
1007 qh = uhci_alloc_qh(uhci, urb->dev);
1014 /* Always breadth first */
1015 uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
1018 uhci_append_queued_urb(uhci, eurb, urb);
1020 uhci_insert_qh(uhci, skelqh, urb);
1022 return -EINPROGRESS;
1026 * Common result for bulk and interrupt
1028 static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
1030 struct urb_priv *urbp = urb->hcpriv;
1032 unsigned int status = 0;
1035 urb->actual_length = 0;
1037 list_for_each_entry(td, &urbp->td_list, list) {
1038 unsigned int ctrlstat = td_status(td);
1040 status = uhci_status_bits(ctrlstat);
1041 if (status & TD_CTRL_ACTIVE)
1042 return -EINPROGRESS;
1044 urb->actual_length += uhci_actual_length(ctrlstat);
1049 if (uhci_actual_length(ctrlstat) <
1050 uhci_expected_length(td_token(td))) {
1051 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
1062 ret = uhci_map_status(status, uhci_packetout(td_token(td)));
1066 * Enable this chunk of code if you want to see some more debugging.
1067 * But be careful, it has the tendancy to starve out khubd and prevent
1068 * disconnects from happening successfully if you have a slow debug
1069 * log interface (like a serial console.
1072 if ((debug == 1 && ret != -EPIPE) || debug > 1) {
1073 /* Some debugging code */
1074 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
1075 __FUNCTION__, status);
1078 /* Print the chain for debugging purposes */
1079 uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
1088 static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1092 /* Can't have low-speed bulk transfers */
1093 if (urb->dev->speed == USB_SPEED_LOW)
1096 ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
1097 if (ret == -EINPROGRESS)
1098 uhci_inc_fsbr(uhci, urb);
1103 static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1105 /* USB 1.1 interrupt transfers only involve one packet per interval;
1106 * that's the uhci_submit_common() "breadth first" policy. Drivers
1107 * can submit urbs of any length, but longer ones might need many
1108 * intervals to complete.
1110 return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
1114 * Isochronous transfers
1116 static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
1118 struct urb *last_urb = NULL;
1119 struct urb_priv *up;
1122 list_for_each_entry(up, &uhci->urb_list, urb_list) {
1123 struct urb *u = up->urb;
1125 /* look for pending URB's with identical pipe handle */
1126 if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
1127 (u->status == -EINPROGRESS) && (u != urb)) {
1129 *start = u->start_frame;
1135 *end = (last_urb->start_frame + last_urb->number_of_packets *
1136 last_urb->interval) & (UHCI_NUMFRAMES-1);
1139 ret = -1; /* no previous urb found */
1144 static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
1147 unsigned int start = 0, end = 0;
1149 if (urb->number_of_packets > 900) /* 900? Why? */
1152 limits = isochronous_find_limits(uhci, urb, &start, &end);
1154 if (urb->transfer_flags & URB_ISO_ASAP) {
1157 (uhci_get_current_frame_number(uhci) +
1158 10) & (UHCI_NUMFRAMES - 1);
1160 urb->start_frame = end;
1162 urb->start_frame &= (UHCI_NUMFRAMES - 1);
1163 /* FIXME: Sanity check */
1170 * Isochronous transfers
1172 static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1176 int status, destination;
1178 status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
1179 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
1181 ret = isochronous_find_start(uhci, urb);
1185 frame = urb->start_frame;
1186 for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
1187 if (!urb->iso_frame_desc[i].length)
1190 td = uhci_alloc_td(uhci, urb->dev);
1194 uhci_add_td_to_urb(urb, td);
1195 uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
1196 urb->transfer_dma + urb->iso_frame_desc[i].offset);
1198 if (i + 1 >= urb->number_of_packets)
1199 td->status |= cpu_to_le32(TD_CTRL_IOC);
1201 uhci_insert_td_frame_list(uhci, td, frame);
1204 return -EINPROGRESS;
1207 static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1210 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1214 urb->actual_length = 0;
1217 list_for_each_entry(td, &urbp->td_list, list) {
1219 unsigned int ctrlstat = td_status(td);
1221 if (ctrlstat & TD_CTRL_ACTIVE)
1222 return -EINPROGRESS;
1224 actlength = uhci_actual_length(ctrlstat);
1225 urb->iso_frame_desc[i].actual_length = actlength;
1226 urb->actual_length += actlength;
1228 status = uhci_map_status(uhci_status_bits(ctrlstat),
1229 usb_pipeout(urb->pipe));
1230 urb->iso_frame_desc[i].status = status;
1242 static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
1244 struct urb_priv *up;
1246 /* We don't match Isoc transfers since they are special */
1247 if (usb_pipeisoc(urb->pipe))
1250 list_for_each_entry(up, &uhci->urb_list, urb_list) {
1251 struct urb *u = up->urb;
1253 if (u->dev == urb->dev && u->status == -EINPROGRESS) {
1254 /* For control, ignore the direction */
1255 if (usb_pipecontrol(urb->pipe) &&
1256 (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
1258 else if (u->pipe == urb->pipe)
1266 static int uhci_urb_enqueue(struct usb_hcd *hcd,
1267 struct usb_host_endpoint *ep,
1268 struct urb *urb, int mem_flags)
1271 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1272 unsigned long flags;
1276 spin_lock_irqsave(&uhci->schedule_lock, flags);
1279 if (ret != -EINPROGRESS) /* URB already unlinked! */
1282 eurb = uhci_find_urb_ep(uhci, urb);
1284 if (!uhci_alloc_urb_priv(uhci, urb)) {
1289 switch (usb_pipetype(urb->pipe)) {
1291 ret = uhci_submit_control(uhci, urb, eurb);
1293 case PIPE_INTERRUPT:
1295 bustime = usb_check_bandwidth(urb->dev, urb);
1299 ret = uhci_submit_interrupt(uhci, urb, eurb);
1300 if (ret == -EINPROGRESS)
1301 usb_claim_bandwidth(urb->dev, urb, bustime, 0);
1303 } else { /* inherit from parent */
1304 urb->bandwidth = eurb->bandwidth;
1305 ret = uhci_submit_interrupt(uhci, urb, eurb);
1309 ret = uhci_submit_bulk(uhci, urb, eurb);
1311 case PIPE_ISOCHRONOUS:
1312 bustime = usb_check_bandwidth(urb->dev, urb);
1318 ret = uhci_submit_isochronous(uhci, urb);
1319 if (ret == -EINPROGRESS)
1320 usb_claim_bandwidth(urb->dev, urb, bustime, 1);
1324 if (ret != -EINPROGRESS) {
1325 /* Submit failed, so delete it from the urb_list */
1326 struct urb_priv *urbp = urb->hcpriv;
1328 list_del_init(&urbp->urb_list);
1329 uhci_destroy_urb_priv(uhci, urb);
1334 spin_unlock_irqrestore(&uhci->schedule_lock, flags);
1339 * Return the result of a transfer
1341 static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
1343 int ret = -EINPROGRESS;
1344 struct urb_priv *urbp;
1346 spin_lock(&urb->lock);
1348 urbp = (struct urb_priv *)urb->hcpriv;
1350 if (urb->status != -EINPROGRESS) /* URB already dequeued */
1353 switch (usb_pipetype(urb->pipe)) {
1355 ret = uhci_result_control(uhci, urb);
1358 case PIPE_INTERRUPT:
1359 ret = uhci_result_common(uhci, urb);
1361 case PIPE_ISOCHRONOUS:
1362 ret = uhci_result_isochronous(uhci, urb);
1366 if (ret == -EINPROGRESS)
1370 switch (usb_pipetype(urb->pipe)) {
1373 case PIPE_ISOCHRONOUS:
1374 /* Release bandwidth for Interrupt or Isoc. transfers */
1376 usb_release_bandwidth(urb->dev, urb, 1);
1377 uhci_unlink_generic(uhci, urb);
1379 case PIPE_INTERRUPT:
1380 /* Release bandwidth for Interrupt or Isoc. transfers */
1381 /* Make sure we don't release if we have a queued URB */
1382 if (list_empty(&urbp->queue_list) && urb->bandwidth)
1383 usb_release_bandwidth(urb->dev, urb, 0);
1385 /* bandwidth was passed on to queued URB, */
1386 /* so don't let usb_unlink_urb() release it */
1388 uhci_unlink_generic(uhci, urb);
1391 dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
1393 __FUNCTION__, usb_pipetype(urb->pipe), urb);
1396 /* Move it from uhci->urb_list to uhci->complete_list */
1397 uhci_moveto_complete(uhci, urbp);
1400 spin_unlock(&urb->lock);
1403 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
1405 struct list_head *head;
1407 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1410 uhci_dec_fsbr(uhci, urb); /* Safe since it checks */
1413 * Now we need to find out what the last successful toggle was
1414 * so we can update the local data toggle for the next transfer
1416 * There are 2 ways the last successful completed TD is found:
1418 * 1) The TD is NOT active and the actual length < expected length
1419 * 2) The TD is NOT active and it's the last TD in the chain
1421 * and a third way the first uncompleted TD is found:
1423 * 3) The TD is active and the previous TD is NOT active
1425 * Control and Isochronous ignore the toggle, so this is safe
1428 * FIXME: The toggle fixups won't be 100% reliable until we
1429 * change over to using a single queue for each endpoint and
1430 * stop the queue before unlinking.
1432 head = &urbp->td_list;
1433 list_for_each_entry(td, head, list) {
1434 unsigned int ctrlstat = td_status(td);
1436 if (!(ctrlstat & TD_CTRL_ACTIVE) &&
1437 (uhci_actual_length(ctrlstat) <
1438 uhci_expected_length(td_token(td)) ||
1439 td->list.next == head))
1440 usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1441 uhci_packetout(td_token(td)),
1442 uhci_toggle(td_token(td)) ^ 1);
1443 else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
1444 usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1445 uhci_packetout(td_token(td)),
1446 uhci_toggle(td_token(td)));
1448 prevactive = ctrlstat & TD_CTRL_ACTIVE;
1451 uhci_delete_queued_urb(uhci, urb);
1453 /* The interrupt loop will reclaim the QH's */
1454 uhci_remove_qh(uhci, urbp->qh);
1458 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
1460 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1461 unsigned long flags;
1462 struct urb_priv *urbp;
1465 spin_lock_irqsave(&uhci->schedule_lock, flags);
1467 if (!urbp) /* URB was never linked! */
1469 list_del_init(&urbp->urb_list);
1471 uhci_unlink_generic(uhci, urb);
1473 age = uhci_get_current_frame_number(uhci);
1474 if (age != uhci->urb_remove_age) {
1475 uhci_remove_pending_urbps(uhci);
1476 uhci->urb_remove_age = age;
1479 /* If we're the first, set the next interrupt bit */
1480 if (list_empty(&uhci->urb_remove_list))
1481 uhci_set_next_interrupt(uhci);
1482 list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);
1485 spin_unlock_irqrestore(&uhci->schedule_lock, flags);
1489 static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
1491 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1492 struct list_head *head;
1496 uhci_dec_fsbr(uhci, urb);
1498 urbp->fsbr_timeout = 1;
1501 * Ideally we would want to fix qh->element as well, but it's
1502 * read/write by the HC, so that can introduce a race. It's not
1503 * really worth the hassle
1506 head = &urbp->td_list;
1507 list_for_each_entry(td, head, list) {
1509 * Make sure we don't do the last one (since it'll have the
1510 * TERM bit set) as well as we skip every so many TD's to
1511 * make sure it doesn't hog the bandwidth
1513 if (td->list.next != head && (count % DEPTH_INTERVAL) ==
1514 (DEPTH_INTERVAL - 1))
1515 td->link |= UHCI_PTR_DEPTH;
1524 * uhci_get_current_frame_number()
1526 * returns the current frame number for a USB bus/controller.
1528 static unsigned int uhci_get_current_frame_number(struct uhci_hcd *uhci)
1530 return inw(uhci->io_addr + USBFRNUM);
1533 static int init_stall_timer(struct usb_hcd *hcd);
1535 static void stall_callback(unsigned long ptr)
1537 struct usb_hcd *hcd = (struct usb_hcd *)ptr;
1538 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1539 struct urb_priv *up;
1540 unsigned long flags;
1541 int called_uhci_finish_completion = 0;
1543 spin_lock_irqsave(&uhci->schedule_lock, flags);
1544 if (!list_empty(&uhci->urb_remove_list) &&
1545 uhci_get_current_frame_number(uhci) != uhci->urb_remove_age) {
1546 uhci_remove_pending_urbps(uhci);
1547 uhci_finish_completion(hcd, NULL);
1548 called_uhci_finish_completion = 1;
1551 list_for_each_entry(up, &uhci->urb_list, urb_list) {
1552 struct urb *u = up->urb;
1554 spin_lock(&u->lock);
1556 /* Check if the FSBR timed out */
1557 if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
1558 uhci_fsbr_timeout(uhci, u);
1560 spin_unlock(&u->lock);
1562 spin_unlock_irqrestore(&uhci->schedule_lock, flags);
1564 /* Wake up anyone waiting for an URB to complete */
1565 if (called_uhci_finish_completion)
1566 wake_up_all(&uhci->waitqh);
1568 /* Really disable FSBR */
1569 if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
1570 uhci->fsbrtimeout = 0;
1571 uhci->skel_term_qh->link = UHCI_PTR_TERM;
1574 /* Poll for and perform state transitions */
1575 hc_state_transitions(uhci);
1576 if (unlikely(uhci->suspended_ports && uhci->state != UHCI_SUSPENDED))
1577 uhci_check_resume(uhci);
1579 init_stall_timer(hcd);
1582 static int init_stall_timer(struct usb_hcd *hcd)
1584 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1586 init_timer(&uhci->stall_timer);
1587 uhci->stall_timer.function = stall_callback;
1588 uhci->stall_timer.data = (unsigned long)hcd;
1589 uhci->stall_timer.expires = jiffies + msecs_to_jiffies(100);
1590 add_timer(&uhci->stall_timer);
1595 static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
1597 struct uhci_qh *qh, *tmp;
1599 list_for_each_entry_safe(qh, tmp, &uhci->qh_remove_list, remove_list) {
1600 list_del_init(&qh->remove_list);
1602 uhci_free_qh(uhci, qh);
1606 static void uhci_free_pending_tds(struct uhci_hcd *uhci)
1608 struct uhci_td *td, *tmp;
1610 list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
1611 list_del_init(&td->remove_list);
1613 uhci_free_td(uhci, td);
1618 uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
1619 __releases(uhci->schedule_lock)
1620 __acquires(uhci->schedule_lock)
1622 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1624 uhci_destroy_urb_priv(uhci, urb);
1626 spin_unlock(&uhci->schedule_lock);
1627 usb_hcd_giveback_urb(hcd, urb, regs);
1628 spin_lock(&uhci->schedule_lock);
1631 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs)
1633 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1634 struct urb_priv *urbp, *tmp;
1636 list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
1637 struct urb *urb = urbp->urb;
1639 list_del_init(&urbp->urb_list);
1640 uhci_finish_urb(hcd, urb, regs);
1644 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
1647 /* Splice the urb_remove_list onto the end of the complete_list */
1648 list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
1651 static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
1653 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1654 unsigned long io_addr = uhci->io_addr;
1655 unsigned short status;
1656 struct urb_priv *urbp, *tmp;
1660 * Read the interrupt status, and write it back to clear the
1661 * interrupt cause. Contrary to the UHCI specification, the
1662 * "HC Halted" status bit is persistent: it is RO, not R/WC.
1664 status = inw(io_addr + USBSTS);
1665 if (!(status & ~USBSTS_HCH)) /* shared interrupt, not mine */
1667 outw(status, io_addr + USBSTS); /* Clear it */
1669 if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
1670 if (status & USBSTS_HSE)
1671 dev_err(uhci_dev(uhci), "host system error, "
1673 if (status & USBSTS_HCPE)
1674 dev_err(uhci_dev(uhci), "host controller process "
1675 "error, something bad happened!\n");
1676 if ((status & USBSTS_HCH) && uhci->state > 0) {
1677 dev_err(uhci_dev(uhci), "host controller halted, "
1679 /* FIXME: Reset the controller, fix the offending TD */
1683 if (status & USBSTS_RD)
1684 uhci->resume_detect = 1;
1686 spin_lock(&uhci->schedule_lock);
1688 age = uhci_get_current_frame_number(uhci);
1689 if (age != uhci->qh_remove_age)
1690 uhci_free_pending_qhs(uhci);
1691 if (age != uhci->td_remove_age)
1692 uhci_free_pending_tds(uhci);
1693 if (age != uhci->urb_remove_age)
1694 uhci_remove_pending_urbps(uhci);
1696 if (list_empty(&uhci->urb_remove_list) &&
1697 list_empty(&uhci->td_remove_list) &&
1698 list_empty(&uhci->qh_remove_list))
1699 uhci_clear_next_interrupt(uhci);
1701 uhci_set_next_interrupt(uhci);
1703 /* Walk the list of pending URBs to see which ones completed
1704 * (must be _safe because uhci_transfer_result() dequeues URBs) */
1705 list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
1706 struct urb *urb = urbp->urb;
1708 /* Checks the status and does all of the magic necessary */
1709 uhci_transfer_result(uhci, urb);
1711 uhci_finish_completion(hcd, regs);
1713 spin_unlock(&uhci->schedule_lock);
1715 /* Wake up anyone waiting for an URB to complete */
1716 wake_up_all(&uhci->waitqh);
1721 static void reset_hc(struct uhci_hcd *uhci)
1723 unsigned long io_addr = uhci->io_addr;
1725 /* Turn off PIRQ, SMI, and all interrupts. This also turns off
1726 * the BIOS's USB Legacy Support.
1728 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0);
1729 outw(0, uhci->io_addr + USBINTR);
1731 /* Global reset for 50ms */
1732 uhci->state = UHCI_RESET;
1733 outw(USBCMD_GRESET, io_addr + USBCMD);
1735 outw(0, io_addr + USBCMD);
1737 /* Another 10ms delay */
1739 uhci->resume_detect = 0;
1742 static void suspend_hc(struct uhci_hcd *uhci)
1744 unsigned long io_addr = uhci->io_addr;
1746 dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
1747 uhci->state = UHCI_SUSPENDED;
1748 uhci->resume_detect = 0;
1749 outw(USBCMD_EGSM, io_addr + USBCMD);
1752 static void wakeup_hc(struct uhci_hcd *uhci)
1754 unsigned long io_addr = uhci->io_addr;
1756 switch (uhci->state) {
1757 case UHCI_SUSPENDED: /* Start the resume */
1758 dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
1760 /* Global resume for >= 20ms */
1761 outw(USBCMD_FGR | USBCMD_EGSM, io_addr + USBCMD);
1762 uhci->state = UHCI_RESUMING_1;
1763 uhci->state_end = jiffies + msecs_to_jiffies(20);
1766 case UHCI_RESUMING_1: /* End global resume */
1767 uhci->state = UHCI_RESUMING_2;
1768 outw(0, io_addr + USBCMD);
1771 case UHCI_RESUMING_2: /* Wait for EOP to be sent */
1772 if (inw(io_addr + USBCMD) & USBCMD_FGR)
1775 /* Run for at least 1 second, and
1776 * mark it configured with a 64-byte max packet */
1777 uhci->state = UHCI_RUNNING_GRACE;
1778 uhci->state_end = jiffies + HZ;
1779 outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP,
1783 case UHCI_RUNNING_GRACE: /* Now allowed to suspend */
1784 uhci->state = UHCI_RUNNING;
1792 static int ports_active(struct uhci_hcd *uhci)
1794 unsigned long io_addr = uhci->io_addr;
1798 for (i = 0; i < uhci->rh_numports; i++)
1799 connection |= (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_CCS);
1804 static int suspend_allowed(struct uhci_hcd *uhci)
1806 unsigned long io_addr = uhci->io_addr;
1809 if (to_pci_dev(uhci_dev(uhci))->vendor != PCI_VENDOR_ID_INTEL)
1812 /* Some of Intel's USB controllers have a bug that causes false
1813 * resume indications if any port has an over current condition.
1814 * To prevent problems, we will not allow a global suspend if
1817 * Some motherboards using Intel's chipsets (but not using all
1818 * the USB ports) appear to hardwire the over current inputs active
1819 * to disable the USB ports.
1822 /* check for over current condition on any port */
1823 for (i = 0; i < uhci->rh_numports; i++) {
1824 if (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_OC)
1831 static void hc_state_transitions(struct uhci_hcd *uhci)
1833 switch (uhci->state) {
1836 /* global suspend if nothing connected for 1 second */
1837 if (!ports_active(uhci) && suspend_allowed(uhci)) {
1838 uhci->state = UHCI_SUSPENDING_GRACE;
1839 uhci->state_end = jiffies + HZ;
1843 case UHCI_SUSPENDING_GRACE:
1844 if (ports_active(uhci))
1845 uhci->state = UHCI_RUNNING;
1846 else if (time_after_eq(jiffies, uhci->state_end))
1850 case UHCI_SUSPENDED:
1852 /* wakeup if requested by a device */
1853 if (uhci->resume_detect)
1857 case UHCI_RESUMING_1:
1858 case UHCI_RESUMING_2:
1859 case UHCI_RUNNING_GRACE:
1860 if (time_after_eq(jiffies, uhci->state_end))
1869 static int start_hc(struct uhci_hcd *uhci)
1871 unsigned long io_addr = uhci->io_addr;
1875 * Reset the HC - this will force us to get a
1876 * new notification of any already connected
1877 * ports due to the virtual disconnect that it
1880 outw(USBCMD_HCRESET, io_addr + USBCMD);
1881 while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
1882 if (--timeout < 0) {
1883 dev_err(uhci_dev(uhci), "USBCMD_HCRESET timed out!\n");
1889 /* Turn on PIRQ and all interrupts */
1890 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
1892 outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
1895 /* Start at frame 0 */
1896 outw(0, io_addr + USBFRNUM);
1897 outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);
1899 /* Run and mark it configured with a 64-byte max packet */
1900 uhci->state = UHCI_RUNNING_GRACE;
1901 uhci->state_end = jiffies + HZ;
1902 outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
1904 uhci_to_hcd(uhci)->state = USB_STATE_RUNNING;
1909 * De-allocate all resources..
1911 static void release_uhci(struct uhci_hcd *uhci)
1915 for (i = 0; i < UHCI_NUM_SKELQH; i++)
1916 if (uhci->skelqh[i]) {
1917 uhci_free_qh(uhci, uhci->skelqh[i]);
1918 uhci->skelqh[i] = NULL;
1921 if (uhci->term_td) {
1922 uhci_free_td(uhci, uhci->term_td);
1923 uhci->term_td = NULL;
1926 if (uhci->qh_pool) {
1927 dma_pool_destroy(uhci->qh_pool);
1928 uhci->qh_pool = NULL;
1931 if (uhci->td_pool) {
1932 dma_pool_destroy(uhci->td_pool);
1933 uhci->td_pool = NULL;
1937 dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
1938 uhci->fl, uhci->fl->dma_handle);
1943 debugfs_remove(uhci->dentry);
1944 uhci->dentry = NULL;
1948 static int uhci_reset(struct usb_hcd *hcd)
1950 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1952 uhci->io_addr = (unsigned long) hcd->regs;
1954 /* Kick BIOS off this hardware and reset, so we won't get
1955 * interrupts from any previous setup.
1962 * Allocate a frame list, and then setup the skeleton
1964 * The hardware doesn't really know any difference
1965 * in the queues, but the order does matter for the
1966 * protocols higher up. The order is:
1968 * - any isochronous events handled before any
1969 * of the queues. We don't do that here, because
1970 * we'll create the actual TD entries on demand.
1971 * - The first queue is the interrupt queue.
1972 * - The second queue is the control queue, split into low- and full-speed
1973 * - The third queue is bulk queue.
1974 * - The fourth queue is the bandwidth reclamation queue, which loops back
1975 * to the full-speed control queue.
1977 static int uhci_start(struct usb_hcd *hcd)
1979 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1980 int retval = -EBUSY;
1983 dma_addr_t dma_handle;
1984 struct usb_device *udev;
1985 struct dentry *dentry;
1987 io_size = pci_resource_len(to_pci_dev(uhci_dev(uhci)), hcd->region);
1989 dentry = debugfs_create_file(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root, uhci, &uhci_debug_operations);
1991 dev_err(uhci_dev(uhci), "couldn't create uhci debugfs entry\n");
1993 goto err_create_debug_entry;
1995 uhci->dentry = dentry;
1998 uhci->fsbrtimeout = 0;
2000 spin_lock_init(&uhci->schedule_lock);
2001 INIT_LIST_HEAD(&uhci->qh_remove_list);
2003 INIT_LIST_HEAD(&uhci->td_remove_list);
2005 INIT_LIST_HEAD(&uhci->urb_remove_list);
2007 INIT_LIST_HEAD(&uhci->urb_list);
2009 INIT_LIST_HEAD(&uhci->complete_list);
2011 init_waitqueue_head(&uhci->waitqh);
2013 uhci->fl = dma_alloc_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2016 dev_err(uhci_dev(uhci), "unable to allocate "
2017 "consistent memory for frame list\n");
2021 memset((void *)uhci->fl, 0, sizeof(*uhci->fl));
2023 uhci->fl->dma_handle = dma_handle;
2025 uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
2026 sizeof(struct uhci_td), 16, 0);
2027 if (!uhci->td_pool) {
2028 dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
2029 goto err_create_td_pool;
2032 uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
2033 sizeof(struct uhci_qh), 16, 0);
2034 if (!uhci->qh_pool) {
2035 dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
2036 goto err_create_qh_pool;
2039 /* Initialize the root hub */
2041 /* UHCI specs says devices must have 2 ports, but goes on to say */
2042 /* they may have more but give no way to determine how many they */
2043 /* have. However, according to the UHCI spec, Bit 7 is always set */
2044 /* to 1. So we try to use this to our advantage */
2045 for (port = 0; port < (io_size - 0x10) / 2; port++) {
2046 unsigned int portstatus;
2048 portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
2049 if (!(portstatus & 0x0080))
2053 dev_info(uhci_dev(uhci), "detected %d ports\n", port);
2055 /* This is experimental so anything less than 2 or greater than 8 is */
2056 /* something weird and we'll ignore it */
2057 if (port < 2 || port > UHCI_RH_MAXCHILD) {
2058 dev_info(uhci_dev(uhci), "port count misdetected? "
2059 "forcing to 2 ports\n");
2063 uhci->rh_numports = port;
2065 udev = usb_alloc_dev(NULL, &hcd->self, 0);
2067 dev_err(uhci_dev(uhci), "unable to allocate root hub\n");
2068 goto err_alloc_root_hub;
2071 uhci->term_td = uhci_alloc_td(uhci, udev);
2072 if (!uhci->term_td) {
2073 dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
2074 goto err_alloc_term_td;
2077 for (i = 0; i < UHCI_NUM_SKELQH; i++) {
2078 uhci->skelqh[i] = uhci_alloc_qh(uhci, udev);
2079 if (!uhci->skelqh[i]) {
2080 dev_err(uhci_dev(uhci), "unable to allocate QH\n");
2081 goto err_alloc_skelqh;
2086 * 8 Interrupt queues; link all higher int queues to int1,
2087 * then link int1 to control and control to bulk
2089 uhci->skel_int128_qh->link =
2090 uhci->skel_int64_qh->link =
2091 uhci->skel_int32_qh->link =
2092 uhci->skel_int16_qh->link =
2093 uhci->skel_int8_qh->link =
2094 uhci->skel_int4_qh->link =
2095 uhci->skel_int2_qh->link =
2096 cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
2097 uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;
2099 uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
2100 uhci->skel_fs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
2101 uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;
2103 /* This dummy TD is to work around a bug in Intel PIIX controllers */
2104 uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
2105 (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
2106 uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);
2108 uhci->skel_term_qh->link = UHCI_PTR_TERM;
2109 uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);
2112 * Fill the frame list: make all entries point to the proper
2115 * The interrupt queues will be interleaved as evenly as possible.
2116 * There's not much to be done about period-1 interrupts; they have
2117 * to occur in every frame. But we can schedule period-2 interrupts
2118 * in odd-numbered frames, period-4 interrupts in frames congruent
2119 * to 2 (mod 4), and so on. This way each frame only has two
2120 * interrupt QHs, which will help spread out bandwidth utilization.
2122 for (i = 0; i < UHCI_NUMFRAMES; i++) {
2126 * ffs (Find First bit Set) does exactly what we need:
2127 * 1,3,5,... => ffs = 0 => use skel_int2_qh = skelqh[6],
2128 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc.
2129 * ffs > 6 => not on any high-period queue, so use
2130 * skel_int1_qh = skelqh[7].
2131 * Add UHCI_NUMFRAMES to insure at least one bit is set.
2133 irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES);
2137 /* Only place we don't use the frame list routines */
2138 uhci->fl->frame[i] = UHCI_PTR_QH |
2139 cpu_to_le32(uhci->skelqh[irq]->dma_handle);
2143 * Some architectures require a full mb() to enforce completion of
2144 * the memory writes above before the I/O transfers in start_hc().
2147 if ((retval = start_hc(uhci)) != 0)
2148 goto err_alloc_skelqh;
2150 init_stall_timer(hcd);
2152 udev->speed = USB_SPEED_FULL;
2154 if (hcd_register_root(udev, hcd) != 0) {
2155 dev_err(uhci_dev(uhci), "unable to start root hub\n");
2157 goto err_start_root_hub;
2168 del_timer_sync(&uhci->stall_timer);
2171 for (i = 0; i < UHCI_NUM_SKELQH; i++)
2172 if (uhci->skelqh[i]) {
2173 uhci_free_qh(uhci, uhci->skelqh[i]);
2174 uhci->skelqh[i] = NULL;
2177 uhci_free_td(uhci, uhci->term_td);
2178 uhci->term_td = NULL;
2184 dma_pool_destroy(uhci->qh_pool);
2185 uhci->qh_pool = NULL;
2188 dma_pool_destroy(uhci->td_pool);
2189 uhci->td_pool = NULL;
2192 dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2193 uhci->fl, uhci->fl->dma_handle);
2197 debugfs_remove(uhci->dentry);
2198 uhci->dentry = NULL;
2200 err_create_debug_entry:
2204 static void uhci_stop(struct usb_hcd *hcd)
2206 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2208 del_timer_sync(&uhci->stall_timer);
2211 * At this point, we're guaranteed that no new connects can be made
2212 * to this bus since there are no more parents
2217 spin_lock_irq(&uhci->schedule_lock);
2218 uhci_free_pending_qhs(uhci);
2219 uhci_free_pending_tds(uhci);
2220 uhci_remove_pending_urbps(uhci);
2221 uhci_finish_completion(hcd, NULL);
2223 uhci_free_pending_qhs(uhci);
2224 uhci_free_pending_tds(uhci);
2225 spin_unlock_irq(&uhci->schedule_lock);
2227 /* Wake up anyone waiting for an URB to complete */
2228 wake_up_all(&uhci->waitqh);
2234 static int uhci_suspend(struct usb_hcd *hcd, u32 state)
2236 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2238 /* Don't try to suspend broken motherboards, reset instead */
2239 if (suspend_allowed(uhci)) {
2241 uhci->saved_framenumber =
2242 inw(uhci->io_addr + USBFRNUM) & 0x3ff;
2248 static int uhci_resume(struct usb_hcd *hcd)
2250 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2253 pci_set_master(to_pci_dev(uhci_dev(uhci)));
2255 if (uhci->state == UHCI_SUSPENDED) {
2258 * Some systems don't maintain the UHCI register values
2259 * during a PM suspend/resume cycle, so reinitialize
2260 * the Frame Number, Framelist Base Address, Interrupt
2261 * Enable, and Legacy Support registers.
2263 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
2265 outw(uhci->saved_framenumber, uhci->io_addr + USBFRNUM);
2266 outl(uhci->fl->dma_handle, uhci->io_addr + USBFLBASEADD);
2267 outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC |
2268 USBINTR_SP, uhci->io_addr + USBINTR);
2269 uhci->resume_detect = 1;
2270 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
2274 if ((rc = start_hc(uhci)) != 0)
2277 hcd->state = USB_STATE_RUNNING;
2282 /* Wait until all the URBs for a particular device/endpoint are gone */
2283 static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
2284 struct usb_host_endpoint *ep)
2286 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2288 wait_event_interruptible(uhci->waitqh, list_empty(&ep->urb_list));
/* hc_driver .get_frame_number entry point: read FRNUM from the HC. */
static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
	return uhci_get_current_frame_number(hcd_to_uhci(hcd));
}
2296 static const char hcd_name[] = "uhci_hcd";
2298 static const struct hc_driver uhci_driver = {
2299 .description = hcd_name,
2300 .product_desc = "UHCI Host Controller",
2301 .hcd_priv_size = sizeof(struct uhci_hcd),
2303 /* Generic hardware linkage */
2307 /* Basic lifecycle operations */
2308 .reset = uhci_reset,
2309 .start = uhci_start,
2311 .suspend = uhci_suspend,
2312 .resume = uhci_resume,
2316 .urb_enqueue = uhci_urb_enqueue,
2317 .urb_dequeue = uhci_urb_dequeue,
2319 .endpoint_disable = uhci_hcd_endpoint_disable,
2320 .get_frame_number = uhci_hcd_get_frame_number,
2322 .hub_status_data = uhci_hub_status_data,
2323 .hub_control = uhci_hub_control,
2326 static const struct pci_device_id uhci_pci_ids[] = { {
2327 /* handle any USB UHCI controller */
2328 PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
2329 .driver_data = (unsigned long) &uhci_driver,
2330 }, { /* end: all zeroes */ }
2333 MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
2335 static struct pci_driver uhci_pci_driver = {
2336 .name = (char *)hcd_name,
2337 .id_table = uhci_pci_ids,
2339 .probe = usb_hcd_pci_probe,
2340 .remove = usb_hcd_pci_remove,
2343 .suspend = usb_hcd_pci_suspend,
2344 .resume = usb_hcd_pci_resume,
2348 static int __init uhci_hcd_init(void)
2350 int retval = -ENOMEM;
2352 printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");
2358 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
2363 uhci_debugfs_root = debugfs_create_dir("uhci", NULL);
2364 if (!uhci_debugfs_root)
2367 uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
2368 sizeof(struct urb_priv), 0, 0, NULL, NULL);
2369 if (!uhci_up_cachep)
2372 retval = pci_register_driver(&uhci_pci_driver);
2379 if (kmem_cache_destroy(uhci_up_cachep))
2380 warn("not all urb_priv's were freed!");
2383 debugfs_remove(uhci_debugfs_root);
2394 static void __exit uhci_hcd_cleanup(void)
2396 pci_unregister_driver(&uhci_pci_driver);
2398 if (kmem_cache_destroy(uhci_up_cachep))
2399 warn("not all urb_priv's were freed!");
2401 debugfs_remove(uhci_debugfs_root);
module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

/* Module metadata; the AUTHOR/DESC strings are defined near the top of the file */
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");