vserver 1.9.5.x5
[linux-2.6.git] / drivers / usb / host / uhci-hcd.c
1 /*
2  * Universal Host Controller Interface driver for USB.
3  *
4  * Maintainer: Alan Stern <stern@rowland.harvard.edu>
5  *
6  * (C) Copyright 1999 Linus Torvalds
7  * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8  * (C) Copyright 1999 Randy Dunlap
9  * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10  * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11  * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12  * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13  * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14  *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15  * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
16  * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
17  *
18  * Intel documents this fairly well, and as far as I know there
19  * are no royalties or anything like that, but even so there are
20  * people who decided that they want to do the same thing in a
21  * completely different way.
22  *
23  * WARNING! The USB documentation is downright evil. Most of it
24  * is just crap, written by a committee. You're better off ignoring
25  * most of it, the important stuff is:
26  *  - the low-level protocol (fairly simple but lots of small details)
27  *  - working around the horridness of the rest
28  */
29
30 #include <linux/config.h>
31 #ifdef CONFIG_USB_DEBUG
32 #define DEBUG
33 #else
34 #undef DEBUG
35 #endif
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/kernel.h>
39 #include <linux/init.h>
40 #include <linux/delay.h>
41 #include <linux/ioport.h>
42 #include <linux/sched.h>
43 #include <linux/slab.h>
44 #include <linux/smp_lock.h>
45 #include <linux/errno.h>
46 #include <linux/unistd.h>
47 #include <linux/interrupt.h>
48 #include <linux/spinlock.h>
49 #include <linux/debugfs.h>
50 #include <linux/pm.h>
51 #include <linux/dmapool.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/usb.h>
54 #include <linux/bitops.h>
55
56 #include <asm/uaccess.h>
57 #include <asm/io.h>
58 #include <asm/irq.h>
59 #include <asm/system.h>
60
61 #include "../core/hcd.h"
62 #include "uhci-hcd.h"
63
64 /*
65  * Version Information
66  */
67 #define DRIVER_VERSION "v2.2"
68 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
69 Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
70 Alan Stern"
71 #define DRIVER_DESC "USB Universal Host Controller Interface driver"
72
73 /*
74  * debug = 0, no debugging messages
75  * debug = 1, dump failed URB's except for stalls
76  * debug = 2, dump all failed URB's (including stalls)
77  *            show all queues in /debug/uhci/[pci_addr]
78  * debug = 3, show all TD's in URB's when dumping
79  */
80 #ifdef DEBUG
81 static int debug = 1;
82 #else
83 static int debug = 0;
84 #endif
85 module_param(debug, int, S_IRUGO | S_IWUSR);
86 MODULE_PARM_DESC(debug, "Debug level");
87 static char *errbuf;
88 #define ERRBUF_LEN    (32 * 1024)
89
90 #include "uhci-hub.c"
91 #include "uhci-debug.c"
92
93 static kmem_cache_t *uhci_up_cachep;    /* urb_priv */
94
95 static unsigned int uhci_get_current_frame_number(struct uhci_hcd *uhci);
96 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
97 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
98 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
99 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs);
100 static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
101 static void uhci_free_pending_tds(struct uhci_hcd *uhci);
102
103 static void hc_state_transitions(struct uhci_hcd *uhci);
104
105 /* If a transfer is still active after this much time, turn off FSBR */
106 #define IDLE_TIMEOUT    msecs_to_jiffies(50)
107 #define FSBR_DELAY      msecs_to_jiffies(50)
108
109 /* When we timeout an idle transfer for FSBR, we'll switch it over to */
110 /* depth first traversal. We'll do it in groups of this number of TD's */
111 /* to make sure it doesn't hog all of the bandwidth */
112 #define DEPTH_INTERVAL 5
113
114 /*
115  * Technically, updating td->status here is a race, but it's not really a
116  * problem. The worst that can happen is that we set the IOC bit again
117  * generating a spurious interrupt. We could fix this by creating another
118  * QH and leaving the IOC bit always set, but then we would have to play
119  * games with the FSBR code to make sure we get the correct order in all
120  * the cases. I don't think it's worth the effort
121  */
/* Request an interrupt at the next frame by setting IOC on the terminating TD. */
static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC); 
}
126
/* Cancel the pending frame interrupt by clearing IOC on the terminating TD. */
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}
131
/*
 * Move an URB from whatever list it is on (normally uhci->urb_list)
 * to the tail of the complete list, ready for uhci_finish_completion().
 */
static inline void uhci_moveto_complete(struct uhci_hcd *uhci, 
					struct urb_priv *urbp)
{
	list_move_tail(&urbp->urb_list, &uhci->complete_list);
}
137
138 static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
139 {
140         dma_addr_t dma_handle;
141         struct uhci_td *td;
142
143         td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
144         if (!td)
145                 return NULL;
146
147         td->dma_handle = dma_handle;
148
149         td->link = UHCI_PTR_TERM;
150         td->buffer = 0;
151
152         td->frame = -1;
153         td->dev = dev;
154
155         INIT_LIST_HEAD(&td->list);
156         INIT_LIST_HEAD(&td->remove_list);
157         INIT_LIST_HEAD(&td->fl_list);
158
159         usb_get_dev(dev);
160
161         return td;
162 }
163
/*
 * Fill in the three hardware-visible words of a TD, converting each to
 * little-endian as the controller requires.
 */
static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}
171
172 /*
173  * We insert Isochronous URB's directly into the frame list at the beginning
174  */
/*
 * Link an isochronous TD into the frame list at the given frame number.
 * If TDs already occupy that frame the new one is appended to the end of
 * the frame's chain; otherwise it becomes the frame's first entry.  The
 * wmb() guarantees the TD's own link pointer is written before the TD is
 * made reachable by the host controller.
 */
static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->fl->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		/* fl_list is a ring: the frame head's ->prev is the last TD */
		ftd = uhci->fl->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();	/* TD fully set up before the HC can reach it */
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->fl->frame[framenum];
		wmb();	/* TD fully set up before the HC can reach it */
		uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->fl->frame_cpu[framenum] = td;
	}
}
200
/*
 * Unlink an isochronous TD from the frame list.  Handles three cases:
 * the TD is the only entry in its frame, the TD heads a chain with
 * successors, or the TD sits in the middle/end of a chain.  A no-op if
 * the TD was never inserted.
 */
static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1 && list_empty(&td->fl_list))
		return;

	if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			/* Sole TD in the frame: the frame inherits our link */
			uhci->fl->frame[td->frame] = td->link;
			uhci->fl->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			/* Head of a chain: promote the next TD to frame head */
			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->fl->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		/* Mid-chain: make the previous TD skip over us */
		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	/* Detach our link only after the bypass above is visible */
	wmb();
	td->link = UHCI_PTR_TERM;

	list_del_init(&td->fl_list);
	td->frame = -1;
}
231
232 /*
233  * Inserts a td list into qh.
234  */
235 static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, __le32 breadth)
236 {
237         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
238         struct uhci_td *td;
239         __le32 *plink;
240
241         /* Ordering isn't important here yet since the QH hasn't been */
242         /* inserted into the schedule yet */
243         plink = &qh->element;
244         list_for_each_entry(td, &urbp->td_list, list) {
245                 *plink = cpu_to_le32(td->dma_handle) | breadth;
246                 plink = &td->link;
247         }
248         *plink = UHCI_PTR_TERM;
249 }
250
/*
 * Return a TD to the DMA pool and drop the device reference taken in
 * uhci_alloc_td().  The warnings indicate a driver bug: a TD must be
 * unlinked from every list before it is freed.
 */
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->remove_list))
		dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	if (td->dev)
		usb_put_dev(td->dev);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
265
266 static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
267 {
268         dma_addr_t dma_handle;
269         struct uhci_qh *qh;
270
271         qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
272         if (!qh)
273                 return NULL;
274
275         qh->dma_handle = dma_handle;
276
277         qh->element = UHCI_PTR_TERM;
278         qh->link = UHCI_PTR_TERM;
279
280         qh->dev = dev;
281         qh->urbp = NULL;
282
283         INIT_LIST_HEAD(&qh->list);
284         INIT_LIST_HEAD(&qh->remove_list);
285
286         usb_get_dev(dev);
287
288         return qh;
289 }
290
/*
 * Return a QH to the DMA pool and drop the device reference taken in
 * uhci_alloc_qh().  The warnings indicate a driver bug: a QH must be
 * off both lists before it is freed.
 */
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (!list_empty(&qh->list))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
	if (!list_empty(&qh->remove_list))
		dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);

	if (qh->dev)
		usb_put_dev(qh->dev);

	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
303
304 /*
305  * Append this urb's qh after the last qh in skelqh->list
306  *
307  * Note that urb_priv.queue_list doesn't have a separate queue head;
308  * it's a ring with every element "live".
309  */
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct urb_priv *turbp;
	struct uhci_qh *lqh;

	/* Grab the last QH */
	lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);

	/* Point to the next skelqh; our QH must be fully linked onward
	 * before the HC can reach it through lqh below */
	urbp->qh->link = lqh->link;
	wmb();				/* Ordering is important */

	/*
	 * Patch QHs for previous endpoint's queued URBs?  HC goes
	 * here next, not to the next skelqh it now points to.
	 *
	 *    lqh --> td ... --> qh ... --> td --> qh ... --> td
	 *     |                 |                 |
	 *     v                 v                 v
	 *     +<----------------+-----------------+
	 *     v
	 *    newqh --> td ... --> td
	 *     |
	 *     v
	 *    ...
	 *
	 * The HC could see (and use!) any of these as we write them.
	 */
	lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
	if (lqh->urbp) {
		/* Every queued URB behind lqh must also point at the new QH */
		list_for_each_entry(turbp, &lqh->urbp->queue_list, queue_list)
			turbp->qh->link = lqh->link;
	}

	/* Finally track the QH on the skeleton's software list */
	list_add_tail(&urbp->qh->list, &skelqh->list);
}
347
348 /*
349  * Start removal of QH from schedule; it finishes next frame.
350  * TDs should be unlinked before this is called.
351  */
static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 newlink;
	unsigned int age;

	if (!qh)
		return;

	/*
	 * Only go through the hoops if it's actually linked in
	 */
	if (!list_empty(&qh->list)) {

		/* If our queue is nonempty, make the next URB the head */
		if (!list_empty(&qh->urbp->queue_list)) {
			struct urb_priv *nurbp;

			nurbp = list_entry(qh->urbp->queue_list.next,
					struct urb_priv, queue_list);
			nurbp->queued = 0;
			/* The successor's QH takes our place in the schedule */
			list_add(&nurbp->qh->list, &qh->list);
			newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		} else
			newlink = qh->link;

		/* Fix up the previous QH's queue to link to either
		 * the new head of this queue or the start of the
		 * next endpoint's queue. */
		pqh = list_entry(qh->list.prev, struct uhci_qh, list);
		pqh->link = newlink;
		if (pqh->urbp) {
			struct urb_priv *turbp;

			/* Queued URBs behind pqh must bypass us too */
			list_for_each_entry(turbp, &pqh->urbp->queue_list,
					queue_list)
				turbp->qh->link = newlink;
		}
		wmb();

		/* Leave qh->link in case the HC is on the QH now, it will */
		/* continue the rest of the schedule */
		qh->element = UHCI_PTR_TERM;

		list_del_init(&qh->list);
	}

	list_del_init(&qh->urbp->queue_list);
	qh->urbp = NULL;

	/* Flush QHs queued for removal in an earlier frame; the HC is
	 * guaranteed to be done with them by now */
	age = uhci_get_current_frame_number(uhci);
	if (age != uhci->qh_remove_age) {
		uhci_free_pending_qhs(uhci);
		uhci->qh_remove_age = age;
	}

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the QH */
	if (list_empty(&uhci->qh_remove_list))
		uhci_set_next_interrupt(uhci);

	list_add(&qh->remove_list, &uhci->qh_remove_list);
}
415
416 static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
417 {
418         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
419         struct uhci_td *td;
420
421         list_for_each_entry(td, &urbp->td_list, list) {
422                 if (toggle)
423                         td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
424                 else
425                         td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
426
427                 toggle ^= 1;
428         }
429
430         return toggle;
431 }
432
433 /* This function will append one URB's QH to another URB's QH. This is for */
434 /* queuing interrupt, control or bulk transfers */
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
	struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
	struct uhci_td *lltd;

	eurbp = eurb->hcpriv;
	urbp = urb->hcpriv;

	/* Find the first URB in the queue */
	furbp = eurbp;
	if (eurbp->queued) {
		/* queue_list is a ring; walk it until we hit the head URB */
		list_for_each_entry(furbp, &eurbp->queue_list, queue_list)
			if (!furbp->queued)
				break;
	}

	/* Last URB in the queue, and that URB's last TD */
	lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);

	lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);

	/* Control transfers always start with toggle 0 */
	if (!usb_pipecontrol(urb->pipe))
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe),
				uhci_fixup_toggle(urb,
					uhci_toggle(td_token(lltd)) ^ 1));

	/* All qh's in the queue need to link to the next queue */
	urbp->qh->link = eurbp->qh->link;

	wmb();			/* Make sure we flush everything */

	/* Only now make the new QH reachable from the old queue's last TD */
	lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->queue_list, &furbp->queue_list);

	urbp->queued = 1;
}
473
/*
 * Remove an URB from the middle or end of an endpoint queue, repairing
 * both the data toggles of the URBs that follow it and the hardware
 * links that bypassed it.  Removal of the queue head itself is finished
 * in uhci_remove_qh().
 */
static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp, *nurbp, *purbp, *turbp;
	struct uhci_td *pltd;
	unsigned int toggle;

	urbp = urb->hcpriv;

	/* Not part of a queue: nothing to repair */
	if (list_empty(&urbp->queue_list))
		return;

	nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);

	/*
	 * Fix up the toggle for the following URBs in the queue.
	 * Only needed for bulk and interrupt: control and isochronous
	 * endpoints don't propagate toggles between messages.
	 */
	if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
		if (!urbp->queued)
			/* We just set the toggle in uhci_unlink_generic */
			toggle = usb_gettoggle(urb->dev,
					usb_pipeendpoint(urb->pipe),
					usb_pipeout(urb->pipe));
		else {
			/* If we're in the middle of the queue, grab the */
			/* toggle from the TD previous to us */
			purbp = list_entry(urbp->queue_list.prev,
					struct urb_priv, queue_list);
			pltd = list_entry(purbp->td_list.prev,
					struct uhci_td, list);
			toggle = uhci_toggle(td_token(pltd)) ^ 1;
		}

		/* Re-run the toggle sequence over every queued successor */
		list_for_each_entry(turbp, &urbp->queue_list, queue_list) {
			if (!turbp->queued)
				break;
			toggle = uhci_fixup_toggle(turbp->urb, toggle);
		}

		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), toggle);
	}

	if (urbp->queued) {
		/* We're somewhere in the middle (or end).  The case where
		 * we're at the head is handled in uhci_remove_qh(). */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);

		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
		if (nurbp->queued)
			pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		else
			/* The next URB happens to be the beginning, so */
			/*  we're the last, end the chain */
			pltd->link = UHCI_PTR_TERM;
	}

	/* urbp->queue_list is handled in uhci_remove_qh() */
}
535
536 static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
537 {
538         struct urb_priv *urbp;
539
540         urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
541         if (!urbp)
542                 return NULL;
543
544         memset((void *)urbp, 0, sizeof(*urbp));
545
546         urbp->inserttime = jiffies;
547         urbp->fsbrtime = jiffies;
548         urbp->urb = urb;
549         
550         INIT_LIST_HEAD(&urbp->td_list);
551         INIT_LIST_HEAD(&urbp->queue_list);
552         INIT_LIST_HEAD(&urbp->urb_list);
553
554         list_add_tail(&urbp->urb_list, &uhci->urb_list);
555
556         urb->hcpriv = urbp;
557
558         return urbp;
559 }
560
/* Append a TD to the tail of the URB's TD list and bind it to the URB. */
static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	td->urb = urb;

	list_add_tail(&td->list, &urbp->td_list);
}
569
570 static void uhci_remove_td_from_urb(struct uhci_td *td)
571 {
572         if (list_empty(&td->list))
573                 return;
574
575         list_del_init(&td->list);
576
577         td->urb = NULL;
578 }
579
/*
 * Tear down an URB's private state: queue all of its TDs for deferred
 * removal (the HC may still reference them this frame) and free the
 * urb_priv itself.  The URB must already be off uhci->urb_list.
 */
static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp;
	unsigned int age;

	urbp = (struct urb_priv *)urb->hcpriv;
	if (!urbp)
		return;

	if (!list_empty(&urbp->urb_list))
		dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
				"or uhci->remove_list!\n", urb);

	/* Flush TDs queued for removal in an earlier frame; the HC is
	 * guaranteed to be done with them by now */
	age = uhci_get_current_frame_number(uhci);
	if (age != uhci->td_remove_age) {
		uhci_free_pending_tds(uhci);
		uhci->td_remove_age = age;
	}

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the TD's*/
	if (list_empty(&uhci->td_remove_list))
		uhci_set_next_interrupt(uhci);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}

	urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}
614
/*
 * Enable Full Speed Bandwidth Reclamation for this URB (unless the URB
 * opted out via URB_NO_FSBR).  When the first FSBR user appears and no
 * turn-off is pending, loop the schedule by pointing the terminating QH
 * back at the FS control queue so the HC keeps polling all frame.
 */
static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
		urbp->fsbr = 1;
		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
			uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	}
}
625
/*
 * Drop this URB's FSBR usage.  When the last user goes away, schedule
 * the actual loop tear-down for FSBR_DELAY later (done elsewhere when
 * fsbrtimeout expires) instead of breaking the loop immediately.
 */
static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
		urbp->fsbr = 0;
		if (!--uhci->fsbr)
			uhci->fsbrtimeout = jiffies + FSBR_DELAY;
	}
}
636
637 /*
638  * Map status to standard result codes
639  *
640  * <status> is (td_status(td) & 0xF60000), a.k.a.
641  * uhci_status_bits(td_status(td)).
642  * Note: <status> does not include the TD_CTRL_NAK bit.
643  * <dir_out> is True for output TDs and False for input TDs.
644  */
645 static int uhci_map_status(int status, int dir_out)
646 {
647         if (!status)
648                 return 0;
649         if (status & TD_CTRL_BITSTUFF)                  /* Bitstuff error */
650                 return -EPROTO;
651         if (status & TD_CTRL_CRCTIMEO) {                /* CRC/Timeout */
652                 if (dir_out)
653                         return -EPROTO;
654                 else
655                         return -EILSEQ;
656         }
657         if (status & TD_CTRL_BABBLE)                    /* Babble */
658                 return -EOVERFLOW;
659         if (status & TD_CTRL_DBUFERR)                   /* Buffer error */
660                 return -ENOSR;
661         if (status & TD_CTRL_STALLED)                   /* Stalled */
662                 return -EPIPE;
663         WARN_ON(status & TD_CTRL_ACTIVE);               /* Active */
664         return 0;
665 }
666
667 /*
668  * Control transfers
669  */
/*
 * Build and schedule the TD/QH chain for a control transfer: one SETUP
 * TD, zero or more DATA TDs, and a final STATUS TD (with IOC set so we
 * get an interrupt at completion).  Returns -EINPROGRESS on success.
 * On -ENOMEM, TDs already allocated remain on urbp->td_list -- presumably
 * freed by the caller's error path via uhci_destroy_urb_priv; confirm.
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	struct uhci_qh *qh, *skelqh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors */
	status = TD_CTRL_ACTIVE | uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	/* SETUP packets are always 8 bytes (explen is length - 1) */
	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status, destination | uhci_explen(7),
		urb->setup_dma);

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TD's
	 */
	while (len > 0) {
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;
	
		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
			data);

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status 
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	/* Status stage must never short-packet-terminate */
	status &= ~TD_CTRL_SPD;

	/* IOC: interrupt when the whole control transfer retires */
	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
		destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * is in the DEFAULT state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state == USB_STATE_DEFAULT)
		skelqh = uhci->skel_ls_control_qh;
	else {
		skelqh = uhci->skel_fs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}

	/* Queue behind an existing URB for this endpoint, or link the QH
	 * into the chosen skeleton queue directly */
	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
788
789 /*
790  * If control-IN transfer was short, the status packet wasn't sent.
791  * This routine changes the element pointer in the QH to point at the
792  * status TD.  It's safe to do this even while the QH is live, because
793  * the hardware only updates the element pointer following a successful
794  * transfer.  The inactive TD for the short packet won't cause an update,
795  * so the pointer won't get overwritten.  The next time the controller
796  * sees this QH, it will send the status packet.
797  */
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;

	/* Remember we did this so uhci_result_control() jumps straight
	 * to the status stage next time */
	urbp->short_control_packet = 1;

	/* The status TD is always the last one on td_list */
	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	urbp->qh->element = cpu_to_le32(td->dma_handle);

	return -EINPROGRESS;
}
810
811
/*
 * Examine a control URB's TDs and decide its outcome: -EINPROGRESS if
 * any stage is still active, 0 on success, a mapped errno on a TD
 * error, or the result of retriggering the status stage after a short
 * control-IN read.  Also accumulates urb->actual_length from the data
 * stage TDs.
 */
static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status;
	int ret = 0;

	if (list_empty(&urbp->td_list))
		return -EINVAL;

	head = &urbp->td_list;

	/* After a short read we only need to inspect the status TD */
	if (urbp->short_control_packet) {
		tmp = head->prev;
		goto status_stage;
	}

	tmp = head->next;
	td = list_entry(tmp, struct uhci_td, list);

	/* The first TD is the SETUP stage, check the status, but skip */
	/*  the count */
	status = uhci_status_bits(td_status(td));
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	urb->actual_length = 0;

	/* The rest of the TD's (but the last) are data */
	tmp = tmp->next;
	while (tmp != head && tmp->next != head) {
		unsigned int ctrlstat;

		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->next;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(ctrlstat);

		if (status)
			goto td_error;

		/* Check to see if we received a short packet */
		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			}

			/* Short IN read: the status stage was skipped by
			 * the HC, so point the QH at it and wait again */
			if (uhci_packetid(td_token(td)) == USB_PID_IN)
				return usb_control_retrigger_status(uhci, urb);
			else
				return 0;
		}
	}

status_stage:
	td = list_entry(tmp, struct uhci_td, list);

	/* Control status stage */
	status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
	/* APC BackUPS Pro kludge */
	/* It tries to send all of the descriptor instead of the amount */
	/*  we requested */
	if (status & TD_CTRL_IOC &&	/* IOC is masked out by uhci_status_bits */
	    status & TD_CTRL_ACTIVE &&
	    status & TD_CTRL_NAK)
		return 0;
#endif

	status = uhci_status_bits(status);
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	/* Dump the failed URB's schedule unless it's just a stall at
	 * debug level 1 */
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}

	return ret;
}
921
/*
 * Common submit for bulk and interrupt
 *
 * Builds the chain of data TDs for @urb, allocates a QH for them, and
 * links the QH into the schedule: behind @eurb if another URB is already
 * queued for the same endpoint, otherwise after the skeleton QH @skelqh.
 *
 * Returns -EINPROGRESS on success (the URB is now on the hardware
 * schedule), -EINVAL for a negative transfer length, or -ENOMEM.
 * Partially-allocated TDs are left on urbp->td_list for the caller's
 * uhci_destroy_urb_priv() to reclaim on failure.
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
{
	struct uhci_td *td;
	struct uhci_qh *qh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	dma_addr_t data = urb->transfer_dma;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	/* Allow up to 3 errors per TD; detect short packets on IN only */
	status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TD's
	 */
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (pktsze >= len) {
			pktsze = len;
			/* A short final packet is only an error if the
			 * caller set URB_SHORT_NOT_OK */
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		/* Hardware encodes the length as (n - 1); the data toggle
		 * comes from the per-endpoint toggle state */
		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		data += pktsze;
		len -= maxsze;

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
	    !len && urb->transfer_buffer_length) {
		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	/* Always breadth first */
	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
1024
/*
 * Common result for bulk and interrupt
 *
 * Walks the URB's TD list accumulating actual_length.  Returns
 * -EINPROGRESS while any TD is still active, 0 on success (including an
 * acceptable short transfer), -EREMOTEIO for a short transfer when
 * URB_SHORT_NOT_OK is set, or a mapped error code from the failing TD.
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status = 0;
	int ret = 0;

	urb->actual_length = 0;

	list_for_each_entry(td, &urbp->td_list, list) {
		unsigned int ctrlstat = td_status(td);

		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		/* Count this TD's data even if it ended in error */
		urb->actual_length += uhci_actual_length(ctrlstat);

		if (status)
			goto td_error;

		/* A short packet terminates the transfer early */
		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			} else
				return 0;
		}
	}

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	/*
	 * Enable this chunk of code if you want to see some more debugging.
	 * But be careful, it has the tendency to starve out khubd and prevent
	 * disconnects from happening successfully if you have a slow debug
	 * log interface (like a serial console).
	 */
#if 0
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}
#endif
	return ret;
}
1087
1088 static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1089 {
1090         int ret;
1091
1092         /* Can't have low-speed bulk transfers */
1093         if (urb->dev->speed == USB_SPEED_LOW)
1094                 return -EINVAL;
1095
1096         ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
1097         if (ret == -EINPROGRESS)
1098                 uhci_inc_fsbr(uhci, urb);
1099
1100         return ret;
1101 }
1102
1103 static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1104 {
1105         /* USB 1.1 interrupt transfers only involve one packet per interval;
1106          * that's the uhci_submit_common() "breadth first" policy.  Drivers
1107          * can submit urbs of any length, but longer ones might need many
1108          * intervals to complete.
1109          */
1110         return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
1111 }
1112
1113 /*
1114  * Isochronous transfers
1115  */
1116 static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
1117 {
1118         struct urb *last_urb = NULL;
1119         struct urb_priv *up;
1120         int ret = 0;
1121
1122         list_for_each_entry(up, &uhci->urb_list, urb_list) {
1123                 struct urb *u = up->urb;
1124
1125                 /* look for pending URB's with identical pipe handle */
1126                 if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
1127                     (u->status == -EINPROGRESS) && (u != urb)) {
1128                         if (!last_urb)
1129                                 *start = u->start_frame;
1130                         last_urb = u;
1131                 }
1132         }
1133
1134         if (last_urb) {
1135                 *end = (last_urb->start_frame + last_urb->number_of_packets *
1136                                 last_urb->interval) & (UHCI_NUMFRAMES-1);
1137                 ret = 0;
1138         } else
1139                 ret = -1;       /* no previous urb found */
1140
1141         return ret;
1142 }
1143
1144 static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
1145 {
1146         int limits;
1147         unsigned int start = 0, end = 0;
1148
1149         if (urb->number_of_packets > 900)       /* 900? Why? */
1150                 return -EFBIG;
1151
1152         limits = isochronous_find_limits(uhci, urb, &start, &end);
1153
1154         if (urb->transfer_flags & URB_ISO_ASAP) {
1155                 if (limits)
1156                         urb->start_frame =
1157                                         (uhci_get_current_frame_number(uhci) +
1158                                                 10) & (UHCI_NUMFRAMES - 1);
1159                 else
1160                         urb->start_frame = end;
1161         } else {
1162                 urb->start_frame &= (UHCI_NUMFRAMES - 1);
1163                 /* FIXME: Sanity check */
1164         }
1165
1166         return 0;
1167 }
1168
/*
 * Isochronous transfers
 *
 * Builds one TD per non-empty packet and inserts each directly into the
 * frame list at its scheduled frame.  Returns -EINPROGRESS on success,
 * -ENOMEM on TD allocation failure, or an error from
 * isochronous_find_start().
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	int i, ret, frame;
	int status, destination;

	/* IOS: isochronous TDs are executed once and never retried */
	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	ret = isochronous_find_start(uhci, urb);
	if (ret)
		return ret;

	frame = urb->start_frame;
	for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
		/* NOTE: zero-length packets get NO TD at all, so the TD
		 * list can be shorter than number_of_packets */
		if (!urb->iso_frame_desc[i].length)
			continue;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
			urb->transfer_dma + urb->iso_frame_desc[i].offset);

		/* Interrupt only when the final packet completes */
		if (i + 1 >= urb->number_of_packets)
			td->status |= cpu_to_le32(TD_CTRL_IOC);

		uhci_insert_td_frame_list(uhci, td, frame);
	}

	return -EINPROGRESS;
}
1206
1207 static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1208 {
1209         struct uhci_td *td;
1210         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1211         int status;
1212         int i, ret = 0;
1213
1214         urb->actual_length = 0;
1215
1216         i = 0;
1217         list_for_each_entry(td, &urbp->td_list, list) {
1218                 int actlength;
1219                 unsigned int ctrlstat = td_status(td);
1220
1221                 if (ctrlstat & TD_CTRL_ACTIVE)
1222                         return -EINPROGRESS;
1223
1224                 actlength = uhci_actual_length(ctrlstat);
1225                 urb->iso_frame_desc[i].actual_length = actlength;
1226                 urb->actual_length += actlength;
1227
1228                 status = uhci_map_status(uhci_status_bits(ctrlstat),
1229                                 usb_pipeout(urb->pipe));
1230                 urb->iso_frame_desc[i].status = status;
1231                 if (status) {
1232                         urb->error_count++;
1233                         ret = status;
1234                 }
1235
1236                 i++;
1237         }
1238
1239         return ret;
1240 }
1241
1242 static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
1243 {
1244         struct urb_priv *up;
1245
1246         /* We don't match Isoc transfers since they are special */
1247         if (usb_pipeisoc(urb->pipe))
1248                 return NULL;
1249
1250         list_for_each_entry(up, &uhci->urb_list, urb_list) {
1251                 struct urb *u = up->urb;
1252
1253                 if (u->dev == urb->dev && u->status == -EINPROGRESS) {
1254                         /* For control, ignore the direction */
1255                         if (usb_pipecontrol(urb->pipe) &&
1256                             (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
1257                                 return u;
1258                         else if (u->pipe == urb->pipe)
1259                                 return u;
1260                 }
1261         }
1262
1263         return NULL;
1264 }
1265
/*
 * hcd->urb_enqueue entry point: queue an URB on the controller.
 *
 * Dispatches to the per-pipe-type submit routine under schedule_lock.
 * Interrupt and isochronous URBs must reserve bus bandwidth first; an
 * interrupt URB appended behind an already-queued one inherits the
 * parent's reservation instead.  Returns 0 on success or a negative
 * errno; on failure the just-allocated urb_priv is torn down again.
 */
static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep,
		struct urb *urb, int mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb *eurb;
	int bustime;

	spin_lock_irqsave(&uhci->schedule_lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto out;

	/* Look for an URB already queued for this endpoint */
	eurb = uhci_find_urb_ep(uhci, urb);

	if (!uhci_alloc_urb_priv(uhci, urb)) {
		ret = -ENOMEM;
		goto out;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(uhci, urb, eurb);
		break;
	case PIPE_INTERRUPT:
		if (!eurb) {
			/* First URB on this endpoint: reserve bandwidth,
			 * but only keep the claim if the submit succeeds */
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, eurb);
				if (ret == -EINPROGRESS)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			urb->bandwidth = eurb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, eurb);
		}
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(uhci, urb, eurb);
		break;
	case PIPE_ISOCHRONOUS:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb);
		if (ret == -EINPROGRESS)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}

	if (ret != -EINPROGRESS) {
		/* Submit failed, so delete it from the urb_list */
		struct urb_priv *urbp = urb->hcpriv;

		list_del_init(&urbp->urb_list);
		uhci_destroy_urb_priv(uhci, urb);
	} else
		/* -EINPROGRESS from a submit routine means success here */
		ret = 0;

out:
	spin_unlock_irqrestore(&uhci->schedule_lock, flags);
	return ret;
}
1337
/*
 * Return the result of a transfer
 *
 * Called (with schedule_lock held) for each pending URB.  If the URB has
 * finished, record its status, release any claimed bandwidth, unlink its
 * TDs/QH from the schedule, and move it to the completion list.  URBs
 * still in progress, or already dequeued by the caller, are left alone.
 */
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
	int ret = -EINPROGRESS;
	struct urb_priv *urbp;

	spin_lock(&urb->lock);

	urbp = (struct urb_priv *)urb->hcpriv;

	if (urb->status != -EINPROGRESS)	/* URB already dequeued */
		goto out;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_result_control(uhci, urb);
		break;
	case PIPE_BULK:
	case PIPE_INTERRUPT:
		ret = uhci_result_common(uhci, urb);
		break;
	case PIPE_ISOCHRONOUS:
		ret = uhci_result_isochronous(uhci, urb);
		break;
	}

	if (ret == -EINPROGRESS)
		goto out;
	urb->status = ret;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		uhci_unlink_generic(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&urbp->queue_list) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		uhci_unlink_generic(uhci, urb);
		break;
	default:
		dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
				"for urb %p\n",
				__FUNCTION__, usb_pipetype(urb->pipe), urb);
	}

	/* Move it from uhci->urb_list to uhci->complete_list */
	uhci_moveto_complete(uhci, urbp);

out:
	spin_unlock(&urb->lock);
}
1402
/*
 * Unlink an URB's TDs and QH from the hardware schedule.
 *
 * Before tearing the URB down we reconstruct the endpoint's data toggle
 * from the TD chain so the next transfer starts with the right toggle
 * (see the algorithm described in the comment below).  The QH itself is
 * handed to uhci_remove_qh() for deferred reclamation by the interrupt
 * handler.
 */
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head;
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int prevactive = 0;

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */

	/*
	 * Now we need to find out what the last successful toggle was
	 * so we can update the local data toggle for the next transfer
	 *
	 * There are 2 ways the last successful completed TD is found:
	 *
	 * 1) The TD is NOT active and the actual length < expected length
	 * 2) The TD is NOT active and it's the last TD in the chain
	 *
	 * and a third way the first uncompleted TD is found:
	 *
	 * 3) The TD is active and the previous TD is NOT active
	 *
	 * Control and Isochronous ignore the toggle, so this is safe
	 * for all types
	 *
	 * FIXME: The toggle fixups won't be 100% reliable until we
	 * change over to using a single queue for each endpoint and
	 * stop the queue before unlinking.
	 */
	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		unsigned int ctrlstat = td_status(td);

		/* Cases 1 and 2: next transfer uses the opposite of this
		 * TD's toggle; case 3: it reuses this TD's toggle */
		if (!(ctrlstat & TD_CTRL_ACTIVE) &&
				(uhci_actual_length(ctrlstat) <
				 uhci_expected_length(td_token(td)) ||
				td->list.next == head))
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)) ^ 1);
		else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)));

		prevactive = ctrlstat & TD_CTRL_ACTIVE;
	}

	uhci_delete_queued_urb(uhci, urb);

	/* The interrupt loop will reclaim the QH's */
	uhci_remove_qh(uhci, urbp->qh);
	urbp->qh = NULL;
}
1457
/*
 * hcd->urb_dequeue entry point: cancel a queued URB.
 *
 * Unlinks the URB from the schedule and parks it on urb_remove_list,
 * where it stays until the frame counter advances (proving the HC is no
 * longer referencing its TDs) and the removal interrupt fires.
 */
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	unsigned int age;

	spin_lock_irqsave(&uhci->schedule_lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;
	list_del_init(&urbp->urb_list);

	uhci_unlink_generic(uhci, urb);

	/* If a frame boundary has passed since the last batch was queued,
	 * the pending removals are now safe to complete */
	age = uhci_get_current_frame_number(uhci);
	if (age != uhci->urb_remove_age) {
		uhci_remove_pending_urbps(uhci);
		uhci->urb_remove_age = age;
	}

	/* If we're the first, set the next interrupt bit */
	if (list_empty(&uhci->urb_remove_list))
		uhci_set_next_interrupt(uhci);
	list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);

done:
	spin_unlock_irqrestore(&uhci->schedule_lock, flags);
	return 0;
}
1488
/*
 * Demote an URB that has been idle too long out of full-speed bandwidth
 * reclamation: switch most of its TD links from breadth-first to
 * depth-first so the HC stops spinning on it every frame.
 */
static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *head;
	struct uhci_td *td;
	int count = 0;

	uhci_dec_fsbr(uhci, urb);

	/* Remember the timeout so we don't demote this URB twice */
	urbp->fsbr_timeout = 1;

	/*
	 * Ideally we would want to fix qh->element as well, but it's
	 * read/write by the HC, so that can introduce a race. It's not
	 * really worth the hassle
	 */

	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		/*
		 * Make sure we don't do the last one (since it'll have the
		 * TERM bit set) as well as we skip every so many TD's to
		 * make sure it doesn't hog the bandwidth
		 */
		if (td->list.next != head && (count % DEPTH_INTERVAL) ==
				(DEPTH_INTERVAL - 1))
			td->link |= UHCI_PTR_DEPTH;

		count++;
	}

	return 0;
}
1522
/*
 * uhci_get_current_frame_number()
 *
 * returns the current frame number for a USB bus/controller, read
 * straight from the controller's USBFRNUM I/O register.
 */
static unsigned int uhci_get_current_frame_number(struct uhci_hcd *uhci)
{
	return inw(uhci->io_addr + USBFRNUM);
}
1532
1533 static int init_stall_timer(struct usb_hcd *hcd);
1534
/*
 * Periodic watchdog timer (rearmed every 100 ms by init_stall_timer).
 *
 * Completes URB removals that have aged past a frame boundary, demotes
 * URBs whose FSBR grant has been idle too long, finishes turning FSBR
 * off once its grace period expires, and drives root-hub suspend/resume
 * state transitions.
 */
static void stall_callback(unsigned long ptr)
{
	struct usb_hcd *hcd = (struct usb_hcd *)ptr;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	struct urb_priv *up;
	unsigned long flags;
	int called_uhci_finish_completion = 0;

	spin_lock_irqsave(&uhci->schedule_lock, flags);
	/* A frame boundary has passed since the removals were queued, so
	 * the HC can no longer be touching them: complete them now */
	if (!list_empty(&uhci->urb_remove_list) &&
	    uhci_get_current_frame_number(uhci) != uhci->urb_remove_age) {
		uhci_remove_pending_urbps(uhci);
		uhci_finish_completion(hcd, NULL);
		called_uhci_finish_completion = 1;
	}

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		spin_lock(&u->lock);

		/* Check if the FSBR timed out */
		if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
			uhci_fsbr_timeout(uhci, u);

		spin_unlock(&u->lock);
	}
	spin_unlock_irqrestore(&uhci->schedule_lock, flags);

	/* Wake up anyone waiting for an URB to complete */
	if (called_uhci_finish_completion)
		wake_up_all(&uhci->waitqh);

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}

	/* Poll for and perform state transitions */
	hc_state_transitions(uhci);
	if (unlikely(uhci->suspended_ports && uhci->state != UHCI_SUSPENDED))
		uhci_check_resume(uhci);

	/* Rearm ourselves for the next pass */
	init_stall_timer(hcd);
}
1581
1582 static int init_stall_timer(struct usb_hcd *hcd)
1583 {
1584         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1585
1586         init_timer(&uhci->stall_timer);
1587         uhci->stall_timer.function = stall_callback;
1588         uhci->stall_timer.data = (unsigned long)hcd;
1589         uhci->stall_timer.expires = jiffies + msecs_to_jiffies(100);
1590         add_timer(&uhci->stall_timer);
1591
1592         return 0;
1593 }
1594
1595 static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
1596 {
1597         struct uhci_qh *qh, *tmp;
1598
1599         list_for_each_entry_safe(qh, tmp, &uhci->qh_remove_list, remove_list) {
1600                 list_del_init(&qh->remove_list);
1601
1602                 uhci_free_qh(uhci, qh);
1603         }
1604 }
1605
1606 static void uhci_free_pending_tds(struct uhci_hcd *uhci)
1607 {
1608         struct uhci_td *td, *tmp;
1609
1610         list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
1611                 list_del_init(&td->remove_list);
1612
1613                 uhci_free_td(uhci, td);
1614         }
1615 }
1616
/*
 * Tear down an URB's private state and give it back to its submitter.
 *
 * Must be called with schedule_lock held; the lock is dropped around
 * usb_hcd_giveback_urb() (the completion handler may resubmit) and
 * reacquired afterwards — hence the sparse __releases/__acquires
 * annotations.
 */
static void
uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
__releases(uhci->schedule_lock)
__acquires(uhci->schedule_lock)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	uhci_destroy_urb_priv(uhci, urb);

	spin_unlock(&uhci->schedule_lock);
	usb_hcd_giveback_urb(hcd, urb, regs);
	spin_lock(&uhci->schedule_lock);
}
1630
1631 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs)
1632 {
1633         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1634         struct urb_priv *urbp, *tmp;
1635
1636         list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
1637                 struct urb *urb = urbp->urb;
1638
1639                 list_del_init(&urbp->urb_list);
1640                 uhci_finish_urb(hcd, urb, regs);
1641         }
1642 }
1643
/*
 * Move all aged-out unlinked URBs to the completion list in one go;
 * uhci_finish_completion() will give them back later.
 */
static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
{

	/* Splice the urb_remove_list onto the end of the complete_list */
	list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
}
1650
/*
 * Interrupt handler: acknowledge the controller's status bits, reclaim
 * QHs/TDs/URBs whose removal has aged past a frame boundary, scan all
 * pending URBs for completion, and give finished ones back.
 */
static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long io_addr = uhci->io_addr;
	unsigned short status;
	struct urb_priv *urbp, *tmp;
	unsigned int age;

	/*
	 * Read the interrupt status, and write it back to clear the
	 * interrupt cause.  Contrary to the UHCI specification, the
	 * "HC Halted" status bit is persistent: it is RO, not R/WC.
	 */
	status = inw(io_addr + USBSTS);
	if (!(status & ~USBSTS_HCH))	/* shared interrupt, not mine */
		return IRQ_NONE;
	outw(status, io_addr + USBSTS);		/* Clear it */

	/* Anything beyond the normal completion/error/resume bits means
	 * the controller itself is in trouble */
	if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
		if (status & USBSTS_HSE)
			dev_err(uhci_dev(uhci), "host system error, "
					"PCI problems?\n");
		if (status & USBSTS_HCPE)
			dev_err(uhci_dev(uhci), "host controller process "
					"error, something bad happened!\n");
		if ((status & USBSTS_HCH) && uhci->state > 0) {
			dev_err(uhci_dev(uhci), "host controller halted, "
					"very bad!\n");
			/* FIXME: Reset the controller, fix the offending TD */
		}
	}

	if (status & USBSTS_RD)
		uhci->resume_detect = 1;

	spin_lock(&uhci->schedule_lock);

	/* Deferred frees are safe once the frame counter has moved on
	 * from when each batch was queued */
	age = uhci_get_current_frame_number(uhci);
	if (age != uhci->qh_remove_age)
		uhci_free_pending_qhs(uhci);
	if (age != uhci->td_remove_age)
		uhci_free_pending_tds(uhci);
	if (age != uhci->urb_remove_age)
		uhci_remove_pending_urbps(uhci);

	/* Keep requesting a frame interrupt only while work remains */
	if (list_empty(&uhci->urb_remove_list) &&
	    list_empty(&uhci->td_remove_list) &&
	    list_empty(&uhci->qh_remove_list))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);

	/* Walk the list of pending URBs to see which ones completed
	 * (must be _safe because uhci_transfer_result() dequeues URBs) */
	list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
		struct urb *urb = urbp->urb;

		/* Checks the status and does all of the magic necessary */
		uhci_transfer_result(uhci, urb);
	}
	uhci_finish_completion(hcd, regs);

	spin_unlock(&uhci->schedule_lock);

	/* Wake up anyone waiting for an URB to complete */
	wake_up_all(&uhci->waitqh);

	return IRQ_HANDLED;
}
1720
1721 static void reset_hc(struct uhci_hcd *uhci)
1722 {
1723         unsigned long io_addr = uhci->io_addr;
1724
1725         /* Turn off PIRQ, SMI, and all interrupts.  This also turns off
1726          * the BIOS's USB Legacy Support.
1727          */
1728         pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0);
1729         outw(0, uhci->io_addr + USBINTR);
1730
1731         /* Global reset for 50ms */
1732         uhci->state = UHCI_RESET;
1733         outw(USBCMD_GRESET, io_addr + USBCMD);
1734         msleep(50);
1735         outw(0, io_addr + USBCMD);
1736
1737         /* Another 10ms delay */
1738         msleep(10);
1739         uhci->resume_detect = 0;
1740 }
1741
1742 static void suspend_hc(struct uhci_hcd *uhci)
1743 {
1744         unsigned long io_addr = uhci->io_addr;
1745
1746         dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
1747         uhci->state = UHCI_SUSPENDED;
1748         uhci->resume_detect = 0;
1749         outw(USBCMD_EGSM, io_addr + USBCMD);
1750 }
1751
/*
 * Advance the global-resume state machine by one step.
 *
 * Called repeatedly (from hc_state_transitions and the resume path)
 * until the controller is running again.  The UHCI_RESUMING_1 case
 * deliberately falls through into UHCI_RESUMING_2.
 */
static void wakeup_hc(struct uhci_hcd *uhci)
{
	unsigned long io_addr = uhci->io_addr;

	switch (uhci->state) {
		case UHCI_SUSPENDED:		/* Start the resume */
			dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);

			/* Global resume for >= 20ms */
			outw(USBCMD_FGR | USBCMD_EGSM, io_addr + USBCMD);
			uhci->state = UHCI_RESUMING_1;
			uhci->state_end = jiffies + msecs_to_jiffies(20);
			break;

		case UHCI_RESUMING_1:		/* End global resume */
			uhci->state = UHCI_RESUMING_2;
			outw(0, io_addr + USBCMD);
			/* Falls through */

		case UHCI_RESUMING_2:		/* Wait for EOP to be sent */
			/* FGR still set means the hardware hasn't finished */
			if (inw(io_addr + USBCMD) & USBCMD_FGR)
				break;

			/* Run for at least 1 second, and
			 * mark it configured with a 64-byte max packet */
			uhci->state = UHCI_RUNNING_GRACE;
			uhci->state_end = jiffies + HZ;
			outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP,
					io_addr + USBCMD);
			break;

		case UHCI_RUNNING_GRACE:	/* Now allowed to suspend */
			uhci->state = UHCI_RUNNING;
			break;

		default:
			break;
	}
}
1791
1792 static int ports_active(struct uhci_hcd *uhci)
1793 {
1794         unsigned long io_addr = uhci->io_addr;
1795         int connection = 0;
1796         int i;
1797
1798         for (i = 0; i < uhci->rh_numports; i++)
1799                 connection |= (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_CCS);
1800
1801         return connection;
1802 }
1803
1804 static int suspend_allowed(struct uhci_hcd *uhci)
1805 {
1806         unsigned long io_addr = uhci->io_addr;
1807         int i;
1808
1809         if (to_pci_dev(uhci_dev(uhci))->vendor != PCI_VENDOR_ID_INTEL)
1810                 return 1;
1811
1812         /* Some of Intel's USB controllers have a bug that causes false
1813          * resume indications if any port has an over current condition.
1814          * To prevent problems, we will not allow a global suspend if
1815          * any ports are OC.
1816          *
1817          * Some motherboards using Intel's chipsets (but not using all
1818          * the USB ports) appear to hardwire the over current inputs active
1819          * to disable the USB ports.
1820          */
1821
1822         /* check for over current condition on any port */
1823         for (i = 0; i < uhci->rh_numports; i++) {
1824                 if (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_OC)
1825                         return 0;
1826         }
1827
1828         return 1;
1829 }
1830
1831 static void hc_state_transitions(struct uhci_hcd *uhci)
1832 {
1833         switch (uhci->state) {
1834                 case UHCI_RUNNING:
1835
1836                         /* global suspend if nothing connected for 1 second */
1837                         if (!ports_active(uhci) && suspend_allowed(uhci)) {
1838                                 uhci->state = UHCI_SUSPENDING_GRACE;
1839                                 uhci->state_end = jiffies + HZ;
1840                         }
1841                         break;
1842
1843                 case UHCI_SUSPENDING_GRACE:
1844                         if (ports_active(uhci))
1845                                 uhci->state = UHCI_RUNNING;
1846                         else if (time_after_eq(jiffies, uhci->state_end))
1847                                 suspend_hc(uhci);
1848                         break;
1849
1850                 case UHCI_SUSPENDED:
1851
1852                         /* wakeup if requested by a device */
1853                         if (uhci->resume_detect)
1854                                 wakeup_hc(uhci);
1855                         break;
1856
1857                 case UHCI_RESUMING_1:
1858                 case UHCI_RESUMING_2:
1859                 case UHCI_RUNNING_GRACE:
1860                         if (time_after_eq(jiffies, uhci->state_end))
1861                                 wakeup_hc(uhci);
1862                         break;
1863
1864                 default:
1865                         break;
1866         }
1867 }
1868
/*
 * Reset and (re)start the controller hardware: bring it out of
 * HCRESET, re-enable PIRQ routing and all interrupt sources, load
 * the frame list base address, and set the Run/Stop bit.
 *
 * Returns 0 on success or -ETIMEDOUT if the reset bit never clears.
 */
static int start_hc(struct uhci_hcd *uhci)
{
	unsigned long io_addr = uhci->io_addr;
	int timeout = 10;

	/*
	 * Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(USBCMD_HCRESET, io_addr + USBCMD);
	/* HCRESET is self-clearing; poll for completion */
	while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
		if (--timeout < 0) {
			dev_err(uhci_dev(uhci), "USBCMD_HCRESET timed out!\n");
			return -ETIMEDOUT;
		}
		msleep(1);
	}

	/* Turn on PIRQ and all interrupts */
	pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
			USBLEGSUP_DEFAULT);
	outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
		io_addr + USBINTR);

	/* Start at frame 0 */
	outw(0, io_addr + USBFRNUM);
	outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);

	/* Run and mark it configured with a 64-byte max packet */
	uhci->state = UHCI_RUNNING_GRACE;
	uhci->state_end = jiffies + HZ;
	outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);

	uhci_to_hcd(uhci)->state = USB_STATE_RUNNING;
	return 0;
}
1907
1908 /*
1909  * De-allocate all resources..
1910  */
1911 static void release_uhci(struct uhci_hcd *uhci)
1912 {
1913         int i;
1914
1915         for (i = 0; i < UHCI_NUM_SKELQH; i++)
1916                 if (uhci->skelqh[i]) {
1917                         uhci_free_qh(uhci, uhci->skelqh[i]);
1918                         uhci->skelqh[i] = NULL;
1919                 }
1920
1921         if (uhci->term_td) {
1922                 uhci_free_td(uhci, uhci->term_td);
1923                 uhci->term_td = NULL;
1924         }
1925
1926         if (uhci->qh_pool) {
1927                 dma_pool_destroy(uhci->qh_pool);
1928                 uhci->qh_pool = NULL;
1929         }
1930
1931         if (uhci->td_pool) {
1932                 dma_pool_destroy(uhci->td_pool);
1933                 uhci->td_pool = NULL;
1934         }
1935
1936         if (uhci->fl) {
1937                 dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
1938                                 uhci->fl, uhci->fl->dma_handle);
1939                 uhci->fl = NULL;
1940         }
1941
1942         if (uhci->dentry) {
1943                 debugfs_remove(uhci->dentry);
1944                 uhci->dentry = NULL;
1945         }
1946 }
1947
/*
 * hc_driver .reset hook: record the I/O base and wrest the hardware
 * away from the BIOS before any further setup.  Always returns 0.
 */
static int uhci_reset(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	/* hcd->regs was mapped by the USB core; cache it as a port base */
	uhci->io_addr = (unsigned long) hcd->regs;

	/* Kick BIOS off this hardware and reset, so we won't get
	 * interrupts from any previous setup.
	 */
	reset_hc(uhci);
	return 0;
}
1960
/*
 * Allocate a frame list, and then setup the skeleton
 *
 * The hardware doesn't really know any difference
 * in the queues, but the order does matter for the
 * protocols higher up. The order is:
 *
 *  - any isochronous events handled before any
 *    of the queues. We don't do that here, because
 *    we'll create the actual TD entries on demand.
 *  - The first queue is the interrupt queue.
 *  - The second queue is the control queue, split into low- and full-speed
 *  - The third queue is bulk queue.
 *  - The fourth queue is the bandwidth reclamation queue, which loops back
 *    to the full-speed control queue.
 *
 * On any failure, the goto-cleanup chain at the bottom releases
 * everything allocated so far, in reverse order, and returns a
 * negative errno.
 */
static int uhci_start(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int retval = -EBUSY;
	int i, port;
	unsigned io_size;
	dma_addr_t dma_handle;
	struct usb_device *udev;
	struct dentry *dentry;

	io_size = pci_resource_len(to_pci_dev(uhci_dev(uhci)), hcd->region);

	dentry = debugfs_create_file(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root, uhci, &uhci_debug_operations);
	if (!dentry) {
		dev_err(uhci_dev(uhci), "couldn't create uhci debugfs entry\n");
		retval = -ENOMEM;
		goto err_create_debug_entry;
	}
	uhci->dentry = dentry;

	uhci->fsbr = 0;
	uhci->fsbrtimeout = 0;

	spin_lock_init(&uhci->schedule_lock);
	INIT_LIST_HEAD(&uhci->qh_remove_list);

	INIT_LIST_HEAD(&uhci->td_remove_list);

	INIT_LIST_HEAD(&uhci->urb_remove_list);

	INIT_LIST_HEAD(&uhci->urb_list);

	INIT_LIST_HEAD(&uhci->complete_list);

	init_waitqueue_head(&uhci->waitqh);

	/* Frame list must be in DMA-coherent memory; the HC reads it */
	uhci->fl = dma_alloc_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
			&dma_handle, 0);
	if (!uhci->fl) {
		dev_err(uhci_dev(uhci), "unable to allocate "
				"consistent memory for frame list\n");
		goto err_alloc_fl;
	}

	memset((void *)uhci->fl, 0, sizeof(*uhci->fl));

	uhci->fl->dma_handle = dma_handle;

	/* 16-byte alignment is required by the UHCI link-pointer format */
	uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
			sizeof(struct uhci_td), 16, 0);
	if (!uhci->td_pool) {
		dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
		goto err_create_td_pool;
	}

	uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
			sizeof(struct uhci_qh), 16, 0);
	if (!uhci->qh_pool) {
		dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
		goto err_create_qh_pool;
	}

	/* Initialize the root hub */

	/* UHCI specs says devices must have 2 ports, but goes on to say */
	/*  they may have more but give no way to determine how many they */
	/*  have. However, according to the UHCI spec, Bit 7 is always set */
	/*  to 1. So we try to use this to our advantage */
	for (port = 0; port < (io_size - 0x10) / 2; port++) {
		unsigned int portstatus;

		portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
		if (!(portstatus & 0x0080))
			break;
	}
	if (debug)
		dev_info(uhci_dev(uhci), "detected %d ports\n", port);

	/* This is experimental so anything less than 2 or greater than 8 is */
	/*  something weird and we'll ignore it */
	if (port < 2 || port > UHCI_RH_MAXCHILD) {
		dev_info(uhci_dev(uhci), "port count misdetected? "
				"forcing to 2 ports\n");
		port = 2;
	}

	uhci->rh_numports = port;

	udev = usb_alloc_dev(NULL, &hcd->self, 0);
	if (!udev) {
		dev_err(uhci_dev(uhci), "unable to allocate root hub\n");
		goto err_alloc_root_hub;
	}

	uhci->term_td = uhci_alloc_td(uhci, udev);
	if (!uhci->term_td) {
		dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
		goto err_alloc_term_td;
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		uhci->skelqh[i] = uhci_alloc_qh(uhci, udev);
		if (!uhci->skelqh[i]) {
			dev_err(uhci_dev(uhci), "unable to allocate QH\n");
			goto err_alloc_skelqh;
		}
	}

	/*
	 * 8 Interrupt queues; link all higher int queues to int1,
	 * then link int1 to control and control to bulk
	 */
	uhci->skel_int128_qh->link =
			uhci->skel_int64_qh->link =
			uhci->skel_int32_qh->link =
			uhci->skel_int16_qh->link =
			uhci->skel_int8_qh->link =
			uhci->skel_int4_qh->link =
			uhci->skel_int2_qh->link =
			cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;

	uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_fs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;

	/* This dummy TD is to work around a bug in Intel PIIX controllers */
	uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
		(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
	uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);

	uhci->skel_term_qh->link = UHCI_PTR_TERM;
	uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);

	/*
	 * Fill the frame list: make all entries point to the proper
	 * interrupt queue.
	 *
	 * The interrupt queues will be interleaved as evenly as possible.
	 * There's not much to be done about period-1 interrupts; they have
	 * to occur in every frame.  But we can schedule period-2 interrupts
	 * in odd-numbered frames, period-4 interrupts in frames congruent
	 * to 2 (mod 4), and so on.  This way each frame only has two
	 * interrupt QHs, which will help spread out bandwidth utilization.
	 */
	for (i = 0; i < UHCI_NUMFRAMES; i++) {
		int irq;

		/*
		 * ffs (Find First bit Set) does exactly what we need:
		 * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[6],
		 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc.
		 * ffs > 6 => not on any high-period queue, so use
		 *	skel_int1_qh = skelqh[7].
		 * Add UHCI_NUMFRAMES to insure at least one bit is set.
		 */
		irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES);
		if (irq < 0)
			irq = 7;

		/* Only place we don't use the frame list routines */
		uhci->fl->frame[i] = UHCI_PTR_QH |
				cpu_to_le32(uhci->skelqh[irq]->dma_handle);
	}

	/*
	 * Some architectures require a full mb() to enforce completion of
	 * the memory writes above before the I/O transfers in start_hc().
	 */
	mb();
	if ((retval = start_hc(uhci)) != 0)
		goto err_alloc_skelqh;

	init_stall_timer(hcd);

	udev->speed = USB_SPEED_FULL;

	if (hcd_register_root(udev, hcd) != 0) {
		dev_err(uhci_dev(uhci), "unable to start root hub\n");
		retval = -ENOMEM;
		goto err_start_root_hub;
	}

	return 0;

/*
 * error exits: each label undoes the allocations made before the
 * corresponding failure point, in reverse order of acquisition.
 */
err_start_root_hub:
	reset_hc(uhci);

	del_timer_sync(&uhci->stall_timer);

err_alloc_skelqh:
	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		if (uhci->skelqh[i]) {
			uhci_free_qh(uhci, uhci->skelqh[i]);
			uhci->skelqh[i] = NULL;
		}

	uhci_free_td(uhci, uhci->term_td);
	uhci->term_td = NULL;

err_alloc_term_td:
	usb_put_dev(udev);

err_alloc_root_hub:
	dma_pool_destroy(uhci->qh_pool);
	uhci->qh_pool = NULL;

err_create_qh_pool:
	dma_pool_destroy(uhci->td_pool);
	uhci->td_pool = NULL;

err_create_td_pool:
	dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
			uhci->fl, uhci->fl->dma_handle);
	uhci->fl = NULL;

err_alloc_fl:
	debugfs_remove(uhci->dentry);
	uhci->dentry = NULL;

err_create_debug_entry:
	return retval;
}
2203
/*
 * hc_driver .stop hook: halt the controller, flush all pending work
 * under the schedule lock, and free every driver resource.
 */
static void uhci_stop(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	del_timer_sync(&uhci->stall_timer);

	/*
	 * At this point, we're guaranteed that no new connects can be made
	 * to this bus since there are no more parents
	 */

	reset_hc(uhci);

	spin_lock_irq(&uhci->schedule_lock);
	uhci_free_pending_qhs(uhci);
	uhci_free_pending_tds(uhci);
	uhci_remove_pending_urbps(uhci);
	uhci_finish_completion(hcd, NULL);

	/* second free pass -- presumably catches entries queued by the
	 * completion handling above; TODO confirm against the helpers */
	uhci_free_pending_qhs(uhci);
	uhci_free_pending_tds(uhci);
	spin_unlock_irq(&uhci->schedule_lock);

	/* Wake up anyone waiting for an URB to complete */
	wake_up_all(&uhci->waitqh);

	release_uhci(uhci);
}
2232
2233 #ifdef CONFIG_PM
/*
 * PM suspend hook: globally suspend the controller when that is safe,
 * saving the current frame number so it can be restored on resume;
 * otherwise fall back to a full reset.  Always returns 0.
 */
static int uhci_suspend(struct usb_hcd *hcd, u32 state)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	/* Don't try to suspend broken motherboards, reset instead */
	if (suspend_allowed(uhci)) {
		suspend_hc(uhci);
		/* USBFRNUM holds a 10-bit frame counter; mask accordingly */
		uhci->saved_framenumber =
				inw(uhci->io_addr + USBFRNUM) & 0x3ff;
	} else
		reset_hc(uhci);
	return 0;
}
2247
/*
 * PM resume hook.  If the controller was left in global suspend,
 * reprogram the registers that may have been lost across the PM
 * cycle and let the resume-detect state machine finish the wakeup.
 * Otherwise (it was reset at suspend time) do a full reset + restart.
 * Returns 0 or a negative errno from start_hc().
 */
static int uhci_resume(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int rc;

	pci_set_master(to_pci_dev(uhci_dev(uhci)));

	if (uhci->state == UHCI_SUSPENDED) {

		/*
		 * Some systems don't maintain the UHCI register values
		 * during a PM suspend/resume cycle, so reinitialize
		 * the Frame Number, Framelist Base Address, Interrupt
		 * Enable, and Legacy Support registers.
		 */
		pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
				0);
		outw(uhci->saved_framenumber, uhci->io_addr + USBFRNUM);
		outl(uhci->fl->dma_handle, uhci->io_addr + USBFLBASEADD);
		outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC |
				USBINTR_SP, uhci->io_addr + USBINTR);
		/* force the wakeup state machine to run */
		uhci->resume_detect = 1;
		pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
				USBLEGSUP_DEFAULT);
	} else {
		reset_hc(uhci);
		if ((rc = start_hc(uhci)) != 0)
			return rc;
	}
	hcd->state = USB_STATE_RUNNING;
	return 0;
}
2280 #endif
2281
/* Wait until all the URBs for a particular device/endpoint are gone */
static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	/* NOTE(review): an interruptible wait returns early if the task
	 * is signalled, potentially leaving URBs queued on the endpoint
	 * -- confirm callers tolerate that, or an uninterruptible wait
	 * may be needed here. */
	wait_event_interruptible(uhci->waitqh, list_empty(&ep->urb_list));
}
2290
/* hc_driver .get_frame_number hook: report the current frame number. */
static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	return uhci_get_current_frame_number(uhci);
}
2295
/* Driver name, shared by the hc_driver and pci_driver tables below */
static const char hcd_name[] = "uhci_hcd";

/* Operations table handed to the USB core for each UHCI controller */
static const struct hc_driver uhci_driver = {
	.description =		hcd_name,
	.product_desc =		"UHCI Host Controller",
	.hcd_priv_size =	sizeof(struct uhci_hcd),

	/* Generic hardware linkage */
	.irq =			uhci_irq,
	.flags =		HCD_USB11,

	/* Basic lifecycle operations */
	.reset =		uhci_reset,
	.start =		uhci_start,
#ifdef CONFIG_PM
	.suspend =		uhci_suspend,
	.resume =		uhci_resume,
#endif
	.stop =			uhci_stop,

	.urb_enqueue =		uhci_urb_enqueue,
	.urb_dequeue =		uhci_urb_dequeue,

	.endpoint_disable =	uhci_hcd_endpoint_disable,
	.get_frame_number =	uhci_hcd_get_frame_number,

	.hub_status_data =	uhci_hub_status_data,
	.hub_control =		uhci_hub_control,
};
2325
/* Match every PCI device with the serial-bus/USB/UHCI class code */
static const struct pci_device_id uhci_pci_ids[] = { {
	/* handle any USB UHCI controller */
	PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
	.driver_data =	(unsigned long) &uhci_driver,
	}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, uhci_pci_ids);

/* PCI glue: probe/remove/PM are delegated to the usb_hcd_pci_* core */
static struct pci_driver uhci_pci_driver = {
	.name =		(char *)hcd_name,
	.id_table =	uhci_pci_ids,

	.probe =	usb_hcd_pci_probe,
	.remove =	usb_hcd_pci_remove,

#ifdef	CONFIG_PM
	.suspend =	usb_hcd_pci_suspend,
	.resume =	usb_hcd_pci_resume,
#endif	/* PM */
};
2347  
2348 static int __init uhci_hcd_init(void)
2349 {
2350         int retval = -ENOMEM;
2351
2352         printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");
2353
2354         if (usb_disabled())
2355                 return -ENODEV;
2356
2357         if (debug) {
2358                 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
2359                 if (!errbuf)
2360                         goto errbuf_failed;
2361         }
2362
2363         uhci_debugfs_root = debugfs_create_dir("uhci", NULL);
2364         if (!uhci_debugfs_root)
2365                 goto debug_failed;
2366
2367         uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
2368                 sizeof(struct urb_priv), 0, 0, NULL, NULL);
2369         if (!uhci_up_cachep)
2370                 goto up_failed;
2371
2372         retval = pci_register_driver(&uhci_pci_driver);
2373         if (retval)
2374                 goto init_failed;
2375
2376         return 0;
2377
2378 init_failed:
2379         if (kmem_cache_destroy(uhci_up_cachep))
2380                 warn("not all urb_priv's were freed!");
2381
2382 up_failed:
2383         debugfs_remove(uhci_debugfs_root);
2384
2385 debug_failed:
2386         if (errbuf)
2387                 kfree(errbuf);
2388
2389 errbuf_failed:
2390
2391         return retval;
2392 }
2393
2394 static void __exit uhci_hcd_cleanup(void) 
2395 {
2396         pci_unregister_driver(&uhci_pci_driver);
2397         
2398         if (kmem_cache_destroy(uhci_up_cachep))
2399                 warn("not all urb_priv's were freed!");
2400
2401         debugfs_remove(uhci_debugfs_root);
2402
2403         if (errbuf)
2404                 kfree(errbuf);
2405 }
2406
/* Module entry/exit points and metadata */
module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");