ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2
[linux-2.6.git] / drivers / usb / host / uhci-hcd.c
1 /*
2  * Universal Host Controller Interface driver for USB.
3  *
4  * Maintainer: Alan Stern <stern@rowland.harvard.edu>
5  *
6  * (C) Copyright 1999 Linus Torvalds
7  * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8  * (C) Copyright 1999 Randy Dunlap
9  * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10  * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11  * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12  * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13  * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14  *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15  * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
16  * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
17  *
18  * Intel documents this fairly well, and as far as I know there
19  * are no royalties or anything like that, but even so there are
20  * people who decided that they want to do the same thing in a
21  * completely different way.
22  *
23  * WARNING! The USB documentation is downright evil. Most of it
24  * is just crap, written by a committee. You're better off ignoring
25  * most of it, the important stuff is:
26  *  - the low-level protocol (fairly simple but lots of small details)
27  *  - working around the horridness of the rest
28  */
29
30 #include <linux/config.h>
31 #ifdef CONFIG_USB_DEBUG
32 #define DEBUG
33 #else
34 #undef DEBUG
35 #endif
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/kernel.h>
39 #include <linux/init.h>
40 #include <linux/delay.h>
41 #include <linux/ioport.h>
42 #include <linux/sched.h>
43 #include <linux/slab.h>
44 #include <linux/smp_lock.h>
45 #include <linux/errno.h>
46 #include <linux/unistd.h>
47 #include <linux/interrupt.h>
48 #include <linux/spinlock.h>
49 #include <linux/proc_fs.h>
50 #include <linux/pm.h>
51 #include <linux/dmapool.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/usb.h>
54
55 #include <asm/bitops.h>
56 #include <asm/uaccess.h>
57 #include <asm/io.h>
58 #include <asm/irq.h>
59 #include <asm/system.h>
60
61 #include "../core/hcd.h"
62 #include "uhci-hcd.h"
63
64 /*
65  * Version Information
66  */
67 #define DRIVER_VERSION "v2.2"
68 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
69 Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
70 Alan Stern"
71 #define DRIVER_DESC "USB Universal Host Controller Interface driver"
72
73 /*
74  * debug = 0, no debugging messages
75  * debug = 1, dump failed URB's except for stalls
76  * debug = 2, dump all failed URB's (including stalls)
77  *            show all queues in /proc/driver/uhci/[pci_addr]
78  * debug = 3, show all TD's in URB's when dumping
79  */
80 #ifdef DEBUG
81 static int debug = 1;
82 #else
83 static int debug = 0;
84 #endif
85 MODULE_PARM(debug, "i");
86 MODULE_PARM_DESC(debug, "Debug level");
87 static char *errbuf;
88 #define ERRBUF_LEN    (32 * 1024)
89
90 #include "uhci-hub.c"
91 #include "uhci-debug.c"
92
93 static kmem_cache_t *uhci_up_cachep;    /* urb_priv */
94
95 static int uhci_get_current_frame_number(struct uhci_hcd *uhci);
96 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
97 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
98
99 static void hc_state_transitions(struct uhci_hcd *uhci);
100
101 /* If a transfer is still active after this much time, turn off FSBR */
102 #define IDLE_TIMEOUT    (HZ / 20)       /* 50 ms */
103 #define FSBR_DELAY      (HZ / 20)       /* 50 ms */
104
105 /* When we timeout an idle transfer for FSBR, we'll switch it over to */
106 /* depth first traversal. We'll do it in groups of this number of TD's */
107 /* to make sure it doesn't hog all of the bandwidth */
108 #define DEPTH_INTERVAL 5
109
110 /*
111  * Technically, updating td->status here is a race, but it's not really a
112  * problem. The worst that can happen is that we set the IOC bit again
113  * generating a spurious interrupt. We could fix this by creating another
114  * QH and leaving the IOC bit always set, but then we would have to play
115  * games with the FSBR code to make sure we get the correct order in all
116  * the cases. I don't think it's worth the effort
117  */
118 static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
119 {
120         uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC); 
121 }
122
123 static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
124 {
125         uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
126 }
127
128 static inline void uhci_moveto_complete(struct uhci_hcd *uhci, 
129                                         struct urb_priv *urbp)
130 {
131         list_move_tail(&urbp->urb_list, &uhci->complete_list);
132 }
133
134 static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
135 {
136         dma_addr_t dma_handle;
137         struct uhci_td *td;
138
139         td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
140         if (!td)
141                 return NULL;
142
143         td->dma_handle = dma_handle;
144
145         td->link = UHCI_PTR_TERM;
146         td->buffer = 0;
147
148         td->frame = -1;
149         td->dev = dev;
150
151         INIT_LIST_HEAD(&td->list);
152         INIT_LIST_HEAD(&td->remove_list);
153         INIT_LIST_HEAD(&td->fl_list);
154
155         usb_get_dev(dev);
156
157         return td;
158 }
159
160 static inline void uhci_fill_td(struct uhci_td *td, __u32 status,
161                 __u32 token, __u32 buffer)
162 {
163         td->status = cpu_to_le32(status);
164         td->token = cpu_to_le32(token);
165         td->buffer = cpu_to_le32(buffer);
166 }
167
/*
 * We insert Isochronous URB's directly into the frame list at the beginning
 *
 * TDs sharing a frame slot are kept on a software ring (td->fl_list);
 * a new TD goes at the tail of that ring, i.e. last in software order
 * but spliced into the hardware chain ahead of the old frame contents.
 */
static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
{
	framenum %= UHCI_NUMFRAMES;

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->fl->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->fl->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		/* Make the new TD's link valid before the controller can
		 * reach it through the previous TD's link */
		td->link = ltd->link;
		mb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		/* First TD in this frame: chain it in front of whatever
		 * the frame pointer currently addresses */
		td->link = uhci->fl->frame[framenum];
		mb();
		uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->fl->frame_cpu[framenum] = td;
	}
}
196
/*
 * Unlink a TD from the frame list and from its frame-slot ring.
 * Inverse of uhci_insert_td_frame_list(); safe to call on a TD that
 * was never inserted.
 */
static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1 && list_empty(&td->fl_list))
		return;

	if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
		/* TD is the head of its frame slot */
		if (list_empty(&td->fl_list)) {
			/* ... and the only one: frame pointer reverts to
			 * whatever this TD was linking to */
			uhci->fl->frame[td->frame] = td->link;
			uhci->fl->frame_cpu[td->frame] = NULL;
		} else {
			/* Promote the next TD on the ring to slot head */
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->fl->frame_cpu[td->frame] = ntd;
		}
	} else {
		/* Middle/end of the ring: bypass this TD in hardware */
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	/* Keep td->link intact until the bypass above is visible, then
	 * terminate it in case the controller is still on this TD */
	mb();
	td->link = UHCI_PTR_TERM;

	list_del_init(&td->fl_list);
	td->frame = -1;
}
227
228 /*
229  * Inserts a td into qh list at the top.
230  */
231 static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, u32 breadth)
232 {
233         struct list_head *tmp, *head;
234         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
235         struct uhci_td *td, *ptd;
236
237         if (list_empty(&urbp->td_list))
238                 return;
239
240         head = &urbp->td_list;
241         tmp = head->next;
242
243         /* Ordering isn't important here yet since the QH hasn't been */
244         /*  inserted into the schedule yet */
245         td = list_entry(tmp, struct uhci_td, list);
246
247         /* Add the first TD to the QH element pointer */
248         qh->element = cpu_to_le32(td->dma_handle) | breadth;
249
250         ptd = td;
251
252         /* Then link the rest of the TD's */
253         tmp = tmp->next;
254         while (tmp != head) {
255                 td = list_entry(tmp, struct uhci_td, list);
256
257                 tmp = tmp->next;
258
259                 ptd->link = cpu_to_le32(td->dma_handle) | breadth;
260
261                 ptd = td;
262         }
263
264         ptd->link = UHCI_PTR_TERM;
265 }
266
267 static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
268 {
269         if (!list_empty(&td->list))
270                 dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
271         if (!list_empty(&td->remove_list))
272                 dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
273         if (!list_empty(&td->fl_list))
274                 dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);
275
276         if (td->dev)
277                 usb_put_dev(td->dev);
278
279         dma_pool_free(uhci->td_pool, td, td->dma_handle);
280 }
281
282 static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
283 {
284         dma_addr_t dma_handle;
285         struct uhci_qh *qh;
286
287         qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
288         if (!qh)
289                 return NULL;
290
291         qh->dma_handle = dma_handle;
292
293         qh->element = UHCI_PTR_TERM;
294         qh->link = UHCI_PTR_TERM;
295
296         qh->dev = dev;
297         qh->urbp = NULL;
298
299         INIT_LIST_HEAD(&qh->list);
300         INIT_LIST_HEAD(&qh->remove_list);
301
302         usb_get_dev(dev);
303
304         return qh;
305 }
306
307 static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
308 {
309         if (!list_empty(&qh->list))
310                 dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
311         if (!list_empty(&qh->remove_list))
312                 dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);
313
314         if (qh->dev)
315                 usb_put_dev(qh->dev);
316
317         dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
318 }
319
/*
 * Append this urb's qh after the last qh in skelqh->list
 *
 * Note that urb_priv.queue_list doesn't have a separate queue head;
 * it's a ring with every element "live".
 *
 * The schedule may be live while we patch it, so the link writes below
 * are carefully ordered: the new QH is made to point onward before
 * anything is made to point at it.
 */
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *tmp;
	struct uhci_qh *lqh;

	/* Grab the last QH */
	lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);

	/*
	 * Patch this endpoint's URB's QHs to point to the next skelqh:
	 *    skelqh --> ... lqh --> newqh --> next skelqh
	 * Do this first, so the HC always sees the right QH after this one.
	 */
	list_for_each (tmp, &urbp->queue_list) {
		struct urb_priv *turbp =
			list_entry(tmp, struct urb_priv, queue_list);

		turbp->qh->link = lqh->link;
	}
	urbp->qh->link = lqh->link;
	wmb();				/* Ordering is important */

	/*
	 * Patch QHs for previous endpoint's queued URBs?  HC goes
	 * here next, not to the next skelqh it now points to.
	 *
	 *    lqh --> td ... --> qh ... --> td --> qh ... --> td
	 *     |                 |                 |
	 *     v                 v                 v
	 *     +<----------------+-----------------+
	 *     v
	 *    newqh --> td ... --> td
	 *     |
	 *     v
	 *    ...
	 *
	 * The HC could see (and use!) any of these as we write them.
	 */
	if (lqh->urbp) {
		list_for_each (tmp, &lqh->urbp->queue_list) {
			struct urb_priv *turbp =
				list_entry(tmp, struct urb_priv, queue_list);

			turbp->qh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
		}
	}
	lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	/* Keep the software list in step with the hardware schedule */
	list_add_tail(&urbp->qh->list, &skelqh->list);
}
377
/*
 * Start removal of QH from schedule; it finishes next frame.
 * TDs should be unlinked before this is called.
 *
 * The QH itself is not freed here: it is parked on qh_remove_list and
 * reclaimed after the next interrupt, once the controller can no longer
 * be executing it.
 */
static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__u32 newlink;		/* already little-endian (copied from links) */

	if (!qh)
		return;

	/*
	 * Only go through the hoops if it's actually linked in
	 */
	if (!list_empty(&qh->list)) {

		/* If our queue is nonempty, make the next URB the head */
		if (!list_empty(&qh->urbp->queue_list)) {
			struct urb_priv *nurbp;

			nurbp = list_entry(qh->urbp->queue_list.next,
					struct urb_priv, queue_list);
			nurbp->queued = 0;
			list_add(&nurbp->qh->list, &qh->list);
			newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		} else
			newlink = qh->link;

		/* Fix up the previous QH's queue to link to either
		 * the new head of this queue or the start of the
		 * next endpoint's queue. */
		pqh = list_entry(qh->list.prev, struct uhci_qh, list);
		pqh->link = newlink;
		if (pqh->urbp) {
			struct list_head *head, *tmp;

			/* Every QH queued behind the previous URB must be
			 * repointed the same way */
			head = &pqh->urbp->queue_list;
			tmp = head->next;
			while (head != tmp) {
				struct urb_priv *turbp =
					list_entry(tmp, struct urb_priv, queue_list);

				tmp = tmp->next;

				turbp->qh->link = newlink;
			}
		}
		mb();

		/* Leave qh->link in case the HC is on the QH now, it will */
		/* continue the rest of the schedule */
		qh->element = UHCI_PTR_TERM;

		list_del_init(&qh->list);
	}

	/* Detach from the URB's queue ring; the urbp no longer owns a QH */
	list_del_init(&qh->urbp->queue_list);
	qh->urbp = NULL;

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the QH */
	if (list_empty(&uhci->qh_remove_list))
		uhci_set_next_interrupt(uhci);

	list_add(&qh->remove_list, &uhci->qh_remove_list);
}
445
446 static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
447 {
448         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
449         struct list_head *head, *tmp;
450
451         head = &urbp->td_list;
452         tmp = head->next;
453         while (head != tmp) {
454                 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
455
456                 tmp = tmp->next;
457
458                 if (toggle)
459                         td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
460                 else
461                         td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
462
463
464                 toggle ^= 1;
465         }
466
467         return toggle;
468 }
469
/* This function will append one URB's QH to another URB's QH. This is for */
/* queuing interrupt, control or bulk transfers */
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
	struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
	struct list_head *tmp;
	struct uhci_td *lltd;

	eurbp = eurb->hcpriv;
	urbp = urb->hcpriv;

	/* Find the first URB in the queue */
	if (eurbp->queued) {
		struct list_head *head = &eurbp->queue_list;

		/* Walk the ring until we hit an entry with ->queued
		 * clear -- that one is the queue head */
		tmp = head->next;
		while (tmp != head) {
			struct urb_priv *turbp =
				list_entry(tmp, struct urb_priv, queue_list);

			if (!turbp->queued)
				break;

			tmp = tmp->next;
		}
	} else
		tmp = &eurbp->queue_list;

	furbp = list_entry(tmp, struct urb_priv, queue_list);
	lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);

	/* Last TD of the last URB currently in the queue: the new QH
	 * gets chained in after it */
	lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);

	/* Control transfers always start with toggle 0 */
	if (!usb_pipecontrol(urb->pipe))
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe),
				uhci_fixup_toggle(urb,
					uhci_toggle(td_token(lltd)) ^ 1));

	/* All qh's in the queue need to link to the next queue */
	urbp->qh->link = eurbp->qh->link;

	mb();			/* Make sure we flush everything */

	/* Now the hardware can see the new QH via the old last TD */
	lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->queue_list, &furbp->queue_list);

	urbp->queued = 1;
}
521
/*
 * Remove an URB from the middle (or end) of its endpoint queue,
 * repairing the data toggles of the URBs behind it and splicing the
 * hardware TD chain around it.  The head-of-queue case and the
 * queue_list unlinking itself are handled in uhci_remove_qh().
 */
static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp, *nurbp;
	struct list_head *head, *tmp;
	struct urb_priv *purbp;
	struct uhci_td *pltd;
	unsigned int toggle;

	urbp = urb->hcpriv;

	/* Nothing queued with us: nothing to fix up */
	if (list_empty(&urbp->queue_list))
		return;

	nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);

	/*
	 * Fix up the toggle for the following URBs in the queue.
	 * Only needed for bulk and interrupt: control and isochronous
	 * endpoints don't propagate toggles between messages.
	 */
	if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
		if (!urbp->queued)
			/* We just set the toggle in uhci_unlink_generic */
			toggle = usb_gettoggle(urb->dev,
					usb_pipeendpoint(urb->pipe),
					usb_pipeout(urb->pipe));
		else {
			/* If we're in the middle of the queue, grab the */
			/* toggle from the TD previous to us */
			purbp = list_entry(urbp->queue_list.prev,
					struct urb_priv, queue_list);
			pltd = list_entry(purbp->td_list.prev,
					struct uhci_td, list);
			toggle = uhci_toggle(td_token(pltd)) ^ 1;
		}

		/* Renumber the toggles of every URB queued after this one,
		 * stopping when the ring wraps back to the queue head */
		head = &urbp->queue_list;
		tmp = head->next;
		while (head != tmp) {
			struct urb_priv *turbp;

			turbp = list_entry(tmp, struct urb_priv, queue_list);
			tmp = tmp->next;

			if (!turbp->queued)
				break;
			toggle = uhci_fixup_toggle(turbp->urb, toggle);
		}

		/* Remember the ending toggle for the next submission */
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), toggle);
	}

	if (urbp->queued) {
		/* We're somewhere in the middle (or end).  The case where
		 * we're at the head is handled in uhci_remove_qh(). */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);

		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
		if (nurbp->queued)
			pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		else
			/* The next URB happens to be the beginning, so */
			/*  we're the last, end the chain */
			pltd->link = UHCI_PTR_TERM;
	}

	/* urbp->queue_list is handled in uhci_remove_qh() */
}
592
593 static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
594 {
595         struct urb_priv *urbp;
596
597         urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
598         if (!urbp)
599                 return NULL;
600
601         memset((void *)urbp, 0, sizeof(*urbp));
602
603         urbp->inserttime = jiffies;
604         urbp->fsbrtime = jiffies;
605         urbp->urb = urb;
606         
607         INIT_LIST_HEAD(&urbp->td_list);
608         INIT_LIST_HEAD(&urbp->queue_list);
609         INIT_LIST_HEAD(&urbp->urb_list);
610
611         list_add_tail(&urbp->urb_list, &uhci->urb_list);
612
613         urb->hcpriv = urbp;
614
615         return urbp;
616 }
617
618 static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
619 {
620         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
621
622         td->urb = urb;
623
624         list_add_tail(&td->list, &urbp->td_list);
625 }
626
627 static void uhci_remove_td_from_urb(struct uhci_td *td)
628 {
629         if (list_empty(&td->list))
630                 return;
631
632         list_del_init(&td->list);
633
634         td->urb = NULL;
635 }
636
/*
 * Tear down an URB's private state.  The TDs are unlinked from the URB
 * and from the schedule but not freed here: they go on td_remove_list
 * and are reclaimed after the forced interrupt, once the controller
 * can no longer be touching them.
 */
static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head, *tmp;
	struct urb_priv *urbp;

	urbp = (struct urb_priv *)urb->hcpriv;
	if (!urbp)
		return;

	if (!list_empty(&urbp->urb_list))
		dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
				"or uhci->remove_list!\n", urb);

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the TD's*/
	if (list_empty(&uhci->td_remove_list))
		uhci_set_next_interrupt(uhci);

	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}

	urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}
670
/* Enable Full Speed Bandwidth Reclamation on behalf of this URB,
 * unless the URB opted out via URB_NO_FSBR.  When the global FSBR
 * count goes 0 -> 1 (and no FSBR timeout is pending), the terminating
 * skeleton QH is pointed back at the full-speed control skeleton,
 * which re-enables reclamation in the hardware schedule. */
static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
		urbp->fsbr = 1;
		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
			uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	}
}
681
/* Drop this URB's FSBR contribution.  When the global count reaches
 * zero we only arm a timeout (FSBR_DELAY); the actual unlinking of the
 * reclamation loop happens elsewhere once the timeout expires --
 * NOTE(review): the unlink site is not in this chunk, verify there. */
static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
		urbp->fsbr = 0;
		if (!--uhci->fsbr)
			uhci->fsbrtimeout = jiffies + FSBR_DELAY;
	}
}
692
693 /*
694  * Map status to standard result codes
695  *
696  * <status> is (td->status & 0xF60000) [a.k.a. uhci_status_bits(td->status)]
697  * Note: status does not include the TD_CTRL_NAK bit.
698  * <dir_out> is True for output TDs and False for input TDs.
699  */
700 static int uhci_map_status(int status, int dir_out)
701 {
702         if (!status)
703                 return 0;
704         if (status & TD_CTRL_BITSTUFF)                  /* Bitstuff error */
705                 return -EPROTO;
706         if (status & TD_CTRL_CRCTIMEO) {                /* CRC/Timeout */
707                 if (dir_out)
708                         return -EPROTO;
709                 else
710                         return -EILSEQ;
711         }
712         if (status & TD_CTRL_BABBLE)                    /* Babble */
713                 return -EOVERFLOW;
714         if (status & TD_CTRL_DBUFERR)                   /* Buffer error */
715                 return -ENOSR;
716         if (status & TD_CTRL_STALLED)                   /* Stalled */
717                 return -EPIPE;
718         WARN_ON(status & TD_CTRL_ACTIVE);               /* Active */
719         return 0;
720 }
721
/*
 * Control transfers
 *
 * Build the TD chain (SETUP + DATA* + STATUS) for a control URB, hang
 * it off a freshly allocated QH and link the QH into the low-speed or
 * full-speed control skeleton.  Returns -EINPROGRESS on success or
 * -ENOMEM if an allocation fails; TDs built before a failure remain on
 * urbp->td_list -- presumably reclaimed by the caller's error path via
 * uhci_destroy_urb_priv() -- NOTE(review): confirm in uhci_urb_enqueue.
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	struct uhci_qh *qh, *skelqh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors */
	status = TD_CTRL_ACTIVE | uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 * (always 8 bytes; explen encodes length - 1, hence the 7)
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status, destination | uhci_explen(7),
		urb->setup_dma);

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TD's, one per maxsze-sized packet
	 */
	while (len > 0) {
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
			data);

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	/* No SPD on the status stage; IOC so we hear about completion */
	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
		destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	/* Low-speed transfers get a different queue, and won't hog the bus */
	if (urb->dev->speed == USB_SPEED_LOW)
		skelqh = uhci->skel_ls_control_qh;
	else {
		skelqh = uhci->skel_fs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}

	/* Queue behind an URB already active on this endpoint, if any */
	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
839
840 /*
841  * If control-IN transfer was short, the status packet wasn't sent.
842  * This routine changes the element pointer in the QH to point at the
843  * status TD.  It's safe to do this even while the QH is live, because
844  * the hardware only updates the element pointer following a successful
845  * transfer.  The inactive TD for the short packet won't cause an update,
846  * so the pointer won't get overwritten.  The next time the controller
847  * sees this QH, it will send the status packet.
848  */
849 static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
850 {
851         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
852         struct uhci_td *td;
853
854         urbp->short_control_packet = 1;
855
856         td = list_entry(urbp->td_list.prev, struct uhci_td, list);
857         urbp->qh->element = td->dma_handle;
858
859         return -EINPROGRESS;
860 }
861
862
/*
 * Examine the TD chain of a control URB and decide whether it is done.
 *
 * Returns -EINPROGRESS while any stage is still active, 0 on success,
 * or a mapped negative error code (via uhci_map_status) on failure.
 * May retrigger the status stage for short control-IN transfers.
 */
static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status;
	int ret = 0;

	if (list_empty(&urbp->td_list))
		return -EINVAL;

	head = &urbp->td_list;

	/* If the status stage was retriggered earlier, only the final
	 * (status) TD still needs to be examined */
	if (urbp->short_control_packet) {
		tmp = head->prev;
		goto status_stage;
	}

	tmp = head->next;
	td = list_entry(tmp, struct uhci_td, list);

	/* The first TD is the SETUP stage, check the status, but skip */
	/*  the count */
	status = uhci_status_bits(td_status(td));
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	urb->actual_length = 0;

	/* The rest of the TD's (but the last) are data */
	tmp = tmp->next;
	while (tmp != head && tmp->next != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		status = uhci_status_bits(td_status(td));
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(td_status(td));

		if (status)
			goto td_error;

		/* Check to see if we received a short packet */
		if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			}

			/* A short IN data stage means the controller never
			 * sent the status packet; re-point the QH at the
			 * status TD so the transfer can finish */
			if (uhci_packetid(td_token(td)) == USB_PID_IN)
				return usb_control_retrigger_status(uhci, urb);
			else
				return 0;
		}
	}

status_stage:
	td = list_entry(tmp, struct uhci_td, list);

	/* Control status stage */
	status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
	/* APC BackUPS Pro kludge */
	/* It tries to send all of the descriptor instead of the amount */
	/*  we requested */
	if (status & TD_CTRL_IOC &&	/* IOC is masked out by uhci_status_bits */
	    status & TD_CTRL_ACTIVE &&
	    status & TD_CTRL_NAK)
		return 0;
#endif

	status = uhci_status_bits(status);
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}

	return ret;
}
969
/*
 * Common submit for bulk and interrupt
 *
 * Builds the TD chain covering urb->transfer_dma (splitting at maxpacket
 * boundaries), optionally appends a trailing zero-length OUT packet,
 * allocates a QH, and either links it behind skelqh or appends it to an
 * already-queued URB (eurb) on the same endpoint.
 * Returns -EINPROGRESS on success, negative errno on failure.
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
{
	struct uhci_td *td;
	struct uhci_qh *qh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	dma_addr_t data = urb->transfer_dma;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;	/* short packets end the transfer */

	/*
	 * Build the DATA TD's
	 */
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (pktsze >= len) {
			/* Last (possibly partial) packet: a short result
			 * here is not an error unless the caller said so */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		/* Note: allocated TDs are tracked on urbp->td_list, so the
		 * caller's error path can reclaim them */
		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		data += pktsze;
		len -= maxsze;

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
	    !len && urb->transfer_buffer_length) {
		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	}

	/* Set the flag on the last packet */
	/* (td still points at the final TD allocated above) */
	if (!(urb->transfer_flags & URB_NO_INTERRUPT))
		td->status |= cpu_to_le32(TD_CTRL_IOC);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	/* Always breadth first */
	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
1068
/*
 * Common result for bulk and interrupt
 *
 * Walks the URB's TD list accumulating urb->actual_length.
 * Returns -EINPROGRESS while any TD is still active, 0 on success
 * (including an allowed short packet), or a mapped error code.
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status = 0;
	int ret = 0;

	urb->actual_length = 0;

	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		status = uhci_status_bits(td_status(td));
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(td_status(td));

		if (status)
			goto td_error;

		/* A short packet terminates the transfer early */
		if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			} else
				return 0;
		}
	}

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));
	if (ret == -EPIPE)
		/* endpoint has stalled - mark it halted */
		usb_endpoint_halt(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)));

err:
	/* 
	 * Enable this chunk of code if you want to see some more debugging.
	 * But be careful, it has the tendency to starve out khubd and prevent
	 * disconnects from happening successfully if you have a slow debug
	 * log interface (like a serial console).
	 */
#if 0
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}
#endif
	return ret;
}
1139
1140 static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1141 {
1142         int ret;
1143
1144         /* Can't have low-speed bulk transfers */
1145         if (urb->dev->speed == USB_SPEED_LOW)
1146                 return -EINVAL;
1147
1148         ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
1149         if (ret == -EINPROGRESS)
1150                 uhci_inc_fsbr(uhci, urb);
1151
1152         return ret;
1153 }
1154
1155 static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1156 {
1157         /* USB 1.1 interrupt transfers only involve one packet per interval;
1158          * that's the uhci_submit_common() "breadth first" policy.  Drivers
1159          * can submit urbs of any length, but longer ones might need many
1160          * intervals to complete.
1161          */
1162         return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
1163 }
1164
1165 /*
1166  * Isochronous transfers
1167  */
1168 static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
1169 {
1170         struct urb *last_urb = NULL;
1171         struct list_head *tmp, *head;
1172         int ret = 0;
1173
1174         head = &uhci->urb_list;
1175         tmp = head->next;
1176         while (tmp != head) {
1177                 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1178                 struct urb *u = up->urb;
1179
1180                 tmp = tmp->next;
1181
1182                 /* look for pending URB's with identical pipe handle */
1183                 if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
1184                     (u->status == -EINPROGRESS) && (u != urb)) {
1185                         if (!last_urb)
1186                                 *start = u->start_frame;
1187                         last_urb = u;
1188                 }
1189         }
1190
1191         if (last_urb) {
1192                 *end = (last_urb->start_frame + last_urb->number_of_packets *
1193                                 last_urb->interval) & (UHCI_NUMFRAMES-1);
1194                 ret = 0;
1195         } else
1196                 ret = -1;       /* no previous urb found */
1197
1198         return ret;
1199 }
1200
1201 static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
1202 {
1203         int limits;
1204         unsigned int start = 0, end = 0;
1205
1206         if (urb->number_of_packets > 900)       /* 900? Why? */
1207                 return -EFBIG;
1208
1209         limits = isochronous_find_limits(uhci, urb, &start, &end);
1210
1211         if (urb->transfer_flags & URB_ISO_ASAP) {
1212                 if (limits) {
1213                         int curframe;
1214
1215                         curframe = uhci_get_current_frame_number(uhci) % UHCI_NUMFRAMES;
1216                         urb->start_frame = (curframe + 10) % UHCI_NUMFRAMES;
1217                 } else
1218                         urb->start_frame = end;
1219         } else {
1220                 urb->start_frame %= UHCI_NUMFRAMES;
1221                 /* FIXME: Sanity check */
1222         }
1223
1224         return 0;
1225 }
1226
/*
 * Isochronous transfers
 *
 * Allocates one TD per (nonzero-length) iso frame descriptor and links
 * each directly into the controller's frame list at its scheduled frame.
 * Returns -EINPROGRESS on success; on -ENOMEM the TDs already allocated
 * remain on urbp->td_list for the caller's error path to reclaim.
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	int i, ret, frame;
	int status, destination;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	ret = isochronous_find_start(uhci, urb);
	if (ret)
		return ret;

	frame = urb->start_frame;
	for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
		/* Zero-length frames get no TD at all */
		if (!urb->iso_frame_desc[i].length)
			continue;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
			urb->transfer_dma + urb->iso_frame_desc[i].offset);

		/* Interrupt only on completion of the final packet */
		if (i + 1 >= urb->number_of_packets)
			td->status |= cpu_to_le32(TD_CTRL_IOC);

		uhci_insert_td_frame_list(uhci, td, frame);
	}

	return -EINPROGRESS;
}
1264
/*
 * Collect per-frame results for an isochronous URB.
 *
 * Returns -EINPROGRESS while any TD is still active; otherwise 0 or the
 * last per-frame error status, with urb->error_count updated.
 *
 * NOTE(review): uhci_submit_isochronous() skips zero-length frames (no TD
 * is created), yet `i` here advances once per TD — if any
 * iso_frame_desc[].length was 0, descriptor indices would be skewed.
 * Verify callers never submit zero-length iso frames.
 */
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = 0;

	i = 0;
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
		int actlength;

		tmp = tmp->next;

		if (td_status(td) & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(td_status(td));
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(td_status(td)),
				usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			/* Per-frame errors don't stop the scan; report the
			 * last one as the overall result */
			urb->error_count++;
			ret = status;
		}

		i++;
	}

	return ret;
}
1303
1304 static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
1305 {
1306         struct list_head *tmp, *head;
1307
1308         /* We don't match Isoc transfers since they are special */
1309         if (usb_pipeisoc(urb->pipe))
1310                 return NULL;
1311
1312         head = &uhci->urb_list;
1313         tmp = head->next;
1314         while (tmp != head) {
1315                 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1316                 struct urb *u = up->urb;
1317
1318                 tmp = tmp->next;
1319
1320                 if (u->dev == urb->dev && u->status == -EINPROGRESS) {
1321                         /* For control, ignore the direction */
1322                         if (usb_pipecontrol(urb->pipe) &&
1323                             (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
1324                                 return u;
1325                         else if (u->pipe == urb->pipe)
1326                                 return u;
1327                 }
1328         }
1329
1330         return NULL;
1331 }
1332
/*
 * hc_driver entry point: queue an URB on this controller.
 *
 * Takes uhci->schedule_lock, dispatches to the per-pipe-type submit
 * routine, and handles periodic-transfer bandwidth accounting.
 * Returns 0 on success (the submit routines signal success with
 * -EINPROGRESS) or a negative errno.
 */
static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, int mem_flags)
{
	int ret = -EINVAL;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb *eurb;
	int bustime;

	spin_lock_irqsave(&uhci->schedule_lock, flags);

	if (urb->status != -EINPROGRESS)	/* URB already unlinked! */
		goto out;

	/* Another in-progress URB on the same endpoint, if any */
	eurb = uhci_find_urb_ep(uhci, urb);

	if (!uhci_alloc_urb_priv(uhci, urb)) {
		ret = -ENOMEM;
		goto out;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(uhci, urb, eurb);
		break;
	case PIPE_INTERRUPT:
		if (!eurb) {
			/* First URB on this endpoint: claim bus time */
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, eurb);
				if (ret == -EINPROGRESS)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			urb->bandwidth = eurb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, eurb);
		}
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(uhci, urb, eurb);
		break;
	case PIPE_ISOCHRONOUS:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb);
		if (ret == -EINPROGRESS)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}

	if (ret != -EINPROGRESS) {
		/* Submit failed, so delete it from the urb_list */
		struct urb_priv *urbp = urb->hcpriv;

		list_del_init(&urbp->urb_list);
		uhci_destroy_urb_priv(uhci, urb);
	} else
		ret = 0;

out:
	spin_unlock_irqrestore(&uhci->schedule_lock, flags);
	return ret;
}
1401
1402 /*
1403  * Return the result of a transfer
1404  */
/*
 * Return the result of a transfer
 *
 * Checks a pending URB for completion; if complete, records the result
 * in urb->status, releases periodic bandwidth as appropriate, unlinks
 * the URB from the schedule, and moves it to uhci->complete_list.
 * Called with uhci->schedule_lock held; also takes urb->lock.
 */
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
	int ret = -EINPROGRESS;
	struct urb_priv *urbp;

	spin_lock(&urb->lock);

	urbp = (struct urb_priv *)urb->hcpriv;

	if (urb->status != -EINPROGRESS)	/* URB already dequeued */
		goto out;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_result_control(uhci, urb);
		break;
	case PIPE_BULK:
	case PIPE_INTERRUPT:
		ret = uhci_result_common(uhci, urb);
		break;
	case PIPE_ISOCHRONOUS:
		ret = uhci_result_isochronous(uhci, urb);
		break;
	}

	if (ret == -EINPROGRESS)
		goto out;
	urb->status = ret;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		uhci_unlink_generic(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&urbp->queue_list) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		uhci_unlink_generic(uhci, urb);
		break;
	default:
		dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
				"for urb %p\n",
				__FUNCTION__, usb_pipetype(urb->pipe), urb);
	}

	/* Move it from uhci->urb_list to uhci->complete_list */
	uhci_moveto_complete(uhci, urbp);

out:
	spin_unlock(&urb->lock);
}
1466
/*
 * Detach an URB from the hardware schedule.
 *
 * Recovers the endpoint's data toggle from the TD chain, removes the
 * URB from any endpoint queue, and schedules its QH for removal (the
 * QH itself is reclaimed later from the interrupt path).
 */
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head, *tmp;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int prevactive = 1;

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */

	/*
	 * Now we need to find out what the last successful toggle was
	 * so we can update the local data toggle for the next transfer
	 *
	 * There's 3 way's the last successful completed TD is found:
	 *
	 * 1) The TD is NOT active and the actual length < expected length
	 * 2) The TD is NOT active and it's the last TD in the chain
	 * 3) The TD is active and the previous TD is NOT active
	 *
	 * Control and Isochronous ignore the toggle, so this is safe
	 * for all types
	 */
	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		/* Cases 1 and 2: toggle for the NEXT transfer is the
		 * complement of this completed TD's toggle */
		if (!(td_status(td) & TD_CTRL_ACTIVE) &&
		    (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td)) ||
		    tmp == head))
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)) ^ 1);
		/* Case 3: this TD never ran, so its own toggle is next */
		else if ((td_status(td) & TD_CTRL_ACTIVE) && !prevactive)
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)));

		prevactive = td_status(td) & TD_CTRL_ACTIVE;
	}

	uhci_delete_queued_urb(uhci, urb);

	/* The interrupt loop will reclaim the QH's */
	uhci_remove_qh(uhci, urbp->qh);
	urbp->qh = NULL;
}
1515
/*
 * hc_driver entry point: cancel a queued URB.
 *
 * Unlinks the URB from the schedule and parks it on urb_remove_list;
 * the interrupt path completes it later.  Always returns 0.
 */
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;

	spin_lock_irqsave(&uhci->schedule_lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;
	list_del_init(&urbp->urb_list);

	uhci_unlink_generic(uhci, urb);

	/* If we're the first, set the next interrupt bit */
	if (list_empty(&uhci->urb_remove_list))
		uhci_set_next_interrupt(uhci);
	list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);

done:
	spin_unlock_irqrestore(&uhci->schedule_lock, flags);
	return 0;
}
1539
/*
 * Called when an URB has held full-speed bandwidth reclamation for too
 * long.  Turns FSBR off for this URB and demotes its TD chain from
 * breadth-first to (mostly) depth-first traversal so it stops hogging
 * the bus.
 */
static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *head, *tmp;
	int count = 0;

	uhci_dec_fsbr(uhci, urb);

	/* Remember the timeout so we don't re-enable FSBR for this URB */
	urbp->fsbr_timeout = 1;

	/*
	 * Ideally we would want to fix qh->element as well, but it's
	 * read/write by the HC, so that can introduce a race. It's not
	 * really worth the hassle
	 */

	head = &urbp->td_list;
	tmp = head->next;
	while (tmp != head) {
		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);

		tmp = tmp->next;

		/*
		 * Make sure we don't do the last one (since it'll have the
		 * TERM bit set) as well as we skip every so many TD's to
		 * make sure it doesn't hog the bandwidth
		 */
		/* NOTE(review): td->link is OR'ed with UHCI_PTR_DEPTH
		 * directly, while td->status writes elsewhere in this file
		 * use cpu_to_le32() — confirm UHCI_PTR_DEPTH is already
		 * defined in little-endian form in uhci-hcd.h */
		if (tmp != head && (count % DEPTH_INTERVAL) == (DEPTH_INTERVAL - 1))
			td->link |= UHCI_PTR_DEPTH;

		count++;
	}

	return 0;
}
1576
1577 /*
1578  * uhci_get_current_frame_number()
1579  *
1580  * returns the current frame number for a USB bus/controller.
1581  */
1582 static int uhci_get_current_frame_number(struct uhci_hcd *uhci)
1583 {
1584         return inw(uhci->io_addr + USBFRNUM);
1585 }
1586
1587 static int init_stall_timer(struct usb_hcd *hcd);
1588
/*
 * Periodic (HZ/10) timer callback: handles FSBR timeouts, URB timeouts,
 * delayed FSBR disable, and controller state transitions, then re-arms
 * itself via init_stall_timer().
 */
static void stall_callback(unsigned long ptr)
{
	struct usb_hcd *hcd = (struct usb_hcd *)ptr;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	struct list_head list, *tmp, *head;
	unsigned long flags;

	INIT_LIST_HEAD(&list);

	spin_lock_irqsave(&uhci->schedule_lock, flags);
	head = &uhci->urb_list;
	tmp = head->next;
	while (tmp != head) {
		struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
		struct urb *u = up->urb;

		tmp = tmp->next;

		spin_lock(&u->lock);

		/* Check if the FSBR timed out */
		if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
			uhci_fsbr_timeout(uhci, u);

		/* Check if the URB timed out */
		/* Timed-out URBs are collected on a private list so they
		 * can be dequeued after schedule_lock is dropped */
		if (u->timeout && u->status == -EINPROGRESS &&
			time_after_eq(jiffies, up->inserttime + u->timeout)) {
			u->status = -ETIMEDOUT;
			list_move_tail(&up->urb_list, &list);
		}

		spin_unlock(&u->lock);
	}
	spin_unlock_irqrestore(&uhci->schedule_lock, flags);

	/* Now dequeue the timed-out URBs without holding schedule_lock
	 * (uhci_urb_dequeue takes it itself) */
	head = &list;
	tmp = head->next;
	while (tmp != head) {
		struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
		struct urb *u = up->urb;

		tmp = tmp->next;

		uhci_urb_dequeue(hcd, u);
	}

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}

	/* Poll for and perform state transitions */
	hc_state_transitions(uhci);

	init_stall_timer(hcd);
}
1646
1647 static int init_stall_timer(struct usb_hcd *hcd)
1648 {
1649         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1650
1651         init_timer(&uhci->stall_timer);
1652         uhci->stall_timer.function = stall_callback;
1653         uhci->stall_timer.data = (unsigned long)hcd;
1654         uhci->stall_timer.expires = jiffies + (HZ / 10);
1655         add_timer(&uhci->stall_timer);
1656
1657         return 0;
1658 }
1659
1660 static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
1661 {
1662         struct list_head *tmp, *head;
1663
1664         head = &uhci->qh_remove_list;
1665         tmp = head->next;
1666         while (tmp != head) {
1667                 struct uhci_qh *qh = list_entry(tmp, struct uhci_qh, remove_list);
1668
1669                 tmp = tmp->next;
1670
1671                 list_del_init(&qh->remove_list);
1672
1673                 uhci_free_qh(uhci, qh);
1674         }
1675 }
1676
1677 static void uhci_free_pending_tds(struct uhci_hcd *uhci)
1678 {
1679         struct list_head *tmp, *head;
1680
1681         head = &uhci->td_remove_list;
1682         tmp = head->next;
1683         while (tmp != head) {
1684                 struct uhci_td *td = list_entry(tmp, struct uhci_td, remove_list);
1685
1686                 tmp = tmp->next;
1687
1688                 list_del_init(&td->remove_list);
1689
1690                 uhci_free_td(uhci, td);
1691         }
1692 }
1693
/*
 * Give a completed URB back to the USB core.
 *
 * Called with uhci->schedule_lock held.  The lock is dropped around
 * usb_hcd_giveback_urb() because the completion handler may resubmit
 * (re-entering this driver), then re-acquired before returning.
 */
static void uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	uhci_destroy_urb_priv(uhci, urb);

	spin_unlock(&uhci->schedule_lock);
	usb_hcd_giveback_urb(hcd, urb, regs);
	spin_lock(&uhci->schedule_lock);
}
1704
/*
 * Give back every URB on the complete_list.
 *
 * Called with uhci->schedule_lock held.  Because uhci_finish_urb()
 * drops the lock, the list may change under us — so the walk restarts
 * from the head after each giveback rather than caching a successor.
 */
static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	struct list_head *tmp, *head;

	head = &uhci->complete_list;
	tmp = head->next;
	while (tmp != head) {
		struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
		struct urb *urb = urbp->urb;

		list_del_init(&urbp->urb_list);
		uhci_finish_urb(hcd, urb, regs);

		/* Restart: the lock was dropped inside uhci_finish_urb() */
		head = &uhci->complete_list;
		tmp = head->next;
	}
}
1723
/* Move all unlinked-but-not-yet-completed URBs to the complete list. */
static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
{

	/* Splice the urb_remove_list onto the end of the complete_list */
	list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
}
1730
/*
 * Interrupt handler: acknowledge the controller's status bits, reclaim
 * pending QHs/TDs/URBs, scan all in-flight URBs for completion, and
 * give back finished ones.  Returns IRQ_NONE for a shared interrupt
 * that wasn't ours.
 */
static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned int io_addr = uhci->io_addr;
	unsigned short status;
	struct list_head *tmp, *head;

	/*
	 * Read the interrupt status, and write it back to clear the
	 * interrupt cause.  Contrary to the UHCI specification, the
	 * "HC Halted" status bit is persistent: it is RO, not R/WC.
	 */
	status = inw(io_addr + USBSTS);
	if (!(status & ~USBSTS_HCH))	/* shared interrupt, not mine */
		return IRQ_NONE;
	outw(status, io_addr + USBSTS);		/* Clear it */

	/* Report fatal conditions; normal completions fall through */
	if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
		if (status & USBSTS_HSE)
			dev_err(uhci_dev(uhci), "host system error, "
					"PCI problems?\n");
		if (status & USBSTS_HCPE)
			dev_err(uhci_dev(uhci), "host controller process "
					"error, something bad happened!\n");
		if ((status & USBSTS_HCH) && uhci->state > 0) {
			dev_err(uhci_dev(uhci), "host controller halted, "
					"very bad!\n");
			/* FIXME: Reset the controller, fix the offending TD */
		}
	}

	if (status & USBSTS_RD)
		uhci->resume_detect = 1;

	spin_lock(&uhci->schedule_lock);

	/* Reclaim structures queued for removal on earlier passes */
	uhci_free_pending_qhs(uhci);
	uhci_free_pending_tds(uhci);
	uhci_remove_pending_urbps(uhci);

	uhci_clear_next_interrupt(uhci);

	/* Walk the list of pending URB's to see which ones completed */
	head = &uhci->urb_list;
	tmp = head->next;
	while (tmp != head) {
		struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
		struct urb *urb = urbp->urb;

		tmp = tmp->next;

		/* Checks the status and does all of the magic necessary */
		uhci_transfer_result(uhci, urb);
	}
	uhci_finish_completion(hcd, regs);

	spin_unlock(&uhci->schedule_lock);

	return IRQ_HANDLED;
}
1791
/*
 * Perform a global USB bus reset of the controller.
 * Sleeps (schedule_timeout), so it must only be called from process
 * context.
 */
static void reset_hc(struct uhci_hcd *uhci)
{
	unsigned int io_addr = uhci->io_addr;

	/* Global reset for 50ms */
	uhci->state = UHCI_RESET;
	outw(USBCMD_GRESET, io_addr + USBCMD);
	/* (HZ*50+999)/1000 rounds the 50ms up to whole jiffies */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout((HZ*50+999) / 1000);
	outw(0, io_addr + USBCMD);

	/* Another 10ms delay */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout((HZ*10+999) / 1000);
	/* Any resume indication from before the reset is now stale */
	uhci->resume_detect = 0;
}
1808
/*
 * Put the controller into global suspend.  Writing only USBCMD_EGSM
 * also clears every other command bit (including Run/Stop), since the
 * full register is overwritten.
 */
static void suspend_hc(struct uhci_hcd *uhci)
{
	unsigned int io_addr = uhci->io_addr;

	dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
	uhci->state = UHCI_SUSPENDED;
	uhci->resume_detect = 0;
	outw(USBCMD_EGSM, io_addr + USBCMD);
}
1818
/*
 * Advance the resume state machine one step:
 *   UHCI_SUSPENDED  -> start a global resume (>= 20ms of FGR)
 *   UHCI_RESUMING_1 -> end the forced resume, then fall through
 *   UHCI_RESUMING_2 -> once FGR clears, restart the schedule
 *   UHCI_RUNNING_GRACE -> grace period over, normal running
 *
 * Driven repeatedly from hc_state_transitions() until the controller
 * is back in UHCI_RUNNING.
 */
static void wakeup_hc(struct uhci_hcd *uhci)
{
	unsigned int io_addr = uhci->io_addr;

	switch (uhci->state) {
		case UHCI_SUSPENDED:		/* Start the resume */
			dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);

			/* Global resume for >= 20ms */
			outw(USBCMD_FGR | USBCMD_EGSM, io_addr + USBCMD);
			uhci->state = UHCI_RESUMING_1;
			/* Round the 20ms up to whole jiffies */
			uhci->state_end = jiffies + (20*HZ+999) / 1000;
			break;

		case UHCI_RESUMING_1:		/* End global resume */
			uhci->state = UHCI_RESUMING_2;
			outw(0, io_addr + USBCMD);
			/* Falls through */

		case UHCI_RESUMING_2:		/* Wait for EOP to be sent */
			if (inw(io_addr + USBCMD) & USBCMD_FGR)
				break;

			/* Run for at least 1 second, and
			 * mark it configured with a 64-byte max packet */
			uhci->state = UHCI_RUNNING_GRACE;
			uhci->state_end = jiffies + HZ;
			outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP,
					io_addr + USBCMD);
			break;

		case UHCI_RUNNING_GRACE:	/* Now allowed to suspend */
			uhci->state = UHCI_RUNNING;
			break;

		default:
			break;
	}
}
1858
1859 static int ports_active(struct uhci_hcd *uhci)
1860 {
1861         unsigned int io_addr = uhci->io_addr;
1862         int connection = 0;
1863         int i;
1864
1865         for (i = 0; i < uhci->rh_numports; i++)
1866                 connection |= (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_CCS);
1867
1868         return connection;
1869 }
1870
1871 static int suspend_allowed(struct uhci_hcd *uhci)
1872 {
1873         unsigned int io_addr = uhci->io_addr;
1874         int i;
1875
1876         if (to_pci_dev(uhci_dev(uhci))->vendor != PCI_VENDOR_ID_INTEL)
1877                 return 1;
1878
1879         /* Some of Intel's USB controllers have a bug that causes false
1880          * resume indications if any port has an over current condition.
1881          * To prevent problems, we will not allow a global suspend if
1882          * any ports are OC.
1883          *
1884          * Some motherboards using Intel's chipsets (but not using all
1885          * the USB ports) appear to hardwire the over current inputs active
1886          * to disable the USB ports.
1887          */
1888
1889         /* check for over current condition on any port */
1890         for (i = 0; i < uhci->rh_numports; i++) {
1891                 if (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_OC)
1892                         return 0;
1893         }
1894
1895         return 1;
1896 }
1897
1898 static void hc_state_transitions(struct uhci_hcd *uhci)
1899 {
1900         switch (uhci->state) {
1901                 case UHCI_RUNNING:
1902
1903                         /* global suspend if nothing connected for 1 second */
1904                         if (!ports_active(uhci) && suspend_allowed(uhci)) {
1905                                 uhci->state = UHCI_SUSPENDING_GRACE;
1906                                 uhci->state_end = jiffies + HZ;
1907                         }
1908                         break;
1909
1910                 case UHCI_SUSPENDING_GRACE:
1911                         if (ports_active(uhci))
1912                                 uhci->state = UHCI_RUNNING;
1913                         else if (time_after_eq(jiffies, uhci->state_end))
1914                                 suspend_hc(uhci);
1915                         break;
1916
1917                 case UHCI_SUSPENDED:
1918
1919                         /* wakeup if requested by a device */
1920                         if (uhci->resume_detect)
1921                                 wakeup_hc(uhci);
1922                         break;
1923
1924                 case UHCI_RESUMING_1:
1925                 case UHCI_RESUMING_2:
1926                 case UHCI_RUNNING_GRACE:
1927                         if (time_after_eq(jiffies, uhci->state_end))
1928                                 wakeup_hc(uhci);
1929                         break;
1930
1931                 default:
1932                         break;
1933         }
1934 }
1935
/*
 * Reset the controller and start it running: program the interrupt
 * enables, frame number, and frame-list base address, then set
 * Run/Stop.  Leaves the driver in the RUNNING_GRACE state for 1
 * second before autosuspend is allowed.
 */
static void start_hc(struct uhci_hcd *uhci)
{
	unsigned int io_addr = uhci->io_addr;
	int timeout = 1000;

	/*
	 * Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(USBCMD_HCRESET, io_addr + USBCMD);
	/* Busy-wait (bounded) for the HC to clear the self-reset bit */
	while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
		if (!--timeout) {
			dev_err(uhci_dev(uhci), "USBCMD_HCRESET timed out!\n");
			break;
		}
	}

	/* Turn on all interrupts */
	outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
		io_addr + USBINTR);

	/* Start at frame 0 */
	outw(0, io_addr + USBFRNUM);
	outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);

	/* Run and mark it configured with a 64-byte max packet */
	uhci->state = UHCI_RUNNING_GRACE;
	uhci->state_end = jiffies + HZ;
	outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);

	uhci->hcd.state = USB_STATE_RUNNING;
}
1970
1971 /*
1972  * De-allocate all resources..
1973  */
/*
 * Free every schedule structure, DMA pool, the frame list, and the
 * proc entry.  Safe on partially-initialized state: each pointer is
 * tested before use and NULLed afterwards, so repeated calls or calls
 * from error paths are harmless.
 */
static void release_uhci(struct uhci_hcd *uhci)
{
	int i;

	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		if (uhci->skelqh[i]) {
			uhci_free_qh(uhci, uhci->skelqh[i]);
			uhci->skelqh[i] = NULL;
		}

	if (uhci->term_td) {
		uhci_free_td(uhci, uhci->term_td);
		uhci->term_td = NULL;
	}

	/* QHs/TDs must be freed (above) before their pools are destroyed */
	if (uhci->qh_pool) {
		dma_pool_destroy(uhci->qh_pool);
		uhci->qh_pool = NULL;
	}

	if (uhci->td_pool) {
		dma_pool_destroy(uhci->td_pool);
		uhci->td_pool = NULL;
	}

	if (uhci->fl) {
		dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
				uhci->fl, uhci->fl->dma_handle);
		uhci->fl = NULL;
	}

#ifdef CONFIG_PROC_FS
	if (uhci->proc_entry) {
		remove_proc_entry(uhci->hcd.self.bus_name, uhci_proc_root);
		uhci->proc_entry = NULL;
	}
#endif
}
2012
/*
 * hc_driver .reset hook: quiesce the hardware before uhci_start().
 * Disables interrupts, resets the controller, and disables BIOS
 * legacy-keyboard emulation via the USBLEGSUP config register.
 * Always returns 0.
 */
static int uhci_reset(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	uhci->io_addr = (unsigned long) hcd->regs;

	/* Turn off all interrupts */
	outw(0, uhci->io_addr + USBINTR);

	/* Maybe kick BIOS off this hardware.  Then reset, so we won't get
	 * interrupts from any previous setup.
	 */
	reset_hc(uhci);
	pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
			USBLEGSUP_DEFAULT);
	return 0;
}
2030
2031 /*
2032  * Allocate a frame list, and then setup the skeleton
2033  *
2034  * The hardware doesn't really know any difference
2035  * in the queues, but the order does matter for the
2036  * protocols higher up. The order is:
2037  *
2038  *  - any isochronous events handled before any
2039  *    of the queues. We don't do that here, because
2040  *    we'll create the actual TD entries on demand.
2041  *  - The first queue is the interrupt queue.
2042  *  - The second queue is the control queue, split into low- and full-speed
2043  *  - The third queue is bulk queue.
2044  *  - The fourth queue is the bandwidth reclamation queue, which loops back
2045  *    to the full-speed control queue.
2046  */
/*
 * hc_driver .start hook: allocate the frame list, DMA pools, root
 * hub, and skeleton QHs; wire the skeleton schedule together; then
 * start the controller and register the root hub.  On failure,
 * resources are unwound in reverse order via the goto chain below.
 * Returns 0 on success or a negative errno.
 */
static int uhci_start(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int retval = -EBUSY;
	int i, port;
	unsigned io_size;
	dma_addr_t dma_handle;
	struct usb_device *udev;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *ent;
#endif

	io_size = pci_resource_len(to_pci_dev(uhci_dev(uhci)), hcd->region);

#ifdef CONFIG_PROC_FS
	ent = create_proc_entry(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
	if (!ent) {
		dev_err(uhci_dev(uhci), "couldn't create uhci proc entry\n");
		retval = -ENOMEM;
		goto err_create_proc_entry;
	}

	ent->data = uhci;
	ent->proc_fops = &uhci_proc_operations;
	ent->size = 0;
	uhci->proc_entry = ent;
#endif

	uhci->fsbr = 0;
	uhci->fsbrtimeout = 0;

	/* Initialize the lock and all the bookkeeping lists */
	spin_lock_init(&uhci->schedule_lock);
	INIT_LIST_HEAD(&uhci->qh_remove_list);

	INIT_LIST_HEAD(&uhci->td_remove_list);

	INIT_LIST_HEAD(&uhci->urb_remove_list);

	INIT_LIST_HEAD(&uhci->urb_list);

	INIT_LIST_HEAD(&uhci->complete_list);

	/* The frame list is read by the controller, hence coherent DMA */
	uhci->fl = dma_alloc_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
			&dma_handle, 0);
	if (!uhci->fl) {
		dev_err(uhci_dev(uhci), "unable to allocate "
				"consistent memory for frame list\n");
		goto err_alloc_fl;
	}

	memset((void *)uhci->fl, 0, sizeof(*uhci->fl));

	uhci->fl->dma_handle = dma_handle;

	uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
			sizeof(struct uhci_td), 16, 0);
	if (!uhci->td_pool) {
		dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
		goto err_create_td_pool;
	}

	uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
			sizeof(struct uhci_qh), 16, 0);
	if (!uhci->qh_pool) {
		dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
		goto err_create_qh_pool;
	}

	/* Initialize the root hub */

	/* UHCI specs says devices must have 2 ports, but goes on to say */
	/*  they may have more but give no way to determine how many they */
	/*  have. However, according to the UHCI spec, Bit 7 is always set */
	/*  to 1. So we try to use this to our advantage */
	for (port = 0; port < (io_size - 0x10) / 2; port++) {
		unsigned int portstatus;

		portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
		if (!(portstatus & 0x0080))
			break;
	}
	if (debug)
		dev_info(uhci_dev(uhci), "detected %d ports\n", port);

	/* This is experimental so anything less than 2 or greater than 8 is */
	/*  something weird and we'll ignore it */
	if (port < 2 || port > UHCI_RH_MAXCHILD) {
		dev_info(uhci_dev(uhci), "port count misdetected? "
				"forcing to 2 ports\n");
		port = 2;
	}

	uhci->rh_numports = port;

	hcd->self.root_hub = udev = usb_alloc_dev(NULL, &hcd->self, 0);
	if (!udev) {
		dev_err(uhci_dev(uhci), "unable to allocate root hub\n");
		goto err_alloc_root_hub;
	}

	uhci->term_td = uhci_alloc_td(uhci, udev);
	if (!uhci->term_td) {
		dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
		goto err_alloc_term_td;
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		uhci->skelqh[i] = uhci_alloc_qh(uhci, udev);
		if (!uhci->skelqh[i]) {
			dev_err(uhci_dev(uhci), "unable to allocate QH\n");
			goto err_alloc_skelqh;
		}
	}

	/*
	 * 8 Interrupt queues; link all higher int queues to int1,
	 * then link int1 to control and control to bulk
	 */
	uhci->skel_int128_qh->link =
			uhci->skel_int64_qh->link =
			uhci->skel_int32_qh->link =
			uhci->skel_int16_qh->link =
			uhci->skel_int8_qh->link =
			uhci->skel_int4_qh->link =
			uhci->skel_int2_qh->link =
			cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;

	uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_fs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;

	/* This dummy TD is to work around a bug in Intel PIIX controllers */
	uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
		(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
	/* The dummy TD links to itself */
	uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);

	uhci->skel_term_qh->link = UHCI_PTR_TERM;
	uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);

	/*
	 * Fill the frame list: make all entries point to the proper
	 * interrupt queue.
	 *
	 * The interrupt queues will be interleaved as evenly as possible.
	 * There's not much to be done about period-1 interrupts; they have
	 * to occur in every frame.  But we can schedule period-2 interrupts
	 * in odd-numbered frames, period-4 interrupts in frames congruent
	 * to 2 (mod 4), and so on.  This way each frame only has two
	 * interrupt QHs, which will help spread out bandwidth utilization.
	 */
	for (i = 0; i < UHCI_NUMFRAMES; i++) {
		int irq;

		/*
		 * ffs (Find First bit Set) does exactly what we need:
		 * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[6],
		 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc.
		 * ffs > 6 => not on any high-period queue, so use
		 *	skel_int1_qh = skelqh[7].
		 * Add UHCI_NUMFRAMES to insure at least one bit is set.
		 */
		irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES);
		if (irq < 0)
			irq = 7;

		/* Only place we don't use the frame list routines */
		uhci->fl->frame[i] = cpu_to_le32(uhci->skelqh[irq]->dma_handle);
	}

	start_hc(uhci);

	init_stall_timer(hcd);

	udev->speed = USB_SPEED_FULL;

	if (usb_register_root_hub(udev, uhci_dev(uhci)) != 0) {
		dev_err(uhci_dev(uhci), "unable to start root hub\n");
		retval = -ENOMEM;
		goto err_start_root_hub;
	}

	return 0;

/*
 * error exits: unwind everything acquired above, in reverse order.
 * Each label frees the resource whose allocation succeeded last.
 */
err_start_root_hub:
	reset_hc(uhci);

	del_timer_sync(&uhci->stall_timer);

err_alloc_skelqh:
	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		if (uhci->skelqh[i]) {
			uhci_free_qh(uhci, uhci->skelqh[i]);
			uhci->skelqh[i] = NULL;
		}

	uhci_free_td(uhci, uhci->term_td);
	uhci->term_td = NULL;

err_alloc_term_td:
	usb_put_dev(udev);
	hcd->self.root_hub = NULL;

err_alloc_root_hub:
	dma_pool_destroy(uhci->qh_pool);
	uhci->qh_pool = NULL;

err_create_qh_pool:
	dma_pool_destroy(uhci->td_pool);
	uhci->td_pool = NULL;

err_create_td_pool:
	dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
			uhci->fl, uhci->fl->dma_handle);
	uhci->fl = NULL;

err_alloc_fl:
#ifdef CONFIG_PROC_FS
	remove_proc_entry(hcd->self.bus_name, uhci_proc_root);
	uhci->proc_entry = NULL;

err_create_proc_entry:
#endif

	return retval;
}
2276
/*
 * hc_driver .stop hook: shut the controller down and free everything.
 */
static void uhci_stop(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	del_timer_sync(&uhci->stall_timer);

	/*
	 * At this point, we're guaranteed that no new connects can be made
	 * to this bus since there are no more parents
	 */

	reset_hc(uhci);

	spin_lock_irq(&uhci->schedule_lock);
	uhci_free_pending_qhs(uhci);
	uhci_free_pending_tds(uhci);
	uhci_remove_pending_urbps(uhci);
	uhci_finish_completion(hcd, NULL);

	/* Second pass: giving back URBs above may have queued more
	 * QHs/TDs for removal */
	uhci_free_pending_qhs(uhci);
	uhci_free_pending_tds(uhci);
	spin_unlock_irq(&uhci->schedule_lock);
	
	release_uhci(uhci);
}
2302
2303 #ifdef CONFIG_PM
2304 static int uhci_suspend(struct usb_hcd *hcd, u32 state)
2305 {
2306         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2307
2308         /* Don't try to suspend broken motherboards, reset instead */
2309         if (suspend_allowed(uhci)) {
2310                 suspend_hc(uhci);
2311                 uhci->saved_framenumber =
2312                                 inw(uhci->io_addr + USBFRNUM) & 0x3ff;
2313         } else
2314                 reset_hc(uhci);
2315         return 0;
2316 }
2317
/*
 * hc_driver .resume hook.  If the controller was cleanly suspended,
 * restore its registers and flag a resume; otherwise (it was reset by
 * uhci_suspend) do a full reset-and-restart.  Always returns 0.
 */
static int uhci_resume(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	pci_set_master(to_pci_dev(uhci_dev(uhci)));

	if (uhci->state == UHCI_SUSPENDED) {

		/*
		 * Some systems don't maintain the UHCI register values
		 * during a PM suspend/resume cycle, so reinitialize
		 * the Frame Number, the Framelist Base Address, and the
		 * Interrupt Enable registers.
		 */
		outw(uhci->saved_framenumber, uhci->io_addr + USBFRNUM);
		outl(uhci->fl->dma_handle, uhci->io_addr + USBFLBASEADD);
		outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC |
				USBINTR_SP, uhci->io_addr + USBINTR);
		/* let hc_state_transitions() run the wakeup sequence */
		uhci->resume_detect = 1;
	} else {
		reset_hc(uhci);
		start_hc(uhci);
	}
	uhci->hcd.state = USB_STATE_RUNNING;
	return 0;
}
2344 #endif
2345
2346 static struct usb_hcd *uhci_hcd_alloc(void)
2347 {
2348         struct uhci_hcd *uhci;
2349
2350         uhci = (struct uhci_hcd *)kmalloc(sizeof(*uhci), GFP_KERNEL);
2351         if (!uhci)
2352                 return NULL;
2353
2354         memset(uhci, 0, sizeof(*uhci));
2355         uhci->hcd.product_desc = "UHCI Host Controller";
2356         return &uhci->hcd;
2357 }
2358
/* hc_driver .hcd_free hook: release memory from uhci_hcd_alloc(). */
static void uhci_hcd_free(struct usb_hcd *hcd)
{
	kfree(hcd_to_uhci(hcd));
}
2363
/* hc_driver .get_frame_number hook: thin wrapper around the
 * driver-internal frame-number helper. */
static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
	return uhci_get_current_frame_number(hcd_to_uhci(hcd));
}
2368
static const char hcd_name[] = "uhci_hcd";

/* Operations table handed to the USB core via pci driver_data */
static const struct hc_driver uhci_driver = {
	.description =		hcd_name,

	/* Generic hardware linkage */
	.irq =			uhci_irq,
	.flags =		HCD_USB11,	/* USB 1.1 host controller */

	/* Basic lifecycle operations */
	.reset =		uhci_reset,
	.start =		uhci_start,
#ifdef CONFIG_PM
	.suspend =		uhci_suspend,
	.resume =		uhci_resume,
#endif
	.stop =			uhci_stop,

	.hcd_alloc =		uhci_hcd_alloc,
	.hcd_free =		uhci_hcd_free,

	/* URB submit/cancel (defined earlier in this file) */
	.urb_enqueue =		uhci_urb_enqueue,
	.urb_dequeue =		uhci_urb_dequeue,

	.get_frame_number =	uhci_hcd_get_frame_number,

	/* Root-hub emulation */
	.hub_status_data =	uhci_hub_status_data,
	.hub_control =		uhci_hub_control,
};
2398
static const struct pci_device_id uhci_pci_ids[] = { {
	/* handle any USB UHCI controller, matched by PCI class code */
	PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
	.driver_data =	(unsigned long) &uhci_driver,
	}, { /* end: all zeroes */ }
};

/* Export the ID table so hotplug/modutils can autoload this driver */
MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
2407
static struct pci_driver uhci_pci_driver = {
	/* cast drops const — pci_driver.name is a plain char * in this
	 * kernel version (NOTE(review): verify against struct pci_driver) */
	.name =		(char *)hcd_name,
	.id_table =	uhci_pci_ids,

	/* generic HCD glue does the probe/remove work */
	.probe =	usb_hcd_pci_probe,
	.remove =	usb_hcd_pci_remove,

#ifdef	CONFIG_PM
	.suspend =	usb_hcd_pci_suspend,
	.resume =	usb_hcd_pci_resume,
#endif	/* PM */
};
2420  
2421 static int __init uhci_hcd_init(void)
2422 {
2423         int retval = -ENOMEM;
2424
2425         printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");
2426
2427         if (usb_disabled())
2428                 return -ENODEV;
2429
2430         if (debug) {
2431                 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
2432                 if (!errbuf)
2433                         goto errbuf_failed;
2434         }
2435
2436 #ifdef CONFIG_PROC_FS
2437         uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, 0);
2438         if (!uhci_proc_root)
2439                 goto proc_failed;
2440 #endif
2441
2442         uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
2443                 sizeof(struct urb_priv), 0, 0, NULL, NULL);
2444         if (!uhci_up_cachep)
2445                 goto up_failed;
2446
2447         retval = pci_module_init(&uhci_pci_driver);
2448         if (retval)
2449                 goto init_failed;
2450
2451         return 0;
2452
2453 init_failed:
2454         if (kmem_cache_destroy(uhci_up_cachep))
2455                 warn("not all urb_priv's were freed!");
2456
2457 up_failed:
2458
2459 #ifdef CONFIG_PROC_FS
2460         remove_proc_entry("driver/uhci", 0);
2461
2462 proc_failed:
2463 #endif
2464         if (errbuf)
2465                 kfree(errbuf);
2466
2467 errbuf_failed:
2468
2469         return retval;
2470 }
2471
2472 static void __exit uhci_hcd_cleanup(void) 
2473 {
2474         pci_unregister_driver(&uhci_pci_driver);
2475         
2476         if (kmem_cache_destroy(uhci_up_cachep))
2477                 warn("not all urb_priv's were freed!");
2478
2479 #ifdef CONFIG_PROC_FS
2480         remove_proc_entry("driver/uhci", 0);
2481 #endif
2482
2483         if (errbuf)
2484                 kfree(errbuf);
2485 }
2486
/* Standard module entry/exit points and metadata */
module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");