[linux-2.6.git] / drivers / usb / host / uhci-hcd.c
1 /*
2  * Universal Host Controller Interface driver for USB.
3  *
4  * Maintainer: Alan Stern <stern@rowland.harvard.edu>
5  *
6  * (C) Copyright 1999 Linus Torvalds
7  * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8  * (C) Copyright 1999 Randy Dunlap
9  * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10  * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11  * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12  * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13  * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14  *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15  * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
16  * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
17  *
18  * Intel documents this fairly well, and as far as I know there
19  * are no royalties or anything like that, but even so there are
20  * people who decided that they want to do the same thing in a
21  * completely different way.
22  *
23  * WARNING! The USB documentation is downright evil. Most of it
24  * is just crap, written by a committee. You're better off ignoring
25  * most of it, the important stuff is:
26  *  - the low-level protocol (fairly simple but lots of small details)
27  *  - working around the horridness of the rest
28  */
29
30 #include <linux/config.h>
31 #ifdef CONFIG_USB_DEBUG
32 #define DEBUG
33 #else
34 #undef DEBUG
35 #endif
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/kernel.h>
39 #include <linux/init.h>
40 #include <linux/delay.h>
41 #include <linux/ioport.h>
42 #include <linux/sched.h>
43 #include <linux/slab.h>
44 #include <linux/smp_lock.h>
45 #include <linux/errno.h>
46 #include <linux/unistd.h>
47 #include <linux/interrupt.h>
48 #include <linux/spinlock.h>
49 #include <linux/proc_fs.h>
50 #include <linux/pm.h>
51 #include <linux/dmapool.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/usb.h>
54
55 #include <asm/bitops.h>
56 #include <asm/uaccess.h>
57 #include <asm/io.h>
58 #include <asm/irq.h>
59 #include <asm/system.h>
60
61 #include "../core/hcd.h"
62 #include "uhci-hcd.h"
63
64 /*
65  * Version Information
66  */
67 #define DRIVER_VERSION "v2.2"
68 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
69 Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
70 Alan Stern"
71 #define DRIVER_DESC "USB Universal Host Controller Interface driver"
72
73 /*
74  * debug = 0, no debugging messages
75  * debug = 1, dump failed URB's except for stalls
76  * debug = 2, dump all failed URB's (including stalls)
77  *            show all queues in /proc/driver/uhci/[pci_addr]
78  * debug = 3, show all TD's in URB's when dumping
79  */
80 #ifdef DEBUG
81 static int debug = 1;
82 #else
83 static int debug = 0;
84 #endif
85 MODULE_PARM(debug, "i");
86 MODULE_PARM_DESC(debug, "Debug level");
87 static char *errbuf;
88 #define ERRBUF_LEN    (32 * 1024)
89
90 #include "uhci-hub.c"
91 #include "uhci-debug.c"
92
93 static kmem_cache_t *uhci_up_cachep;    /* urb_priv */
94
95 static int uhci_get_current_frame_number(struct uhci_hcd *uhci);
96 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
97 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
98 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
99 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs);
100 static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
101 static void uhci_free_pending_tds(struct uhci_hcd *uhci);
102
103 static void hc_state_transitions(struct uhci_hcd *uhci);
104
105 /* If a transfer is still active after this much time, turn off FSBR */
106 #define IDLE_TIMEOUT    (HZ / 20)       /* 50 ms */
107 #define FSBR_DELAY      (HZ / 20)       /* 50 ms */
108
109 /* When we timeout an idle transfer for FSBR, we'll switch it over to */
110 /* depth first traversal. We'll do it in groups of this number of TD's */
111 /* to make sure it doesn't hog all of the bandwidth */
112 #define DEPTH_INTERVAL 5
113
114 /*
115  * Technically, updating td->status here is a race, but it's not really a
116  * problem. The worst that can happen is that we set the IOC bit again
117  * generating a spurious interrupt. We could fix this by creating another
118  * QH and leaving the IOC bit always set, but then we would have to play
119  * games with the FSBR code to make sure we get the correct order in all
120  * the cases. I don't think it's worth the effort
121  */
122 static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
123 {
124         uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC); 
125 }
126
127 static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
128 {
129         uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
130 }
131
132 static inline void uhci_moveto_complete(struct uhci_hcd *uhci, 
133                                         struct urb_priv *urbp)
134 {
135         list_move_tail(&urbp->urb_list, &uhci->complete_list);
136 }
137
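/*
 * Allocate a TD from the controller's DMA pool.  The link pointer starts
 * out terminated, the TD belongs to no frame, and a reference is taken
 * on the device for the lifetime of the TD.
 */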
138 static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
139 {
140         dma_addr_t dma_handle;
141         struct uhci_td *td;
142
143         td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
144         if (!td)
145                 return NULL;
146
147         td->dma_handle = dma_handle;
148
149         td->link = UHCI_PTR_TERM;
150         td->buffer = 0;
151
152         td->frame = -1;
153         td->dev = dev;
154
155         INIT_LIST_HEAD(&td->list);
156         INIT_LIST_HEAD(&td->remove_list);
157         INIT_LIST_HEAD(&td->fl_list);
158
159         usb_get_dev(dev);
160
161         return td;
162 }
163
164 static inline void uhci_fill_td(struct uhci_td *td, u32 status,
165                 u32 token, u32 buffer)
166 {
167         td->status = cpu_to_le32(status);
168         td->token = cpu_to_le32(token);
169         td->buffer = cpu_to_le32(buffer);
170 }
171
172 /*
173  * We insert Isochronous URB's directly into the frame list at the beginning
174  */
175 static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
176 {
177         framenum %= UHCI_NUMFRAMES;
178
179         td->frame = framenum;
180
181         /* Is there a TD already mapped there? */
182         if (uhci->fl->frame_cpu[framenum]) {
183                 struct uhci_td *ftd, *ltd;
184
185                 ftd = uhci->fl->frame_cpu[framenum];
186                 ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
187
188                 list_add_tail(&td->fl_list, &ftd->fl_list);
189
190                 td->link = ltd->link;
191                 wmb();
192                 ltd->link = cpu_to_le32(td->dma_handle);
193         } else {
194                 td->link = uhci->fl->frame[framenum];
195                 wmb();
196                 uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
197                 uhci->fl->frame_cpu[framenum] = td;
198         }
199 }
200
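/*
 * Take an isochronous TD out of the frame list, repairing either the
 * frame list head or the previous TD's link pointer around it.
 */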
201 static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
202 {
203         /* If it's not inserted, don't remove it */
204         if (td->frame == -1 && list_empty(&td->fl_list))
205                 return;
206
207         if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
208                 if (list_empty(&td->fl_list)) {
209                         uhci->fl->frame[td->frame] = td->link;
210                         uhci->fl->frame_cpu[td->frame] = NULL;
211                 } else {
212                         struct uhci_td *ntd;
213
214                         ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
215                         uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
216                         uhci->fl->frame_cpu[td->frame] = ntd;
217                 }
218         } else {
219                 struct uhci_td *ptd;
220
221                 ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
222                 ptd->link = td->link;
223         }
224
225         wmb();
226         td->link = UHCI_PTR_TERM;
227
228         list_del_init(&td->fl_list);
229         td->frame = -1;
230 }
231
232 /*
233  * Inserts a td into qh list at the top.
234  */
235 static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, u32 breadth)
236 {
237         struct list_head *tmp, *head;
238         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
239         struct uhci_td *td, *ptd;
240
241         if (list_empty(&urbp->td_list))
242                 return;
243
244         head = &urbp->td_list;
245         tmp = head->next;
246
247         /* Ordering isn't important here yet since the QH hasn't been */
248         /*  inserted into the schedule yet */
249         td = list_entry(tmp, struct uhci_td, list);
250
251         /* Add the first TD to the QH element pointer */
252         qh->element = cpu_to_le32(td->dma_handle) | breadth;
253
254         ptd = td;
255
256         /* Then link the rest of the TD's */
257         tmp = tmp->next;
258         while (tmp != head) {
259                 td = list_entry(tmp, struct uhci_td, list);
260
261                 tmp = tmp->next;
262
263                 ptd->link = cpu_to_le32(td->dma_handle) | breadth;
264
265                 ptd = td;
266         }
267
268         ptd->link = UHCI_PTR_TERM;
269 }
270
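/*
 * Return a TD to the DMA pool and drop the device reference.
 * The TD must already be off every list; warn if it isn't.
 */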
271 static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
272 {
273         if (!list_empty(&td->list))
274                 dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
275         if (!list_empty(&td->remove_list))
276                 dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
277         if (!list_empty(&td->fl_list))
278                 dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);
279
280         if (td->dev)
281                 usb_put_dev(td->dev);
282
283         dma_pool_free(uhci->td_pool, td, td->dma_handle);
284 }
285
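/*
 * Allocate a QH from the controller's DMA pool.  Both the element and
 * link pointers start out terminated, and a reference is taken on the
 * device for the lifetime of the QH.
 */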
286 static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
287 {
288         dma_addr_t dma_handle;
289         struct uhci_qh *qh;
290
291         qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
292         if (!qh)
293                 return NULL;
294
295         qh->dma_handle = dma_handle;
296
297         qh->element = UHCI_PTR_TERM;
298         qh->link = UHCI_PTR_TERM;
299
300         qh->dev = dev;
301         qh->urbp = NULL;
302
303         INIT_LIST_HEAD(&qh->list);
304         INIT_LIST_HEAD(&qh->remove_list);
305
306         usb_get_dev(dev);
307
308         return qh;
309 }
310
311 static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
312 {
313         if (!list_empty(&qh->list))
314                 dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
315         if (!list_empty(&qh->remove_list))
316                 dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);
317
318         if (qh->dev)
319                 usb_put_dev(qh->dev);
320
321         dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
322 }
323
324 /*
325  * Append this urb's qh after the last qh in skelqh->list
326  *
327  * Note that urb_priv.queue_list doesn't have a separate queue head;
328  * it's a ring with every element "live".
329  */
330 static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
331 {
332         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
333         struct list_head *tmp;
334         struct uhci_qh *lqh;
335
336         /* Grab the last QH */
337         lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);
338
339         /* Point to the next skelqh */
340         urbp->qh->link = lqh->link;
341         wmb();                          /* Ordering is important */
342
343         /*
344          * Patch QHs for previous endpoint's queued URBs?  HC goes
345          * here next, not to the next skelqh it now points to.
346          *
347          *    lqh --> td ... --> qh ... --> td --> qh ... --> td
348          *     |                 |                 |
349          *     v                 v                 v
350          *     +<----------------+-----------------+
351          *     v
352          *    newqh --> td ... --> td
353          *     |
354          *     v
355          *    ...
356          *
357          * The HC could see (and use!) any of these as we write them.
358          */
359         lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
360         if (lqh->urbp) {
361                 list_for_each (tmp, &lqh->urbp->queue_list) {
362                         struct urb_priv *turbp =
363                                 list_entry(tmp, struct urb_priv, queue_list);
364
365                         turbp->qh->link = lqh->link;
366                 }
367         }
368
369         list_add_tail(&urbp->qh->list, &skelqh->list);
370 }
371
372 /*
373  * Start removal of QH from schedule; it finishes next frame.
374  * TDs should be unlinked before this is called.
375  */
376 static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
377 {
378         struct uhci_qh *pqh;
379         u32 newlink;
380         unsigned int age;
381
382         if (!qh)
383                 return;
384
385         /*
386          * Only go through the hoops if it's actually linked in
387          */
388         if (!list_empty(&qh->list)) {
389
390                 /* If our queue is nonempty, make the next URB the head */
391                 if (!list_empty(&qh->urbp->queue_list)) {
392                         struct urb_priv *nurbp;
393
394                         nurbp = list_entry(qh->urbp->queue_list.next,
395                                         struct urb_priv, queue_list);
396                         nurbp->queued = 0;
397                         list_add(&nurbp->qh->list, &qh->list);
398                         newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
399                 } else
400                         newlink = qh->link;
401
402                 /* Fix up the previous QH's queue to link to either
403                  * the new head of this queue or the start of the
404                  * next endpoint's queue. */
405                 pqh = list_entry(qh->list.prev, struct uhci_qh, list);
406                 pqh->link = newlink;
407                 if (pqh->urbp) {
408                         struct list_head *head, *tmp;
409
410                         head = &pqh->urbp->queue_list;
411                         tmp = head->next;
412                         while (head != tmp) {
413                                 struct urb_priv *turbp =
414                                         list_entry(tmp, struct urb_priv, queue_list);
415
416                                 tmp = tmp->next;
417
418                                 turbp->qh->link = newlink;
419                         }
420                 }
421                 wmb();
422
423                 /* Leave qh->link in case the HC is on the QH now, it will */
424                 /* continue the rest of the schedule */
425                 qh->element = UHCI_PTR_TERM;
426
427                 list_del_init(&qh->list);
428         }
429
430         list_del_init(&qh->urbp->queue_list);
431         qh->urbp = NULL;
432
433         age = uhci_get_current_frame_number(uhci);
434         if (age != uhci->qh_remove_age) {
435                 uhci_free_pending_qhs(uhci);
436                 uhci->qh_remove_age = age;
437         }
438
439         /* Check to see if the remove list is empty. Set the IOC bit */
440         /* to force an interrupt so we can remove the QH */
441         if (list_empty(&uhci->qh_remove_list))
442                 uhci_set_next_interrupt(uhci);
443
444         list_add(&qh->remove_list, &uhci->qh_remove_list);
445 }
446
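/*
 * Rewrite the data toggle of every TD in this URB, starting with <toggle>.
 * Returns the toggle value the next TD (or queued URB) should use.
 */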
447 static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
448 {
449         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
450         struct list_head *head, *tmp;
451
452         head = &urbp->td_list;
453         tmp = head->next;
454         while (head != tmp) {
455                 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
456
457                 tmp = tmp->next;
458
459                 if (toggle)
460                         td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
461                 else
462                         td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
463
464
465                 toggle ^= 1;
466         }
467
468         return toggle;
469 }
470
471 /* This function will append one URB's QH to another URB's QH. This is for */
472 /* queuing interrupt, control or bulk transfers */
473 static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
474 {
475         struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
476         struct list_head *tmp;
477         struct uhci_td *lltd;
478
479         eurbp = eurb->hcpriv;
480         urbp = urb->hcpriv;
481
482         /* Find the first URB in the queue */
483         if (eurbp->queued) {
484                 struct list_head *head = &eurbp->queue_list;
485
486                 tmp = head->next;
487                 while (tmp != head) {
488                         struct urb_priv *turbp =
489                                 list_entry(tmp, struct urb_priv, queue_list);
490
491                         if (!turbp->queued)
492                                 break;
493
494                         tmp = tmp->next;
495                 }
496         } else
497                 tmp = &eurbp->queue_list;
498
499         furbp = list_entry(tmp, struct urb_priv, queue_list);
500         lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);
501
502         lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);
503
504         /* Control transfers always start with toggle 0 */
505         if (!usb_pipecontrol(urb->pipe))
506                 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
507                                 usb_pipeout(urb->pipe),
508                                 uhci_fixup_toggle(urb,
509                                         uhci_toggle(td_token(lltd)) ^ 1));
510
511         /* All qh's in the queue need to link to the next queue */
512         urbp->qh->link = eurbp->qh->link;
513
514         wmb();                  /* Make sure we flush everything */
515
516         lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
517
518         list_add_tail(&urbp->queue_list, &furbp->queue_list);
519
520         urbp->queued = 1;
521 }
522
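/*
 * Remove a queued URB from its endpoint's queue: propagate the correct
 * data toggle to the URBs that follow it and splice the previous URB's
 * last TD around the one being removed.  The head-of-queue case is
 * handled by uhci_remove_qh().
 */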
523 static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
524 {
525         struct urb_priv *urbp, *nurbp;
526         struct list_head *head, *tmp;
527         struct urb_priv *purbp;
528         struct uhci_td *pltd;
529         unsigned int toggle;
530
531         urbp = urb->hcpriv;
532
533         if (list_empty(&urbp->queue_list))
534                 return;
535
536         nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);
537
538         /*
539          * Fix up the toggle for the following URBs in the queue.
540          * Only needed for bulk and interrupt: control and isochronous
541          * endpoints don't propagate toggles between messages.
542          */
543         if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
544                 if (!urbp->queued)
545                         /* We just set the toggle in uhci_unlink_generic */
546                         toggle = usb_gettoggle(urb->dev,
547                                         usb_pipeendpoint(urb->pipe),
548                                         usb_pipeout(urb->pipe));
549                 else {
550                         /* If we're in the middle of the queue, grab the */
551                         /* toggle from the TD previous to us */
552                         purbp = list_entry(urbp->queue_list.prev,
553                                         struct urb_priv, queue_list);
554                         pltd = list_entry(purbp->td_list.prev,
555                                         struct uhci_td, list);
556                         toggle = uhci_toggle(td_token(pltd)) ^ 1;
557                 }
558
559                 head = &urbp->queue_list;
560                 tmp = head->next;
561                 while (head != tmp) {
562                         struct urb_priv *turbp;
563
564                         turbp = list_entry(tmp, struct urb_priv, queue_list);
565                         tmp = tmp->next;
566
567                         if (!turbp->queued)
568                                 break;
569                         toggle = uhci_fixup_toggle(turbp->urb, toggle);
570                 }
571
572                 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
573                                 usb_pipeout(urb->pipe), toggle);
574         }
575
576         if (urbp->queued) {
577                 /* We're somewhere in the middle (or end).  The case where
578                  * we're at the head is handled in uhci_remove_qh(). */
579                 purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
580                                 queue_list);
581
582                 pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
583                 if (nurbp->queued)
584                         pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
585                 else
586                         /* The next URB happens to be the beginning, so */
587                         /*  we're the last, end the chain */
588                         pltd->link = UHCI_PTR_TERM;
589         }
590
591         /* urbp->queue_list is handled in uhci_remove_qh() */
592 }
593
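/*
 * Allocate and initialize the per-URB private data and add it to
 * uhci->urb_list.
 */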
594 static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
595 {
596         struct urb_priv *urbp;
597
598         urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
599         if (!urbp)
600                 return NULL;
601
602         memset((void *)urbp, 0, sizeof(*urbp));
603
604         urbp->inserttime = jiffies;
605         urbp->fsbrtime = jiffies;
606         urbp->urb = urb;
607         
608         INIT_LIST_HEAD(&urbp->td_list);
609         INIT_LIST_HEAD(&urbp->queue_list);
610         INIT_LIST_HEAD(&urbp->urb_list);
611
612         list_add_tail(&urbp->urb_list, &uhci->urb_list);
613
614         urb->hcpriv = urbp;
615
616         return urbp;
617 }
618
619 static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
620 {
621         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
622
623         td->urb = urb;
624
625         list_add_tail(&td->list, &urbp->td_list);
626 }
627
628 static void uhci_remove_td_from_urb(struct uhci_td *td)
629 {
630         if (list_empty(&td->list))
631                 return;
632
633         list_del_init(&td->list);
634
635         td->urb = NULL;
636 }
637
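/*
 * Free an URB's private data: unlink all of its TDs and move them to
 * the deferred td_remove_list, then release the urb_priv itself.
 */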
638 static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
639 {
640         struct list_head *head, *tmp;
641         struct urb_priv *urbp;
642         unsigned int age;
643
644         urbp = (struct urb_priv *)urb->hcpriv;
645         if (!urbp)
646                 return;
647
648         if (!list_empty(&urbp->urb_list))
649                 dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
650                                 "or uhci->remove_list!\n", urb);
651
652         age = uhci_get_current_frame_number(uhci);
653         if (age != uhci->td_remove_age) {
654                 uhci_free_pending_tds(uhci);
655                 uhci->td_remove_age = age;
656         }
657
658         /* Check to see if the remove list is empty. Set the IOC bit */

659         /* to force an interrupt so we can remove the TD's */
660         if (list_empty(&uhci->td_remove_list))
661                 uhci_set_next_interrupt(uhci);
662
663         head = &urbp->td_list;
664         tmp = head->next;
665         while (tmp != head) {
666                 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
667
668                 tmp = tmp->next;
669
670                 uhci_remove_td_from_urb(td);
671                 uhci_remove_td(uhci, td);
672                 list_add(&td->remove_list, &uhci->td_remove_list);
673         }
674
675         urb->hcpriv = NULL;
676         kmem_cache_free(uhci_up_cachep, urbp);
677 }
678
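/*
 * Full-Speed Bandwidth Reclamation (FSBR) helpers: the first URB to
 * enable it links the terminating QH back to the full-speed control
 * skeleton so the HC keeps polling the schedule; when the last user
 * disables it, uhci->fsbrtimeout is armed so the loop can be taken
 * down again later.
 */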
679 static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
680 {
681         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
682
683         if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
684                 urbp->fsbr = 1;
685                 if (!uhci->fsbr++ && !uhci->fsbrtimeout)
686                         uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
687         }
688 }
689
690 static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
691 {
692         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
693
694         if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
695                 urbp->fsbr = 0;
696                 if (!--uhci->fsbr)
697                         uhci->fsbrtimeout = jiffies + FSBR_DELAY;
698         }
699 }
700
701 /*
702  * Map status to standard result codes
703  *
704  * <status> is (td->status & 0xF60000) [a.k.a. uhci_status_bits(td->status)]
705  * Note: status does not include the TD_CTRL_NAK bit.
706  * <dir_out> is True for output TDs and False for input TDs.
707  */
708 static int uhci_map_status(int status, int dir_out)
709 {
710         if (!status)
711                 return 0;
712         if (status & TD_CTRL_BITSTUFF)                  /* Bitstuff error */
713                 return -EPROTO;
714         if (status & TD_CTRL_CRCTIMEO) {                /* CRC/Timeout */
715                 if (dir_out)
716                         return -EPROTO;
717                 else
718                         return -EILSEQ;
719         }
720         if (status & TD_CTRL_BABBLE)                    /* Babble */
721                 return -EOVERFLOW;
722         if (status & TD_CTRL_DBUFERR)                   /* Buffer error */
723                 return -ENOSR;
724         if (status & TD_CTRL_STALLED)                   /* Stalled */
725                 return -EPIPE;
726         WARN_ON(status & TD_CTRL_ACTIVE);               /* Active */
727         return 0;
728 }
729
730 /*
731  * Control transfers
732  */
733 static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
734 {
735         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
736         struct uhci_td *td;
737         struct uhci_qh *qh, *skelqh;
738         unsigned long destination, status;
739         int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
740         int len = urb->transfer_buffer_length;
741         dma_addr_t data = urb->transfer_dma;
742
743         /* The "pipe" thing contains the destination in bits 8--18 */
744         destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
745
746         /* 3 errors */
747         status = TD_CTRL_ACTIVE | uhci_maxerr(3);
748         if (urb->dev->speed == USB_SPEED_LOW)
749                 status |= TD_CTRL_LS;
750
751         /*
752          * Build the TD for the control request setup packet
753          */
754         td = uhci_alloc_td(uhci, urb->dev);
755         if (!td)
756                 return -ENOMEM;
757
758         uhci_add_td_to_urb(urb, td);
759         uhci_fill_td(td, status, destination | uhci_explen(7),
760                 urb->setup_dma);
761
762         /*
763          * If direction is "send", change the packet ID from SETUP (0x2D)
764          * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
765          * set Short Packet Detect (SPD) for all data packets.
766          */
767         if (usb_pipeout(urb->pipe))
768                 destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
769         else {
770                 destination ^= (USB_PID_SETUP ^ USB_PID_IN);
771                 status |= TD_CTRL_SPD;
772         }
773
774         /*
775          * Build the DATA TD's
776          */
777         while (len > 0) {
778                 int pktsze = len;
779
780                 if (pktsze > maxsze)
781                         pktsze = maxsze;
782
783                 td = uhci_alloc_td(uhci, urb->dev);
784                 if (!td)
785                         return -ENOMEM;
786
787                 /* Alternate Data0/1 (start with Data1) */
788                 destination ^= TD_TOKEN_TOGGLE;
789         
790                 uhci_add_td_to_urb(urb, td);
791                 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
792                         data);
793
794                 data += pktsze;
795                 len -= pktsze;
796         }
797
798         /*
799          * Build the final TD for control status 
800          */
801         td = uhci_alloc_td(uhci, urb->dev);
802         if (!td)
803                 return -ENOMEM;
804
805         /*
806          * It's IN if the pipe is an output pipe or we're not expecting
807          * data back.
808          */
809         destination &= ~TD_TOKEN_PID_MASK;
810         if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
811                 destination |= USB_PID_IN;
812         else
813                 destination |= USB_PID_OUT;
814
815         destination |= TD_TOKEN_TOGGLE;         /* End in Data1 */
816
817         status &= ~TD_CTRL_SPD;
818
819         uhci_add_td_to_urb(urb, td);
820         uhci_fill_td(td, status | TD_CTRL_IOC,
821                 destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);
822
823         qh = uhci_alloc_qh(uhci, urb->dev);
824         if (!qh)
825                 return -ENOMEM;
826
827         urbp->qh = qh;
828         qh->urbp = urbp;
829
830         uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
831
832         /* Low-speed transfers get a different queue, and won't hog the bus */
833         if (urb->dev->speed == USB_SPEED_LOW)
834                 skelqh = uhci->skel_ls_control_qh;
835         else {
836                 skelqh = uhci->skel_fs_control_qh;
837                 uhci_inc_fsbr(uhci, urb);
838         }
839
840         if (eurb)
841                 uhci_append_queued_urb(uhci, eurb, urb);
842         else
843                 uhci_insert_qh(uhci, skelqh, urb);
844
845         return -EINPROGRESS;
846 }
847
848 /*
849  * If control-IN transfer was short, the status packet wasn't sent.
850  * This routine changes the element pointer in the QH to point at the
851  * status TD.  It's safe to do this even while the QH is live, because
852  * the hardware only updates the element pointer following a successful
853  * transfer.  The inactive TD for the short packet won't cause an update,
854  * so the pointer won't get overwritten.  The next time the controller
855  * sees this QH, it will send the status packet.
856  */
857 static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
858 {
859         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
860         struct uhci_td *td;
861
862         urbp->short_control_packet = 1;
863
864         td = list_entry(urbp->td_list.prev, struct uhci_td, list);
865         urbp->qh->element = td->dma_handle;
866
867         return -EINPROGRESS;
868 }
869
870
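/*
 * Process a control URB: check the SETUP TD, accumulate the data-stage
 * length (retriggering the status stage if an IN transfer came up short),
 * and finally check the status-stage TD.
 */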
871 static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
872 {
873         struct list_head *tmp, *head;
874         struct urb_priv *urbp = urb->hcpriv;
875         struct uhci_td *td;
876         unsigned int status;
877         int ret = 0;
878
879         if (list_empty(&urbp->td_list))
880                 return -EINVAL;
881
882         head = &urbp->td_list;
883
884         if (urbp->short_control_packet) {
885                 tmp = head->prev;
886                 goto status_stage;
887         }
888
889         tmp = head->next;
890         td = list_entry(tmp, struct uhci_td, list);
891
892         /* The first TD is the SETUP stage, check the status, but skip */
893         /*  the count */
894         status = uhci_status_bits(td_status(td));
895         if (status & TD_CTRL_ACTIVE)
896                 return -EINPROGRESS;
897
898         if (status)
899                 goto td_error;
900
901         urb->actual_length = 0;
902
903         /* The rest of the TD's (but the last) are data */
904         tmp = tmp->next;
905         while (tmp != head && tmp->next != head) {
906                 td = list_entry(tmp, struct uhci_td, list);
907
908                 tmp = tmp->next;
909
910                 status = uhci_status_bits(td_status(td));
911                 if (status & TD_CTRL_ACTIVE)
912                         return -EINPROGRESS;
913
914                 urb->actual_length += uhci_actual_length(td_status(td));
915
916                 if (status)
917                         goto td_error;
918
919                 /* Check to see if we received a short packet */
920                 if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
921                         if (urb->transfer_flags & URB_SHORT_NOT_OK) {
922                                 ret = -EREMOTEIO;
923                                 goto err;
924                         }
925
926                         if (uhci_packetid(td_token(td)) == USB_PID_IN)
927                                 return usb_control_retrigger_status(uhci, urb);
928                         else
929                                 return 0;
930                 }
931         }
932
933 status_stage:
934         td = list_entry(tmp, struct uhci_td, list);
935
936         /* Control status stage */
937         status = td_status(td);
938
939 #ifdef I_HAVE_BUGGY_APC_BACKUPS
940         /* APC BackUPS Pro kludge */
941         /* It tries to send all of the descriptor instead of the amount */
942         /*  we requested */
943         if (status & TD_CTRL_IOC &&     /* IOC is masked out by uhci_status_bits */
944             status & TD_CTRL_ACTIVE &&
945             status & TD_CTRL_NAK)
946                 return 0;
947 #endif
948
949         status = uhci_status_bits(status);
950         if (status & TD_CTRL_ACTIVE)
951                 return -EINPROGRESS;
952
953         if (status)
954                 goto td_error;
955
956         return 0;
957
958 td_error:
959         ret = uhci_map_status(status, uhci_packetout(td_token(td)));
960
961 err:
962         if ((debug == 1 && ret != -EPIPE) || debug > 1) {
963                 /* Some debugging code */
964                 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
965                                 __FUNCTION__, status);
966
967                 if (errbuf) {
968                         /* Print the chain for debugging purposes */
969                         uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
970
971                         lprintk(errbuf);
972                 }
973         }
974
975         return ret;
976 }
977
978 /*
979  * Common submit for bulk and interrupt
980  */
981 static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
982 {
983         struct uhci_td *td;
984         struct uhci_qh *qh;
985         unsigned long destination, status;
986         int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
987         int len = urb->transfer_buffer_length;
988         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
989         dma_addr_t data = urb->transfer_dma;
990
991         if (len < 0)
992                 return -EINVAL;
993
994         /* The "pipe" thing contains the destination in bits 8--18 */
995         destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
996
997         status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
998         if (urb->dev->speed == USB_SPEED_LOW)
999                 status |= TD_CTRL_LS;
1000         if (usb_pipein(urb->pipe))
1001                 status |= TD_CTRL_SPD;
1002
1003         /*
1004          * Build the DATA TD's
1005          */
1006         do {    /* Allow zero length packets */
1007                 int pktsze = maxsze;
1008
1009                 if (pktsze >= len) {
1010                         pktsze = len;
1011                         if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
1012                                 status &= ~TD_CTRL_SPD;
1013                 }
1014
1015                 td = uhci_alloc_td(uhci, urb->dev);
1016                 if (!td)
1017                         return -ENOMEM;
1018
1019                 uhci_add_td_to_urb(urb, td);
1020                 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
1021                         (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1022                          usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
1023                         data);
1024
1025                 data += pktsze;
1026                 len -= maxsze;
1027
1028                 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1029                         usb_pipeout(urb->pipe));
1030         } while (len > 0);
1031
1032         /*
1033          * URB_ZERO_PACKET means adding a 0-length packet, if direction
1034          * is OUT and the transfer_length was an exact multiple of maxsze,
1035          * hence (len = transfer_length - N * maxsze) == 0.
1036          * However, if transfer_length == 0, the zero packet was already
1037          * prepared above.
1038          */
1039         if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
1040             !len && urb->transfer_buffer_length) {
1041                 td = uhci_alloc_td(uhci, urb->dev);
1042                 if (!td)
1043                         return -ENOMEM;
1044
1045                 uhci_add_td_to_urb(urb, td);
1046                 uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
1047                         (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1048                          usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
1049                         data);
1050
1051                 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1052                         usb_pipeout(urb->pipe));
1053         }
1054
1055         /* Set the interrupt-on-completion flag on the last packet.
1056          * A more-or-less typical 4 KB URB (= size of one memory page)
1057          * will require about 3 ms to transfer; that's a little on the
1058          * fast side but not enough to justify delaying an interrupt
1059          * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
1060          * flag setting. */
1061         td->status |= cpu_to_le32(TD_CTRL_IOC);
1062
1063         qh = uhci_alloc_qh(uhci, urb->dev);
1064         if (!qh)
1065                 return -ENOMEM;
1066
1067         urbp->qh = qh;
1068         qh->urbp = urbp;
1069
1070         /* Always breadth first */
1071         uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
1072
1073         if (eurb)
1074                 uhci_append_queued_urb(uhci, eurb, urb);
1075         else
1076                 uhci_insert_qh(uhci, skelqh, urb);
1077
1078         return -EINPROGRESS;
1079 }
1080
1081 /*
1082  * Common result for bulk and interrupt
1083  */
1084 static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
1085 {
1086         struct list_head *tmp, *head;
1087         struct urb_priv *urbp = urb->hcpriv;
1088         struct uhci_td *td;
1089         unsigned int status = 0;
1090         int ret = 0;
1091
1092         urb->actual_length = 0;
1093
1094         head = &urbp->td_list;
1095         tmp = head->next;
1096         while (tmp != head) {
1097                 td = list_entry(tmp, struct uhci_td, list);
1098
1099                 tmp = tmp->next;
1100
1101                 status = uhci_status_bits(td_status(td));
1102                 if (status & TD_CTRL_ACTIVE)
1103                         return -EINPROGRESS;
1104
1105                 urb->actual_length += uhci_actual_length(td_status(td));
1106
1107                 if (status)
1108                         goto td_error;
1109
1110                 if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
1111                         if (urb->transfer_flags & URB_SHORT_NOT_OK) {
1112                                 ret = -EREMOTEIO;
1113                                 goto err;
1114                         } else
1115                                 return 0;
1116                 }
1117         }
1118
1119         return 0;
1120
1121 td_error:
1122         ret = uhci_map_status(status, uhci_packetout(td_token(td)));
1123         if (ret == -EPIPE)
1124                 /* endpoint has stalled - mark it halted */
1125                 usb_endpoint_halt(urb->dev, uhci_endpoint(td_token(td)),
1126                                 uhci_packetout(td_token(td)));
1127
1128 err:
1129         /* 
1130          * Enable this chunk of code if you want to see some more debugging.
1131          * But be careful, it has a tendency to starve out khubd and prevent
1132          * disconnects from happening successfully if you have a slow debug
1133          * log interface (like a serial console).
1134          */
1135 #if 0
1136         if ((debug == 1 && ret != -EPIPE) || debug > 1) {
1137                 /* Some debugging code */
1138                 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
1139                                 __FUNCTION__, status);
1140
1141                 if (errbuf) {
1142                         /* Print the chain for debugging purposes */
1143                         uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
1144
1145                         lprintk(errbuf);
1146                 }
1147         }
1148 #endif
1149         return ret;
1150 }
1151
1152 static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1153 {
1154         int ret;
1155
1156         /* Can't have low-speed bulk transfers */
1157         if (urb->dev->speed == USB_SPEED_LOW)
1158                 return -EINVAL;
1159
1160         ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
1161         if (ret == -EINPROGRESS)
1162                 uhci_inc_fsbr(uhci, urb);
1163
1164         return ret;
1165 }
1166
1167 static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1168 {
1169         /* USB 1.1 interrupt transfers only involve one packet per interval;
1170          * that's the uhci_submit_common() "breadth first" policy.  Drivers
1171          * can submit urbs of any length, but longer ones might need many
1172          * intervals to complete.
1173          */
1174         return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
1175 }
1176
1177 /*
1178  * Isochronous transfers
1179  */
1180 static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
1181 {
1182         struct urb *last_urb = NULL;
1183         struct list_head *tmp, *head;
1184         int ret = 0;
1185
1186         head = &uhci->urb_list;
1187         tmp = head->next;
1188         while (tmp != head) {
1189                 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1190                 struct urb *u = up->urb;
1191
1192                 tmp = tmp->next;
1193
1194                 /* look for pending URB's with identical pipe handle */
1195                 if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
1196                     (u->status == -EINPROGRESS) && (u != urb)) {
1197                         if (!last_urb)
1198                                 *start = u->start_frame;
1199                         last_urb = u;
1200                 }
1201         }
1202
1203         if (last_urb) {
1204                 *end = (last_urb->start_frame + last_urb->number_of_packets *
1205                                 last_urb->interval) & (UHCI_NUMFRAMES-1);
1206                 ret = 0;
1207         } else
1208                 ret = -1;       /* no previous urb found */
1209
1210         return ret;
1211 }
1212
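/*
 * Pick the starting frame for an isochronous URB.  With URB_ISO_ASAP and
 * no other URB pending on this pipe we start about 10 frames from now;
 * otherwise we continue right after the last pending URB.  A
 * caller-supplied start_frame is simply taken modulo UHCI_NUMFRAMES.
 */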
1213 static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
1214 {
1215         int limits;
1216         unsigned int start = 0, end = 0;
1217
1218         if (urb->number_of_packets > 900)       /* 900? Why? */
1219                 return -EFBIG;
1220
1221         limits = isochronous_find_limits(uhci, urb, &start, &end);
1222
1223         if (urb->transfer_flags & URB_ISO_ASAP) {
1224                 if (limits) {
1225                         int curframe;
1226
1227                         curframe = uhci_get_current_frame_number(uhci) % UHCI_NUMFRAMES;
1228                         urb->start_frame = (curframe + 10) % UHCI_NUMFRAMES;
1229                 } else
1230                         urb->start_frame = end;
1231         } else {
1232                 urb->start_frame %= UHCI_NUMFRAMES;
1233                 /* FIXME: Sanity check */
1234         }
1235
1236         return 0;
1237 }
1238
1239 /*
1240  * Isochronous transfers
1241  */
1242 static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1243 {
1244         struct uhci_td *td;
1245         int i, ret, frame;
1246         int status, destination;
1247
1248         status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
1249         destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
1250
1251         ret = isochronous_find_start(uhci, urb);
1252         if (ret)
1253                 return ret;
1254
1255         frame = urb->start_frame;
1256         for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
1257                 if (!urb->iso_frame_desc[i].length)
1258                         continue;
1259
1260                 td = uhci_alloc_td(uhci, urb->dev);
1261                 if (!td)
1262                         return -ENOMEM;
1263
1264                 uhci_add_td_to_urb(urb, td);
1265                 uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
1266                         urb->transfer_dma + urb->iso_frame_desc[i].offset);
1267
1268                 if (i + 1 >= urb->number_of_packets)
1269                         td->status |= cpu_to_le32(TD_CTRL_IOC);
1270
1271                 uhci_insert_td_frame_list(uhci, td, frame);
1272         }
1273
1274         return -EINPROGRESS;
1275 }
1276
1277 static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1278 {
1279         struct list_head *tmp, *head;
1280         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1281         int status;
1282         int i, ret = 0;
1283
1284         urb->actual_length = 0;
1285
1286         i = 0;
1287         head = &urbp->td_list;
1288         tmp = head->next;
1289         while (tmp != head) {
1290                 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1291                 int actlength;
1292
1293                 tmp = tmp->next;
1294
1295                 if (td_status(td) & TD_CTRL_ACTIVE)
1296                         return -EINPROGRESS;
1297
1298                 actlength = uhci_actual_length(td_status(td));
1299                 urb->iso_frame_desc[i].actual_length = actlength;
1300                 urb->actual_length += actlength;
1301
1302                 status = uhci_map_status(uhci_status_bits(td_status(td)),
1303                                 usb_pipeout(urb->pipe));
1304                 urb->iso_frame_desc[i].status = status;
1305                 if (status) {
1306                         urb->error_count++;
1307                         ret = status;
1308                 }
1309
1310                 i++;
1311         }
1312
1313         return ret;
1314 }
1315
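/*
 * Find another URB already pending on the same endpoint so a new
 * submission can be queued behind it.  Direction is ignored for control
 * transfers, and isochronous URBs are never matched.
 */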
1316 static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
1317 {
1318         struct list_head *tmp, *head;
1319
1320         /* We don't match Isoc transfers since they are special */
1321         if (usb_pipeisoc(urb->pipe))
1322                 return NULL;
1323
1324         head = &uhci->urb_list;
1325         tmp = head->next;
1326         while (tmp != head) {
1327                 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1328                 struct urb *u = up->urb;
1329
1330                 tmp = tmp->next;
1331
1332                 if (u->dev == urb->dev && u->status == -EINPROGRESS) {
1333                         /* For control, ignore the direction */
1334                         if (usb_pipecontrol(urb->pipe) &&
1335                             (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
1336                                 return u;
1337                         else if (u->pipe == urb->pipe)
1338                                 return u;
1339                 }
1340         }
1341
1342         return NULL;
1343 }
1344
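/*
 * hcd entry point: queue an URB.  Control, bulk, and interrupt URBs are
 * appended behind any URB already pending on the same endpoint; interrupt
 * and isochronous submissions also claim periodic bus bandwidth.
 */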
1345 static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, int mem_flags)
1346 {
1347         int ret = -EINVAL;
1348         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1349         unsigned long flags;
1350         struct urb *eurb;
1351         int bustime;
1352
1353         spin_lock_irqsave(&uhci->schedule_lock, flags);
1354
1355         if (urb->status != -EINPROGRESS)        /* URB already unlinked! */
1356                 goto out;
1357
1358         eurb = uhci_find_urb_ep(uhci, urb);
1359
1360         if (!uhci_alloc_urb_priv(uhci, urb)) {
1361                 ret = -ENOMEM;
1362                 goto out;
1363         }
1364
1365         switch (usb_pipetype(urb->pipe)) {
1366         case PIPE_CONTROL:
1367                 ret = uhci_submit_control(uhci, urb, eurb);
1368                 break;
1369         case PIPE_INTERRUPT:
1370                 if (!eurb) {
1371                         bustime = usb_check_bandwidth(urb->dev, urb);
1372                         if (bustime < 0)
1373                                 ret = bustime;
1374                         else {
1375                                 ret = uhci_submit_interrupt(uhci, urb, eurb);
1376                                 if (ret == -EINPROGRESS)
1377                                         usb_claim_bandwidth(urb->dev, urb, bustime, 0);
1378                         }
1379                 } else {        /* inherit from parent */
1380                         urb->bandwidth = eurb->bandwidth;
1381                         ret = uhci_submit_interrupt(uhci, urb, eurb);
1382                 }
1383                 break;
1384         case PIPE_BULK:
1385                 ret = uhci_submit_bulk(uhci, urb, eurb);
1386                 break;
1387         case PIPE_ISOCHRONOUS:
1388                 bustime = usb_check_bandwidth(urb->dev, urb);
1389                 if (bustime < 0) {
1390                         ret = bustime;
1391                         break;
1392                 }
1393
1394                 ret = uhci_submit_isochronous(uhci, urb);
1395                 if (ret == -EINPROGRESS)
1396                         usb_claim_bandwidth(urb->dev, urb, bustime, 1);
1397                 break;
1398         }
1399
1400         if (ret != -EINPROGRESS) {
1401                 /* Submit failed, so delete it from the urb_list */
1402                 struct urb_priv *urbp = urb->hcpriv;
1403
1404                 list_del_init(&urbp->urb_list);
1405                 uhci_destroy_urb_priv(uhci, urb);
1406         } else
1407                 ret = 0;
1408
1409 out:
1410         spin_unlock_irqrestore(&uhci->schedule_lock, flags);
1411         return ret;
1412 }
1413
1414 /*
1415  * Return the result of a transfer
1416  */
1417 static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
1418 {
1419         int ret = -EINPROGRESS;
1420         struct urb_priv *urbp;
1421
1422         spin_lock(&urb->lock);
1423
1424         urbp = (struct urb_priv *)urb->hcpriv;
1425
1426         if (urb->status != -EINPROGRESS)        /* URB already dequeued */
1427                 goto out;
1428
1429         switch (usb_pipetype(urb->pipe)) {
1430         case PIPE_CONTROL:
1431                 ret = uhci_result_control(uhci, urb);
1432                 break;
1433         case PIPE_BULK:
1434         case PIPE_INTERRUPT:
1435                 ret = uhci_result_common(uhci, urb);
1436                 break;
1437         case PIPE_ISOCHRONOUS:
1438                 ret = uhci_result_isochronous(uhci, urb);
1439                 break;
1440         }
1441
1442         if (ret == -EINPROGRESS)
1443                 goto out;
1444         urb->status = ret;
1445
1446         switch (usb_pipetype(urb->pipe)) {
1447         case PIPE_CONTROL:
1448         case PIPE_BULK:
1449         case PIPE_ISOCHRONOUS:
1450                 /* Release bandwidth for Interrupt or Isoc. transfers */
1451                 if (urb->bandwidth)
1452                         usb_release_bandwidth(urb->dev, urb, 1);
1453                 uhci_unlink_generic(uhci, urb);
1454                 break;
1455         case PIPE_INTERRUPT:
1456                 /* Release bandwidth for Interrupt or Isoc. transfers */
1457                 /* Make sure we don't release if we have a queued URB */
1458                 if (list_empty(&urbp->queue_list) && urb->bandwidth)
1459                         usb_release_bandwidth(urb->dev, urb, 0);
1460                 else
1461                         /* bandwidth was passed on to queued URB, */
1462                         /* so don't let usb_unlink_urb() release it */
1463                         urb->bandwidth = 0;
1464                 uhci_unlink_generic(uhci, urb);
1465                 break;
1466         default:
1467                 dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
1468                                 "for urb %p\n",
1469                                 __FUNCTION__, usb_pipetype(urb->pipe), urb);
1470         }
1471
1472         /* Move it from uhci->urb_list to uhci->complete_list */
1473         uhci_moveto_complete(uhci, urbp);
1474
1475 out:
1476         spin_unlock(&urb->lock);
1477 }
1478
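/*
 * Common unlink work: recover the endpoint's data toggle from the TDs
 * that did complete, take the URB out of its queue, and hand its QH to
 * the deferred-removal list.
 */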
1479 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
1480 {
1481         struct list_head *head, *tmp;
1482         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1483         int prevactive = 1;
1484
1485         uhci_dec_fsbr(uhci, urb);       /* Safe since it checks */
1486
1487         /*
1488          * Now we need to find out what the last successful toggle was
1489          * so we can update the local data toggle for the next transfer
1490          *
1491          * There are 3 ways the last successfully completed TD can be found:
1492          *
1493          * 1) The TD is NOT active and the actual length < expected length
1494          * 2) The TD is NOT active and it's the last TD in the chain
1495          * 3) The TD is active and the previous TD is NOT active
1496          *
1497          * Control and Isochronous ignore the toggle, so this is safe
1498          * for all types
1499          */
1500         head = &urbp->td_list;
1501         tmp = head->next;
1502         while (tmp != head) {
1503                 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1504
1505                 tmp = tmp->next;
1506
1507                 if (!(td_status(td) & TD_CTRL_ACTIVE) &&
1508                     (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td)) ||
1509                     tmp == head))
1510                         usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1511                                 uhci_packetout(td_token(td)),
1512                                 uhci_toggle(td_token(td)) ^ 1);
1513                 else if ((td_status(td) & TD_CTRL_ACTIVE) && !prevactive)
1514                         usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1515                                 uhci_packetout(td_token(td)),
1516                                 uhci_toggle(td_token(td)));
1517
1518                 prevactive = td_status(td) & TD_CTRL_ACTIVE;
1519         }
1520
1521         uhci_delete_queued_urb(uhci, urb);
1522
1523         /* The interrupt loop will reclaim the QH's */
1524         uhci_remove_qh(uhci, urbp->qh);
1525         urbp->qh = NULL;
1526 }
1527
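/*
 * hcd entry point: unlink an URB.  The URB is moved to urb_remove_list
 * and an interrupt is requested so the removal can finish once the HC
 * is guaranteed to have stopped using it.
 */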
1528 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
1529 {
1530         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1531         unsigned long flags;
1532         struct urb_priv *urbp;
1533         unsigned int age;
1534
1535         spin_lock_irqsave(&uhci->schedule_lock, flags);
1536         urbp = urb->hcpriv;
1537         if (!urbp)                      /* URB was never linked! */
1538                 goto done;
1539         list_del_init(&urbp->urb_list);
1540
1541         uhci_unlink_generic(uhci, urb);
1542
1543         age = uhci_get_current_frame_number(uhci);
1544         if (age != uhci->urb_remove_age) {
1545                 uhci_remove_pending_urbps(uhci);
1546                 uhci->urb_remove_age = age;
1547         }
1548
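        /*
         * Editor's note: parking the URB on urb_remove_list defers the final
         * reclaim until the frame counter advances, i.e. until the HC can no
         * longer be walking this URB's descriptors.
         */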
1549         /* If we're the first, set the next interrupt bit */
1550         if (list_empty(&uhci->urb_remove_list))
1551                 uhci_set_next_interrupt(uhci);
1552         list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);
1553
1554 done:
1555         spin_unlock_irqrestore(&uhci->schedule_lock, flags);
1556         return 0;
1557 }
1558
1559 static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
1560 {
1561         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1562         struct list_head *head, *tmp;
1563         int count = 0;
1564
1565         uhci_dec_fsbr(uhci, urb);
1566
1567         urbp->fsbr_timeout = 1;
1568
1569         /*
1570          * Ideally we would want to fix qh->element as well, but it's
1571          * read and written by the HC, so that could introduce a race.
1572          * It's not really worth the hassle.
1573          */
1574
1575         head = &urbp->td_list;
1576         tmp = head->next;
1577         while (tmp != head) {
1578                 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1579
1580                 tmp = tmp->next;
1581
1582                 /*
1583                  * Don't touch the last one (since it'll have the TERM
1584                  * bit set), and only flag every DEPTH_INTERVAL'th TD so
1585                  * that this queue doesn't hog the bandwidth
1586                  */
1587                 if (tmp != head && (count % DEPTH_INTERVAL) == (DEPTH_INTERVAL - 1))
1588                         td->link |= UHCI_PTR_DEPTH;
1589
1590                 count++;
1591         }
1592
1593         return 0;
1594 }
1595
1596 /*
1597  * uhci_get_current_frame_number()
1598  *
1599  * returns the current frame number for a USB bus/controller.
1600  */
1601 static int uhci_get_current_frame_number(struct uhci_hcd *uhci)
1602 {
1603         return inw(uhci->io_addr + USBFRNUM);
1604 }
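
/*
 * Editor's note: the raw USBFRNUM value is only used as an "age" stamp --
 * callers compare it for (in)equality against the value saved when items
 * were queued for removal, i.e. "has at least one frame gone by since then?"
 */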
1605
1606 static int init_stall_timer(struct usb_hcd *hcd);
1607
1608 static void stall_callback(unsigned long ptr)
1609 {
1610         struct usb_hcd *hcd = (struct usb_hcd *)ptr;
1611         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1612         struct list_head list, *tmp, *head;
1613         unsigned long flags;
1614
1615         INIT_LIST_HEAD(&list);
1616
1617         spin_lock_irqsave(&uhci->schedule_lock, flags);
1618         if (!list_empty(&uhci->urb_remove_list) &&
1619             uhci_get_current_frame_number(uhci) != uhci->urb_remove_age) {
1620                 uhci_remove_pending_urbps(uhci);
1621                 uhci_finish_completion(hcd, NULL);
1622         }
1623
1624         head = &uhci->urb_list;
1625         tmp = head->next;
1626         while (tmp != head) {
1627                 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1628                 struct urb *u = up->urb;
1629
1630                 tmp = tmp->next;
1631
1632                 spin_lock(&u->lock);
1633
1634                 /* Check if the FSBR timed out */
1635                 if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
1636                         uhci_fsbr_timeout(uhci, u);
1637
1638                 /* Check if the URB timed out */
1639                 if (u->timeout && u->status == -EINPROGRESS &&
1640                         time_after_eq(jiffies, up->inserttime + u->timeout)) {
1641                         u->status = -ETIMEDOUT;
1642                         list_move_tail(&up->urb_list, &list);
1643                 }
1644
1645                 spin_unlock(&u->lock);
1646         }
1647         spin_unlock_irqrestore(&uhci->schedule_lock, flags);
1648
1649         head = &list;
1650         tmp = head->next;
1651         while (tmp != head) {
1652                 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1653                 struct urb *u = up->urb;
1654
1655                 tmp = tmp->next;
1656
1657                 uhci_urb_dequeue(hcd, u);
1658         }
1659
1660         /* Really disable FSBR */
1661         if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
1662                 uhci->fsbrtimeout = 0;
1663                 uhci->skel_term_qh->link = UHCI_PTR_TERM;
1664         }
1665
1666         /* Poll for and perform state transitions */
1667         hc_state_transitions(uhci);
1668
1669         init_stall_timer(hcd);
1670 }
1671
1672 static int init_stall_timer(struct usb_hcd *hcd)
1673 {
1674         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1675
1676         init_timer(&uhci->stall_timer);
1677         uhci->stall_timer.function = stall_callback;
1678         uhci->stall_timer.data = (unsigned long)hcd;
1679         uhci->stall_timer.expires = jiffies + (HZ / 10);
1680         add_timer(&uhci->stall_timer);
1681
1682         return 0;
1683 }
1684
1685 static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
1686 {
1687         struct list_head *tmp, *head;
1688
1689         head = &uhci->qh_remove_list;
1690         tmp = head->next;
1691         while (tmp != head) {
1692                 struct uhci_qh *qh = list_entry(tmp, struct uhci_qh, remove_list);
1693
1694                 tmp = tmp->next;
1695
1696                 list_del_init(&qh->remove_list);
1697
1698                 uhci_free_qh(uhci, qh);
1699         }
1700 }
1701
1702 static void uhci_free_pending_tds(struct uhci_hcd *uhci)
1703 {
1704         struct list_head *tmp, *head;
1705
1706         head = &uhci->td_remove_list;
1707         tmp = head->next;
1708         while (tmp != head) {
1709                 struct uhci_td *td = list_entry(tmp, struct uhci_td, remove_list);
1710
1711                 tmp = tmp->next;
1712
1713                 list_del_init(&td->remove_list);
1714
1715                 uhci_free_td(uhci, td);
1716         }
1717 }
1718
1719 static void uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
1720 {
1721         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1722
1723         uhci_destroy_urb_priv(uhci, urb);
1724
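        /*
         * Editor's note: the schedule lock is dropped around the giveback
         * because the URB's completion handler may resubmit and re-enter
         * the HCD, which would deadlock if we still held the lock.
         */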
1725         spin_unlock(&uhci->schedule_lock);
1726         usb_hcd_giveback_urb(hcd, urb, regs);
1727         spin_lock(&uhci->schedule_lock);
1728 }
1729
1730 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs)
1731 {
1732         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1733         struct list_head *tmp, *head;
1734
1735         head = &uhci->complete_list;
1736         tmp = head->next;
1737         while (tmp != head) {
1738                 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
1739                 struct urb *urb = urbp->urb;
1740
1741                 list_del_init(&urbp->urb_list);
1742                 uhci_finish_urb(hcd, urb, regs);
1743
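                /*
                 * The giveback dropped the schedule lock, so restart the
                 * walk from the (possibly changed) head of the list.
                 */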
1744                 head = &uhci->complete_list;
1745                 tmp = head->next;
1746         }
1747 }
1748
1749 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
1750 {
1751
1752         /* Splice the urb_remove_list onto the end of the complete_list */
1753         list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
1754 }
1755
1756 static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
1757 {
1758         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1759         unsigned int io_addr = uhci->io_addr;
1760         unsigned short status;
1761         struct list_head *tmp, *head;
1762         unsigned int age;
1763
1764         /*
1765          * Read the interrupt status, and write it back to clear the
1766          * interrupt cause.  Contrary to the UHCI specification, the
1767          * "HC Halted" status bit is persistent: it is RO, not R/WC.
1768          */
1769         status = inw(io_addr + USBSTS);
1770         if (!(status & ~USBSTS_HCH))    /* shared interrupt, not mine */
1771                 return IRQ_NONE;
1772         outw(status, io_addr + USBSTS);         /* Clear it */
1773
1774         if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
1775                 if (status & USBSTS_HSE)
1776                         dev_err(uhci_dev(uhci), "host system error, "
1777                                         "PCI problems?\n");
1778                 if (status & USBSTS_HCPE)
1779                         dev_err(uhci_dev(uhci), "host controller process "
1780                                         "error, something bad happened!\n");
1781                 if ((status & USBSTS_HCH) && uhci->state > 0) {
1782                         dev_err(uhci_dev(uhci), "host controller halted, "
1783                                         "very bad!\n");
1784                         /* FIXME: Reset the controller, fix the offending TD */
1785                 }
1786         }
1787
1788         if (status & USBSTS_RD)
1789                 uhci->resume_detect = 1;
1790
1791         spin_lock(&uhci->schedule_lock);
1792
1793         age = uhci_get_current_frame_number(uhci);
1794         if (age != uhci->qh_remove_age)
1795                 uhci_free_pending_qhs(uhci);
1796         if (age != uhci->td_remove_age)
1797                 uhci_free_pending_tds(uhci);
1798         if (age != uhci->urb_remove_age)
1799                 uhci_remove_pending_urbps(uhci);
1800
1801         if (list_empty(&uhci->urb_remove_list) &&
1802             list_empty(&uhci->td_remove_list) &&
1803             list_empty(&uhci->qh_remove_list))
1804                 uhci_clear_next_interrupt(uhci);
1805         else
1806                 uhci_set_next_interrupt(uhci);
1807
1808         /* Walk the list of pending URB's to see which ones completed */
1809         head = &uhci->urb_list;
1810         tmp = head->next;
1811         while (tmp != head) {
1812                 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
1813                 struct urb *urb = urbp->urb;
1814
1815                 tmp = tmp->next;
1816
1817                 /* Checks the status and does all of the magic necessary */
1818                 uhci_transfer_result(uhci, urb);
1819         }
1820         uhci_finish_completion(hcd, regs);
1821
1822         spin_unlock(&uhci->schedule_lock);
1823
1824         /* Wake up anyone waiting for an URB to complete */
1825         wake_up_all(&uhci->waitqh);
1826
1827         return IRQ_HANDLED;
1828 }
1829
1830 static void reset_hc(struct uhci_hcd *uhci)
1831 {
1832         unsigned int io_addr = uhci->io_addr;
1833
1834         /* Global reset for 50ms */
1835         uhci->state = UHCI_RESET;
1836         outw(USBCMD_GRESET, io_addr + USBCMD);
1837         set_current_state(TASK_UNINTERRUPTIBLE);
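        /* (HZ*50+999)/1000 rounds the 50ms up to a whole number of jiffies */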
1838         schedule_timeout((HZ*50+999) / 1000);
1839         outw(0, io_addr + USBCMD);
1840
1841         /* Another 10ms delay */
1842         set_current_state(TASK_UNINTERRUPTIBLE);
1843         schedule_timeout((HZ*10+999) / 1000);
1844         uhci->resume_detect = 0;
1845 }
1846
1847 static void suspend_hc(struct uhci_hcd *uhci)
1848 {
1849         unsigned int io_addr = uhci->io_addr;
1850
1851         dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
1852         uhci->state = UHCI_SUSPENDED;
1853         uhci->resume_detect = 0;
1854         outw(USBCMD_EGSM, io_addr + USBCMD);
1855 }
1856
1857 static void wakeup_hc(struct uhci_hcd *uhci)
1858 {
1859         unsigned int io_addr = uhci->io_addr;
1860
1861         switch (uhci->state) {
1862                 case UHCI_SUSPENDED:            /* Start the resume */
1863                         dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
1864
1865                         /* Global resume for >= 20ms */
1866                         outw(USBCMD_FGR | USBCMD_EGSM, io_addr + USBCMD);
1867                         uhci->state = UHCI_RESUMING_1;
1868                         uhci->state_end = jiffies + (20*HZ+999) / 1000;
1869                         break;
1870
1871                 case UHCI_RESUMING_1:           /* End global resume */
1872                         uhci->state = UHCI_RESUMING_2;
1873                         outw(0, io_addr + USBCMD);
1874                         /* Falls through */
1875
1876                 case UHCI_RESUMING_2:           /* Wait for EOP to be sent */
1877                         if (inw(io_addr + USBCMD) & USBCMD_FGR)
1878                                 break;
1879
1880                         /* Run for at least 1 second, and
1881                          * mark it configured with a 64-byte max packet */
1882                         uhci->state = UHCI_RUNNING_GRACE;
1883                         uhci->state_end = jiffies + HZ;
1884                         outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP,
1885                                         io_addr + USBCMD);
1886                         break;
1887
1888                 case UHCI_RUNNING_GRACE:        /* Now allowed to suspend */
1889                         uhci->state = UHCI_RUNNING;
1890                         break;
1891
1892                 default:
1893                         break;
1894         }
1895 }
1896
1897 static int ports_active(struct uhci_hcd *uhci)
1898 {
1899         unsigned int io_addr = uhci->io_addr;
1900         int connection = 0;
1901         int i;
1902
1903         for (i = 0; i < uhci->rh_numports; i++)
1904                 connection |= (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_CCS);
1905
1906         return connection;
1907 }
1908
1909 static int suspend_allowed(struct uhci_hcd *uhci)
1910 {
1911         unsigned int io_addr = uhci->io_addr;
1912         int i;
1913
1914         if (to_pci_dev(uhci_dev(uhci))->vendor != PCI_VENDOR_ID_INTEL)
1915                 return 1;
1916
1917         /* Some of Intel's USB controllers have a bug that causes false
1918          * resume indications if any port has an over current condition.
1919          * To prevent problems, we will not allow a global suspend if
1920          * any ports are OC.
1921          *
1922          * Some motherboards using Intel's chipsets (but not using all
1923          * the USB ports) appear to hardwire the over current inputs active
1924          * to disable the USB ports.
1925          */
1926
1927         /* check for over current condition on any port */
1928         for (i = 0; i < uhci->rh_numports; i++) {
1929                 if (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_OC)
1930                         return 0;
1931         }
1932
1933         return 1;
1934 }
1935
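/*
 * Editor's summary of the state machine driven from the stall timer:
 * RUNNING -> SUSPENDING_GRACE when nothing is connected, then SUSPENDED
 * (EGSM set) if the ports stay idle for a second; a resume detection takes
 * us SUSPENDED -> RESUMING_1 (drive global resume for at least 20ms) ->
 * RESUMING_2 (wait for FGR to clear) -> RUNNING_GRACE (run for at least a
 * second before another suspend is allowed) -> RUNNING.
 */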
1936 static void hc_state_transitions(struct uhci_hcd *uhci)
1937 {
1938         switch (uhci->state) {
1939                 case UHCI_RUNNING:
1940
1941                         /* global suspend if nothing connected for 1 second */
1942                         if (!ports_active(uhci) && suspend_allowed(uhci)) {
1943                                 uhci->state = UHCI_SUSPENDING_GRACE;
1944                                 uhci->state_end = jiffies + HZ;
1945                         }
1946                         break;
1947
1948                 case UHCI_SUSPENDING_GRACE:
1949                         if (ports_active(uhci))
1950                                 uhci->state = UHCI_RUNNING;
1951                         else if (time_after_eq(jiffies, uhci->state_end))
1952                                 suspend_hc(uhci);
1953                         break;
1954
1955                 case UHCI_SUSPENDED:
1956
1957                         /* wakeup if requested by a device */
1958                         if (uhci->resume_detect)
1959                                 wakeup_hc(uhci);
1960                         break;
1961
1962                 case UHCI_RESUMING_1:
1963                 case UHCI_RESUMING_2:
1964                 case UHCI_RUNNING_GRACE:
1965                         if (time_after_eq(jiffies, uhci->state_end))
1966                                 wakeup_hc(uhci);
1967                         break;
1968
1969                 default:
1970                         break;
1971         }
1972 }
1973
1974 static void start_hc(struct uhci_hcd *uhci)
1975 {
1976         unsigned int io_addr = uhci->io_addr;
1977         int timeout = 1000;
1978
1979         /*
1980          * Reset the HC - this will force us to get a
1981          * new notification of any already connected
1982          * ports due to the virtual disconnect that it
1983          * implies.
1984          */
1985         outw(USBCMD_HCRESET, io_addr + USBCMD);
1986         while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
1987                 if (!--timeout) {
1988                         dev_err(uhci_dev(uhci), "USBCMD_HCRESET timed out!\n");
1989                         break;
1990                 }
1991         }
1992
1993         /* Turn on all interrupts */
1994         outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
1995                 io_addr + USBINTR);
1996
1997         /* Start at frame 0 */
1998         outw(0, io_addr + USBFRNUM);
1999         outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);
2000
2001         /* Run and mark it configured with a 64-byte max packet */
2002         uhci->state = UHCI_RUNNING_GRACE;
2003         uhci->state_end = jiffies + HZ;
2004         outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
2005
2006         uhci->hcd.state = USB_STATE_RUNNING;
2007 }
2008
2009 /*
2010  * De-allocate all resources.
2011  */
2012 static void release_uhci(struct uhci_hcd *uhci)
2013 {
2014         int i;
2015
2016         for (i = 0; i < UHCI_NUM_SKELQH; i++)
2017                 if (uhci->skelqh[i]) {
2018                         uhci_free_qh(uhci, uhci->skelqh[i]);
2019                         uhci->skelqh[i] = NULL;
2020                 }
2021
2022         if (uhci->term_td) {
2023                 uhci_free_td(uhci, uhci->term_td);
2024                 uhci->term_td = NULL;
2025         }
2026
2027         if (uhci->qh_pool) {
2028                 dma_pool_destroy(uhci->qh_pool);
2029                 uhci->qh_pool = NULL;
2030         }
2031
2032         if (uhci->td_pool) {
2033                 dma_pool_destroy(uhci->td_pool);
2034                 uhci->td_pool = NULL;
2035         }
2036
2037         if (uhci->fl) {
2038                 dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2039                                 uhci->fl, uhci->fl->dma_handle);
2040                 uhci->fl = NULL;
2041         }
2042
2043 #ifdef CONFIG_PROC_FS
2044         if (uhci->proc_entry) {
2045                 remove_proc_entry(uhci->hcd.self.bus_name, uhci_proc_root);
2046                 uhci->proc_entry = NULL;
2047         }
2048 #endif
2049 }
2050
2051 static int uhci_reset(struct usb_hcd *hcd)
2052 {
2053         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2054
2055         uhci->io_addr = (unsigned long) hcd->regs;
2056
2057         /* Turn off all interrupts */
2058         outw(0, uhci->io_addr + USBINTR);
2059
2060         /* Maybe kick BIOS off this hardware.  Then reset, so we won't get
2061          * interrupts from any previous setup.
2062          */
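        /*
         * Editor's note (assumption, not verified against the datasheet):
         * USBLEGSUP is the PIIX legacy-support register in PCI config space;
         * writing USBLEGSUP_DEFAULT is believed to clear the SMI trap bits so
         * the BIOS's keyboard/mouse emulation stops claiming the controller.
         */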
2063         reset_hc(uhci);
2064         pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
2065                         USBLEGSUP_DEFAULT);
2066         return 0;
2067 }
2068
2069 /*
2070  * Allocate a frame list, and then setup the skeleton
2071  *
2072  * The hardware doesn't really distinguish between the
2073  * queues, but the order does matter for the
2074  * protocols higher up. The order is:
2075  *
2076  *  - Any isochronous transfers are handled before any
2077  *    of the queues. We don't do that here, because
2078  *    we'll create the actual TD entries on demand.
2079  *  - The first queue is the interrupt queue.
2080  *  - The second queue is the control queue, split into low- and full-speed.
2081  *  - The third queue is the bulk queue.
2082  *  - The fourth queue is the bandwidth reclamation queue, which loops back
2083  *    to the full-speed control queue.
2084  */
2085 static int uhci_start(struct usb_hcd *hcd)
2086 {
2087         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2088         int retval = -EBUSY;
2089         int i, port;
2090         unsigned io_size;
2091         dma_addr_t dma_handle;
2092         struct usb_device *udev;
2093 #ifdef CONFIG_PROC_FS
2094         struct proc_dir_entry *ent;
2095 #endif
2096
2097         io_size = pci_resource_len(to_pci_dev(uhci_dev(uhci)), hcd->region);
2098
2099 #ifdef CONFIG_PROC_FS
2100         ent = create_proc_entry(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
2101         if (!ent) {
2102                 dev_err(uhci_dev(uhci), "couldn't create uhci proc entry\n");
2103                 retval = -ENOMEM;
2104                 goto err_create_proc_entry;
2105         }
2106
2107         ent->data = uhci;
2108         ent->proc_fops = &uhci_proc_operations;
2109         ent->size = 0;
2110         uhci->proc_entry = ent;
2111 #endif
2112
2113         uhci->fsbr = 0;
2114         uhci->fsbrtimeout = 0;
2115
2116         spin_lock_init(&uhci->schedule_lock);
2117         INIT_LIST_HEAD(&uhci->qh_remove_list);
2118
2119         INIT_LIST_HEAD(&uhci->td_remove_list);
2120
2121         INIT_LIST_HEAD(&uhci->urb_remove_list);
2122
2123         INIT_LIST_HEAD(&uhci->urb_list);
2124
2125         INIT_LIST_HEAD(&uhci->complete_list);
2126
2127         init_waitqueue_head(&uhci->waitqh);
2128
2129         uhci->fl = dma_alloc_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2130                         &dma_handle, 0);
2131         if (!uhci->fl) {
2132                 dev_err(uhci_dev(uhci), "unable to allocate "
2133                                 "consistent memory for frame list\n");
2134                 goto err_alloc_fl;
2135         }
2136
2137         memset((void *)uhci->fl, 0, sizeof(*uhci->fl));
2138
2139         uhci->fl->dma_handle = dma_handle;
2140
2141         uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
2142                         sizeof(struct uhci_td), 16, 0);
2143         if (!uhci->td_pool) {
2144                 dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
2145                 goto err_create_td_pool;
2146         }
2147
2148         uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
2149                         sizeof(struct uhci_qh), 16, 0);
2150         if (!uhci->qh_pool) {
2151                 dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
2152                 goto err_create_qh_pool;
2153         }
2154
2155         /* Initialize the root hub */
2156
2157         /* The UHCI spec says devices must have 2 ports, but goes on to say */
2158         /*  they may have more without giving any way to determine how many. */
2159         /*  However, according to the spec, bit 7 of the port status/control */
2160         /*  register is always set to 1, so we use that to our advantage */
2161         for (port = 0; port < (io_size - 0x10) / 2; port++) {
2162                 unsigned int portstatus;
2163
2164                 portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
2165                 if (!(portstatus & 0x0080))
2166                         break;
2167         }
2168         if (debug)
2169                 dev_info(uhci_dev(uhci), "detected %d ports\n", port);
2170
2171         /* This is experimental, so anything less than 2 or greater than 8 */
2172         /*  is something weird and we'll ignore it */
2173         if (port < 2 || port > UHCI_RH_MAXCHILD) {
2174                 dev_info(uhci_dev(uhci), "port count misdetected? "
2175                                 "forcing to 2 ports\n");
2176                 port = 2;
2177         }
2178
2179         uhci->rh_numports = port;
2180
2181         hcd->self.root_hub = udev = usb_alloc_dev(NULL, &hcd->self, 0);
2182         if (!udev) {
2183                 dev_err(uhci_dev(uhci), "unable to allocate root hub\n");
2184                 goto err_alloc_root_hub;
2185         }
2186
2187         uhci->term_td = uhci_alloc_td(uhci, udev);
2188         if (!uhci->term_td) {
2189                 dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
2190                 goto err_alloc_term_td;
2191         }
2192
2193         for (i = 0; i < UHCI_NUM_SKELQH; i++) {
2194                 uhci->skelqh[i] = uhci_alloc_qh(uhci, udev);
2195                 if (!uhci->skelqh[i]) {
2196                         dev_err(uhci_dev(uhci), "unable to allocate QH\n");
2197                         goto err_alloc_skelqh;
2198                 }
2199         }
2200
2201         /*
2202          * 8 Interrupt queues; link all higher int queues to int1,
2203          * then link int1 to control and control to bulk
2204          */
2205         uhci->skel_int128_qh->link =
2206                         uhci->skel_int64_qh->link =
2207                         uhci->skel_int32_qh->link =
2208                         uhci->skel_int16_qh->link =
2209                         uhci->skel_int8_qh->link =
2210                         uhci->skel_int4_qh->link =
2211                         uhci->skel_int2_qh->link =
2212                         cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
2213         uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;
2214
2215         uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
2216         uhci->skel_fs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
2217         uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;
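
        /*
         * Editor's summary of the resulting skeleton chain:
         * int128/64/32/16/8/4/2 -> int1 -> ls_control -> fs_control ->
         * bulk -> term
         */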
2218
2219         /* This dummy TD is to work around a bug in Intel PIIX controllers */
2220         uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
2221                 (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
2222         uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);
2223
2224         uhci->skel_term_qh->link = UHCI_PTR_TERM;
2225         uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);
2226
2227         /*
2228          * Fill the frame list: make all entries point to the proper
2229          * interrupt queue.
2230          *
2231          * The interrupt queues will be interleaved as evenly as possible.
2232          * There's not much to be done about period-1 interrupts; they have
2233          * to occur in every frame.  But we can schedule period-2 interrupts
2234          * in odd-numbered frames, period-4 interrupts in frames congruent
2235          * to 2 (mod 4), and so on.  This way each frame only has two
2236          * interrupt QHs, which will help spread out bandwidth utilization.
2237          */
2238         for (i = 0; i < UHCI_NUMFRAMES; i++) {
2239                 int irq;
2240
2241                 /*
2242                  * ffs (Find First bit Set) does exactly what we need:
2243                  * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[6],
2244                  * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc.
2245                  * ffs > 6 => not on any high-period queue, so use
2246                  *      skel_int1_qh = skelqh[7].
2247                  * Add UHCI_NUMFRAMES to ensure at least one bit is set.
2248                  */
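                /*
                 * Worked example (editor's illustration): frame 6 gives
                 * __ffs(6 + 1024) = 1, so irq = 5 and the entry points at
                 * skelqh[5] (skel_int4_qh); frame 0 gives __ffs(1024) = 10,
                 * irq = -4, which is clamped to 7 (skel_int1_qh).
                 */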
2249                 irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES);
2250                 if (irq < 0)
2251                         irq = 7;
2252
2253                 /* Only place we don't use the frame list routines */
2254                 uhci->fl->frame[i] = cpu_to_le32(uhci->skelqh[irq]->dma_handle);
2255         }
2256
2257         start_hc(uhci);
2258
2259         init_stall_timer(hcd);
2260
2261         udev->speed = USB_SPEED_FULL;
2262
2263         if (usb_register_root_hub(udev, uhci_dev(uhci)) != 0) {
2264                 dev_err(uhci_dev(uhci), "unable to start root hub\n");
2265                 retval = -ENOMEM;
2266                 goto err_start_root_hub;
2267         }
2268
2269         return 0;
2270
2271 /*
2272  * error exits:
2273  */
2274 err_start_root_hub:
2275         reset_hc(uhci);
2276
2277         del_timer_sync(&uhci->stall_timer);
2278
2279 err_alloc_skelqh:
2280         for (i = 0; i < UHCI_NUM_SKELQH; i++)
2281                 if (uhci->skelqh[i]) {
2282                         uhci_free_qh(uhci, uhci->skelqh[i]);
2283                         uhci->skelqh[i] = NULL;
2284                 }
2285
2286         uhci_free_td(uhci, uhci->term_td);
2287         uhci->term_td = NULL;
2288
2289 err_alloc_term_td:
2290         usb_put_dev(udev);
2291         hcd->self.root_hub = NULL;
2292
2293 err_alloc_root_hub:
2294         dma_pool_destroy(uhci->qh_pool);
2295         uhci->qh_pool = NULL;
2296
2297 err_create_qh_pool:
2298         dma_pool_destroy(uhci->td_pool);
2299         uhci->td_pool = NULL;
2300
2301 err_create_td_pool:
2302         dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2303                         uhci->fl, uhci->fl->dma_handle);
2304         uhci->fl = NULL;
2305
2306 err_alloc_fl:
2307 #ifdef CONFIG_PROC_FS
2308         remove_proc_entry(hcd->self.bus_name, uhci_proc_root);
2309         uhci->proc_entry = NULL;
2310
2311 err_create_proc_entry:
2312 #endif
2313
2314         return retval;
2315 }
2316
2317 static void uhci_stop(struct usb_hcd *hcd)
2318 {
2319         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2320
2321         del_timer_sync(&uhci->stall_timer);
2322
2323         /*
2324          * At this point, we're guaranteed that no new connections can be
2325          * made to this bus since there are no more parents.
2326          */
2327
2328         reset_hc(uhci);
2329
2330         spin_lock_irq(&uhci->schedule_lock);
2331         uhci_free_pending_qhs(uhci);
2332         uhci_free_pending_tds(uhci);
2333         uhci_remove_pending_urbps(uhci);
2334         uhci_finish_completion(hcd, NULL);
2335
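        /*
         * Editor's note: the giveback pass above may itself have queued more
         * TDs (and possibly QHs) for removal, so reap them a second time.
         */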
2336         uhci_free_pending_qhs(uhci);
2337         uhci_free_pending_tds(uhci);
2338         spin_unlock_irq(&uhci->schedule_lock);
2339
2340         /* Wake up anyone waiting for an URB to complete */
2341         wake_up_all(&uhci->waitqh);
2342         
2343         release_uhci(uhci);
2344 }
2345
2346 #ifdef CONFIG_PM
2347 static int uhci_suspend(struct usb_hcd *hcd, u32 state)
2348 {
2349         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2350
2351         /* Don't try to suspend broken motherboards, reset instead */
2352         if (suspend_allowed(uhci)) {
2353                 suspend_hc(uhci);
2354                 uhci->saved_framenumber =
2355                                 inw(uhci->io_addr + USBFRNUM) & 0x3ff;
2356         } else
2357                 reset_hc(uhci);
2358         return 0;
2359 }
2360
2361 static int uhci_resume(struct usb_hcd *hcd)
2362 {
2363         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2364
2365         pci_set_master(to_pci_dev(uhci_dev(uhci)));
2366
2367         if (uhci->state == UHCI_SUSPENDED) {
2368
2369                 /*
2370                  * Some systems don't maintain the UHCI register values
2371                  * during a PM suspend/resume cycle, so reinitialize
2372                  * the Frame Number, the Framelist Base Address, and the
2373                  * Interrupt Enable registers.
2374                  */
2375                 outw(uhci->saved_framenumber, uhci->io_addr + USBFRNUM);
2376                 outl(uhci->fl->dma_handle, uhci->io_addr + USBFLBASEADD);
2377                 outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC |
2378                                 USBINTR_SP, uhci->io_addr + USBINTR);
2379                 uhci->resume_detect = 1;
2380         } else {
2381                 reset_hc(uhci);
2382                 start_hc(uhci);
2383         }
2384         uhci->hcd.state = USB_STATE_RUNNING;
2385         return 0;
2386 }
2387 #endif
2388
2389 static struct usb_hcd *uhci_hcd_alloc(void)
2390 {
2391         struct uhci_hcd *uhci;
2392
2393         uhci = (struct uhci_hcd *)kmalloc(sizeof(*uhci), GFP_KERNEL);
2394         if (!uhci)
2395                 return NULL;
2396
2397         memset(uhci, 0, sizeof(*uhci));
2398         uhci->hcd.product_desc = "UHCI Host Controller";
2399         return &uhci->hcd;
2400 }
2401
2402 static void uhci_hcd_free(struct usb_hcd *hcd)
2403 {
2404         kfree(hcd_to_uhci(hcd));
2405 }
2406
2407 /* Are there any URBs for a particular device/endpoint on a given list? */
2408 static int urbs_for_ep_list(struct list_head *head,
2409                 struct hcd_dev *hdev, int ep)
2410 {
2411         struct urb_priv *urbp;
2412
2413         list_for_each_entry(urbp, head, urb_list) {
2414                 struct urb *urb = urbp->urb;
2415
2416                 if (hdev == urb->dev->hcpriv && ep ==
2417                                 (usb_pipeendpoint(urb->pipe) |
2418                                  usb_pipein(urb->pipe)))
2419                         return 1;
2420         }
2421         return 0;
2422 }
2423
2424 /* Are there any URBs for a particular device/endpoint? */
2425 static int urbs_for_ep(struct uhci_hcd *uhci, struct hcd_dev *hdev, int ep)
2426 {
2427         int rc;
2428
2429         spin_lock_irq(&uhci->schedule_lock);
2430         rc = (urbs_for_ep_list(&uhci->urb_list, hdev, ep) ||
2431                         urbs_for_ep_list(&uhci->complete_list, hdev, ep) ||
2432                         urbs_for_ep_list(&uhci->urb_remove_list, hdev, ep));
2433         spin_unlock_irq(&uhci->schedule_lock);
2434         return rc;
2435 }
2436
2437 /* Wait until all the URBs for a particular device/endpoint are gone */
2438 static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
2439                 struct hcd_dev *hdev, int endpoint)
2440 {
2441         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2442
2443         wait_event_interruptible(uhci->waitqh,
2444                         !urbs_for_ep(uhci, hdev, endpoint));
2445 }
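
/*
 * Editor's note: the wait above is satisfied by the wake_up_all() calls in
 * uhci_irq() and uhci_stop(), which run after completed URBs have been
 * given back.
 */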
2446
2447 static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
2448 {
2449         return uhci_get_current_frame_number(hcd_to_uhci(hcd));
2450 }
2451
2452 static const char hcd_name[] = "uhci_hcd";
2453
2454 static const struct hc_driver uhci_driver = {
2455         .description =          hcd_name,
2456
2457         /* Generic hardware linkage */
2458         .irq =                  uhci_irq,
2459         .flags =                HCD_USB11,
2460
2461         /* Basic lifecycle operations */
2462         .reset =                uhci_reset,
2463         .start =                uhci_start,
2464 #ifdef CONFIG_PM
2465         .suspend =              uhci_suspend,
2466         .resume =               uhci_resume,
2467 #endif
2468         .stop =                 uhci_stop,
2469
2470         .hcd_alloc =            uhci_hcd_alloc,
2471         .hcd_free =             uhci_hcd_free,
2472
2473         .urb_enqueue =          uhci_urb_enqueue,
2474         .urb_dequeue =          uhci_urb_dequeue,
2475
2476         .endpoint_disable =     uhci_hcd_endpoint_disable,
2477         .get_frame_number =     uhci_hcd_get_frame_number,
2478
2479         .hub_status_data =      uhci_hub_status_data,
2480         .hub_control =          uhci_hub_control,
2481 };
2482
2483 static const struct pci_device_id uhci_pci_ids[] = { {
2484         /* handle any USB UHCI controller */
2485         PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
2486         .driver_data =  (unsigned long) &uhci_driver,
2487         }, { /* end: all zeroes */ }
2488 };
2489
2490 MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
2491
2492 static struct pci_driver uhci_pci_driver = {
2493         .name =         (char *)hcd_name,
2494         .id_table =     uhci_pci_ids,
2495
2496         .probe =        usb_hcd_pci_probe,
2497         .remove =       usb_hcd_pci_remove,
2498
2499 #ifdef  CONFIG_PM
2500         .suspend =      usb_hcd_pci_suspend,
2501         .resume =       usb_hcd_pci_resume,
2502 #endif  /* PM */
2503 };
2504  
2505 static int __init uhci_hcd_init(void)
2506 {
2507         int retval = -ENOMEM;
2508
2509         printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");
2510
2511         if (usb_disabled())
2512                 return -ENODEV;
2513
2514         if (debug) {
2515                 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
2516                 if (!errbuf)
2517                         goto errbuf_failed;
2518         }
2519
2520 #ifdef CONFIG_PROC_FS
2521         uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, 0);
2522         if (!uhci_proc_root)
2523                 goto proc_failed;
2524 #endif
2525
2526         uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
2527                 sizeof(struct urb_priv), 0, 0, NULL, NULL);
2528         if (!uhci_up_cachep)
2529                 goto up_failed;
2530
2531         retval = pci_module_init(&uhci_pci_driver);
2532         if (retval)
2533                 goto init_failed;
2534
2535         return 0;
2536
2537 init_failed:
2538         if (kmem_cache_destroy(uhci_up_cachep))
2539                 warn("not all urb_priv's were freed!");
2540
2541 up_failed:
2542
2543 #ifdef CONFIG_PROC_FS
2544         remove_proc_entry("driver/uhci", 0);
2545
2546 proc_failed:
2547 #endif
2548         if (errbuf)
2549                 kfree(errbuf);
2550
2551 errbuf_failed:
2552
2553         return retval;
2554 }
2555
2556 static void __exit uhci_hcd_cleanup(void) 
2557 {
2558         pci_unregister_driver(&uhci_pci_driver);
2559         
2560         if (kmem_cache_destroy(uhci_up_cachep))
2561                 warn("not all urb_priv's were freed!");
2562
2563 #ifdef CONFIG_PROC_FS
2564         remove_proc_entry("driver/uhci", 0);
2565 #endif
2566
2567         if (errbuf)
2568                 kfree(errbuf);
2569 }
2570
2571 module_init(uhci_hcd_init);
2572 module_exit(uhci_hcd_cleanup);
2573
2574 MODULE_AUTHOR(DRIVER_AUTHOR);
2575 MODULE_DESCRIPTION(DRIVER_DESC);
2576 MODULE_LICENSE("GPL");