linux-2.6.git (vserver 1.9.3): drivers/usb/host/uhci-hcd.c
1 /*
2  * Universal Host Controller Interface driver for USB.
3  *
4  * Maintainer: Alan Stern <stern@rowland.harvard.edu>
5  *
6  * (C) Copyright 1999 Linus Torvalds
7  * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8  * (C) Copyright 1999 Randy Dunlap
9  * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10  * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11  * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12  * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13  * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14  *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15  * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
16  * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
17  *
18  * Intel documents this fairly well, and as far as I know there
19  * are no royalties or anything like that, but even so there are
20  * people who decided that they want to do the same thing in a
21  * completely different way.
22  *
23  * WARNING! The USB documentation is downright evil. Most of it
24  * is just crap, written by a committee. You're better off ignoring
25  * most of it; the important stuff is:
26  *  - the low-level protocol (fairly simple but lots of small details)
27  *  - working around the horridness of the rest
28  */
29
30 #include <linux/config.h>
31 #ifdef CONFIG_USB_DEBUG
32 #define DEBUG
33 #else
34 #undef DEBUG
35 #endif
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/kernel.h>
39 #include <linux/init.h>
40 #include <linux/delay.h>
41 #include <linux/ioport.h>
42 #include <linux/sched.h>
43 #include <linux/slab.h>
44 #include <linux/smp_lock.h>
45 #include <linux/errno.h>
46 #include <linux/unistd.h>
47 #include <linux/interrupt.h>
48 #include <linux/spinlock.h>
49 #include <linux/proc_fs.h>
50 #include <linux/pm.h>
51 #include <linux/dmapool.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/usb.h>
54
55 #include <asm/bitops.h>
56 #include <asm/uaccess.h>
57 #include <asm/io.h>
58 #include <asm/irq.h>
59 #include <asm/system.h>
60
61 #include "../core/hcd.h"
62 #include "uhci-hcd.h"
63
64 /*
65  * Version Information
66  */
67 #define DRIVER_VERSION "v2.2"
68 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
69 Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
70 Alan Stern"
71 #define DRIVER_DESC "USB Universal Host Controller Interface driver"
72
73 /*
74  * debug = 0, no debugging messages
75  * debug = 1, dump failed URB's except for stalls
76  * debug = 2, dump all failed URB's (including stalls)
77  *            show all queues in /proc/driver/uhci/[pci_addr]
78  * debug = 3, show all TD's in URB's when dumping
79  */
80 #ifdef DEBUG
81 static int debug = 1;
82 #else
83 static int debug = 0;
84 #endif
85 module_param(debug, int, S_IRUGO | S_IWUSR);
86 MODULE_PARM_DESC(debug, "Debug level");
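/*
 * The debug level can be raised at module load time, for example with
 * something like "modprobe uhci-hcd debug=2"; since the parameter is
 * declared S_IRUGO | S_IWUSR it can also be read and changed at runtime
 * through sysfs.
 */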
87 static char *errbuf;
88 #define ERRBUF_LEN    (32 * 1024)
89
90 #include "uhci-hub.c"
91 #include "uhci-debug.c"
92
93 static kmem_cache_t *uhci_up_cachep;    /* urb_priv */
94
95 static int uhci_get_current_frame_number(struct uhci_hcd *uhci);
96 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
97 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
98 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
99 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs);
100 static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
101 static void uhci_free_pending_tds(struct uhci_hcd *uhci);
102
103 static void hc_state_transitions(struct uhci_hcd *uhci);
104
105 /* If a transfer is still active after this much time, turn off FSBR */
106 #define IDLE_TIMEOUT    msecs_to_jiffies(50)
107 #define FSBR_DELAY      msecs_to_jiffies(50)
108
109 /* When we time out an idle transfer for FSBR, we'll switch it over to */
110 /* depth first traversal. We'll do it in groups of this number of TD's */
111 /* to make sure it doesn't hog all of the bandwidth */
112 #define DEPTH_INTERVAL 5
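
/*
 * FSBR here is Full Speed Bandwidth Reclamation: while at least one URB
 * has it enabled, the terminating skeleton QH is looped back to the
 * full-speed control/bulk queues (see uhci_inc_fsbr() below), so the
 * controller keeps retrying them for the rest of each frame.  Once such a
 * transfer has been idle for IDLE_TIMEOUT, uhci_fsbr_timeout() switches
 * every DEPTH_INTERVAL-th TD over to depth-first traversal instead.
 */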
113
114 /*
115  * Technically, updating td->status here is a race, but it's not really a
116  * problem. The worst that can happen is that we set the IOC bit again
117  * generating a spurious interrupt. We could fix this by creating another
118  * QH and leaving the IOC bit always set, but then we would have to play
119  * games with the FSBR code to make sure we get the correct order in all
120  * the cases. I don't think it's worth the effort
121  */
122 static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
123 {
124         uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC); 
125 }
126
127 static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
128 {
129         uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
130 }
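
/*
 * uhci->term_td is the single TD hanging off the terminating skeleton QH
 * at the tail of the schedule.  Setting IOC on it makes the controller
 * raise an interrupt at the end of the next frame, which is how the
 * driver gets back into its interrupt handler to process the pending
 * qh/td/urb remove lists.
 */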
131
132 static inline void uhci_moveto_complete(struct uhci_hcd *uhci, 
133                                         struct urb_priv *urbp)
134 {
135         list_move_tail(&urbp->urb_list, &uhci->complete_list);
136 }
137
138 static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
139 {
140         dma_addr_t dma_handle;
141         struct uhci_td *td;
142
143         td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
144         if (!td)
145                 return NULL;
146
147         td->dma_handle = dma_handle;
148
149         td->link = UHCI_PTR_TERM;
150         td->buffer = 0;
151
152         td->frame = -1;
153         td->dev = dev;
154
155         INIT_LIST_HEAD(&td->list);
156         INIT_LIST_HEAD(&td->remove_list);
157         INIT_LIST_HEAD(&td->fl_list);
158
159         usb_get_dev(dev);
160
161         return td;
162 }
163
164 static inline void uhci_fill_td(struct uhci_td *td, u32 status,
165                 u32 token, u32 buffer)
166 {
167         td->status = cpu_to_le32(status);
168         td->token = cpu_to_le32(token);
169         td->buffer = cpu_to_le32(buffer);
170 }
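
/*
 * Purely for illustration, a hypothetical helper showing how the two
 * helpers above combine; this is essentially how uhci_submit_control()
 * below builds its 8-byte SETUP-stage TD (the low-speed bit and the
 * urb_priv bookkeeping are omitted here).
 */
#if 0
static struct uhci_td *example_build_setup_td(struct uhci_hcd *uhci,
                struct urb *urb)
{
        struct uhci_td *td = uhci_alloc_td(uhci, urb->dev);

        if (!td)
                return NULL;
        uhci_fill_td(td, TD_CTRL_ACTIVE | uhci_maxerr(3),
                        (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP |
                                uhci_explen(7),
                        urb->setup_dma);
        return td;
}
#endif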
171
172 /*
173  * We insert Isochronous URB's directly into the frame list at the beginning
174  */
175 static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
176 {
177         framenum %= UHCI_NUMFRAMES;
178
179         td->frame = framenum;
180
181         /* Is there a TD already mapped there? */
182         if (uhci->fl->frame_cpu[framenum]) {
183                 struct uhci_td *ftd, *ltd;
184
185                 ftd = uhci->fl->frame_cpu[framenum];
186                 ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
187
188                 list_add_tail(&td->fl_list, &ftd->fl_list);
189
190                 td->link = ltd->link;
191                 wmb();
192                 ltd->link = cpu_to_le32(td->dma_handle);
193         } else {
194                 td->link = uhci->fl->frame[framenum];
195                 wmb();
196                 uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
197                 uhci->fl->frame_cpu[framenum] = td;
198         }
199 }
200
201 static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
202 {
203         /* If it's not inserted, don't remove it */
204         if (td->frame == -1 && list_empty(&td->fl_list))
205                 return;
206
207         if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
208                 if (list_empty(&td->fl_list)) {
209                         uhci->fl->frame[td->frame] = td->link;
210                         uhci->fl->frame_cpu[td->frame] = NULL;
211                 } else {
212                         struct uhci_td *ntd;
213
214                         ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
215                         uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
216                         uhci->fl->frame_cpu[td->frame] = ntd;
217                 }
218         } else {
219                 struct uhci_td *ptd;
220
221                 ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
222                 ptd->link = td->link;
223         }
224
225         wmb();
226         td->link = UHCI_PTR_TERM;
227
228         list_del_init(&td->fl_list);
229         td->frame = -1;
230 }
231
232 /*
233  * Inserts a td into qh list at the top.
234  */
235 static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, __le32 breadth)
236 {
237         struct list_head *tmp, *head;
238         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
239         struct uhci_td *td, *ptd;
240
241         if (list_empty(&urbp->td_list))
242                 return;
243
244         head = &urbp->td_list;
245         tmp = head->next;
246
247         /* Ordering isn't important here since the QH hasn't been */
248         /*  inserted into the schedule yet */
249         td = list_entry(tmp, struct uhci_td, list);
250
251         /* Add the first TD to the QH element pointer */
252         qh->element = cpu_to_le32(td->dma_handle) | breadth;
253
254         ptd = td;
255
256         /* Then link the rest of the TD's */
257         tmp = tmp->next;
258         while (tmp != head) {
259                 td = list_entry(tmp, struct uhci_td, list);
260
261                 tmp = tmp->next;
262
263                 ptd->link = cpu_to_le32(td->dma_handle) | breadth;
264
265                 ptd = td;
266         }
267
268         ptd->link = UHCI_PTR_TERM;
269 }
270
271 static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
272 {
273         if (!list_empty(&td->list))
274                 dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
275         if (!list_empty(&td->remove_list))
276                 dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
277         if (!list_empty(&td->fl_list))
278                 dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);
279
280         if (td->dev)
281                 usb_put_dev(td->dev);
282
283         dma_pool_free(uhci->td_pool, td, td->dma_handle);
284 }
285
286 static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
287 {
288         dma_addr_t dma_handle;
289         struct uhci_qh *qh;
290
291         qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
292         if (!qh)
293                 return NULL;
294
295         qh->dma_handle = dma_handle;
296
297         qh->element = UHCI_PTR_TERM;
298         qh->link = UHCI_PTR_TERM;
299
300         qh->dev = dev;
301         qh->urbp = NULL;
302
303         INIT_LIST_HEAD(&qh->list);
304         INIT_LIST_HEAD(&qh->remove_list);
305
306         usb_get_dev(dev);
307
308         return qh;
309 }
310
311 static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
312 {
313         if (!list_empty(&qh->list))
314                 dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
315         if (!list_empty(&qh->remove_list))
316                 dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);
317
318         if (qh->dev)
319                 usb_put_dev(qh->dev);
320
321         dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
322 }
323
324 /*
325  * Append this urb's qh after the last qh in skelqh->list
326  *
327  * Note that urb_priv.queue_list doesn't have a separate queue head;
328  * it's a ring with every element "live".
329  */
330 static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
331 {
332         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
333         struct list_head *tmp;
334         struct uhci_qh *lqh;
335
336         /* Grab the last QH */
337         lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);
338
339         /* Point to the next skelqh */
340         urbp->qh->link = lqh->link;
341         wmb();                          /* Ordering is important */
342
343         /*
344          * Patch QHs for previous endpoint's queued URBs?  HC goes
345          * here next, not to the next skelqh it now points to.
346          *
347          *    lqh --> td ... --> qh ... --> td --> qh ... --> td
348          *     |                 |                 |
349          *     v                 v                 v
350          *     +<----------------+-----------------+
351          *     v
352          *    newqh --> td ... --> td
353          *     |
354          *     v
355          *    ...
356          *
357          * The HC could see (and use!) any of these as we write them.
358          */
359         lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
360         if (lqh->urbp) {
361                 list_for_each (tmp, &lqh->urbp->queue_list) {
362                         struct urb_priv *turbp =
363                                 list_entry(tmp, struct urb_priv, queue_list);
364
365                         turbp->qh->link = lqh->link;
366                 }
367         }
368
369         list_add_tail(&urbp->qh->list, &skelqh->list);
370 }
371
372 /*
373  * Start removal of QH from schedule; it finishes next frame.
374  * TDs should be unlinked before this is called.
375  */
376 static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
377 {
378         struct uhci_qh *pqh;
379         __le32 newlink;
380         unsigned int age;
381
382         if (!qh)
383                 return;
384
385         /*
386          * Only go through the hoops if it's actually linked in
387          */
388         if (!list_empty(&qh->list)) {
389
390                 /* If our queue is nonempty, make the next URB the head */
391                 if (!list_empty(&qh->urbp->queue_list)) {
392                         struct urb_priv *nurbp;
393
394                         nurbp = list_entry(qh->urbp->queue_list.next,
395                                         struct urb_priv, queue_list);
396                         nurbp->queued = 0;
397                         list_add(&nurbp->qh->list, &qh->list);
398                         newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
399                 } else
400                         newlink = qh->link;
401
402                 /* Fix up the previous QH's queue to link to either
403                  * the new head of this queue or the start of the
404                  * next endpoint's queue. */
405                 pqh = list_entry(qh->list.prev, struct uhci_qh, list);
406                 pqh->link = newlink;
407                 if (pqh->urbp) {
408                         struct list_head *head, *tmp;
409
410                         head = &pqh->urbp->queue_list;
411                         tmp = head->next;
412                         while (head != tmp) {
413                                 struct urb_priv *turbp =
414                                         list_entry(tmp, struct urb_priv, queue_list);
415
416                                 tmp = tmp->next;
417
418                                 turbp->qh->link = newlink;
419                         }
420                 }
421                 wmb();
422
423                 /* Leave qh->link in case the HC is on the QH now, it will */
424                 /* continue the rest of the schedule */
425                 qh->element = UHCI_PTR_TERM;
426
427                 list_del_init(&qh->list);
428         }
429
430         list_del_init(&qh->urbp->queue_list);
431         qh->urbp = NULL;
432
433         age = uhci_get_current_frame_number(uhci);
434         if (age != uhci->qh_remove_age) {
435                 uhci_free_pending_qhs(uhci);
436                 uhci->qh_remove_age = age;
437         }
438
439         /* Check to see if the remove list is empty. Set the IOC bit */
440         /* to force an interrupt so we can remove the QH */
441         if (list_empty(&uhci->qh_remove_list))
442                 uhci_set_next_interrupt(uhci);
443
444         list_add(&qh->remove_list, &uhci->qh_remove_list);
445 }
446
447 static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
448 {
449         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
450         struct list_head *head, *tmp;
451
452         head = &urbp->td_list;
453         tmp = head->next;
454         while (head != tmp) {
455                 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
456
457                 tmp = tmp->next;
458
459                 if (toggle)
460                         td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
461                 else
462                         td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
463
464
465                 toggle ^= 1;
466         }
467
468         return toggle;
469 }
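
/*
 * In other words, uhci_fixup_toggle() rewrites the DATA0/DATA1 bit of
 * every TD in <urb>, starting from <toggle>, and returns the toggle the
 * next transfer should begin with.  uhci_append_queued_urb() below uses
 * it to continue the sequence from the last TD of the URB it queues
 * behind, and uhci_delete_queued_urb() uses it to repair the sequence
 * when an URB is removed from the middle of a queue.
 */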
470
471 /* This function will append one URB's QH to another URB's QH. This is for */
472 /* queuing interrupt, control or bulk transfers */
473 static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
474 {
475         struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
476         struct list_head *tmp;
477         struct uhci_td *lltd;
478
479         eurbp = eurb->hcpriv;
480         urbp = urb->hcpriv;
481
482         /* Find the first URB in the queue */
483         if (eurbp->queued) {
484                 struct list_head *head = &eurbp->queue_list;
485
486                 tmp = head->next;
487                 while (tmp != head) {
488                         struct urb_priv *turbp =
489                                 list_entry(tmp, struct urb_priv, queue_list);
490
491                         if (!turbp->queued)
492                                 break;
493
494                         tmp = tmp->next;
495                 }
496         } else
497                 tmp = &eurbp->queue_list;
498
499         furbp = list_entry(tmp, struct urb_priv, queue_list);
500         lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);
501
502         lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);
503
504         /* Control transfers always start with toggle 0 */
505         if (!usb_pipecontrol(urb->pipe))
506                 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
507                                 usb_pipeout(urb->pipe),
508                                 uhci_fixup_toggle(urb,
509                                         uhci_toggle(td_token(lltd)) ^ 1));
510
511         /* All qh's in the queue need to link to the next queue */
512         urbp->qh->link = eurbp->qh->link;
513
514         wmb();                  /* Make sure we flush everything */
515
516         lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
517
518         list_add_tail(&urbp->queue_list, &furbp->queue_list);
519
520         urbp->queued = 1;
521 }
522
523 static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
524 {
525         struct urb_priv *urbp, *nurbp;
526         struct list_head *head, *tmp;
527         struct urb_priv *purbp;
528         struct uhci_td *pltd;
529         unsigned int toggle;
530
531         urbp = urb->hcpriv;
532
533         if (list_empty(&urbp->queue_list))
534                 return;
535
536         nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);
537
538         /*
539          * Fix up the toggle for the following URBs in the queue.
540          * Only needed for bulk and interrupt: control and isochronous
541          * endpoints don't propagate toggles between messages.
542          */
543         if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
544                 if (!urbp->queued)
545                         /* We just set the toggle in uhci_unlink_generic */
546                         toggle = usb_gettoggle(urb->dev,
547                                         usb_pipeendpoint(urb->pipe),
548                                         usb_pipeout(urb->pipe));
549                 else {
550                         /* If we're in the middle of the queue, grab the */
551                         /* toggle from the TD previous to us */
552                         purbp = list_entry(urbp->queue_list.prev,
553                                         struct urb_priv, queue_list);
554                         pltd = list_entry(purbp->td_list.prev,
555                                         struct uhci_td, list);
556                         toggle = uhci_toggle(td_token(pltd)) ^ 1;
557                 }
558
559                 head = &urbp->queue_list;
560                 tmp = head->next;
561                 while (head != tmp) {
562                         struct urb_priv *turbp;
563
564                         turbp = list_entry(tmp, struct urb_priv, queue_list);
565                         tmp = tmp->next;
566
567                         if (!turbp->queued)
568                                 break;
569                         toggle = uhci_fixup_toggle(turbp->urb, toggle);
570                 }
571
572                 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
573                                 usb_pipeout(urb->pipe), toggle);
574         }
575
576         if (urbp->queued) {
577                 /* We're somewhere in the middle (or end).  The case where
578                  * we're at the head is handled in uhci_remove_qh(). */
579                 purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
580                                 queue_list);
581
582                 pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
583                 if (nurbp->queued)
584                         pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
585                 else
586                         /* The next URB happens to be the beginning, so */
587                         /*  we're the last, end the chain */
588                         pltd->link = UHCI_PTR_TERM;
589         }
590
591         /* urbp->queue_list is handled in uhci_remove_qh() */
592 }
593
594 static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
595 {
596         struct urb_priv *urbp;
597
598         urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
599         if (!urbp)
600                 return NULL;
601
602         memset((void *)urbp, 0, sizeof(*urbp));
603
604         urbp->inserttime = jiffies;
605         urbp->fsbrtime = jiffies;
606         urbp->urb = urb;
607         
608         INIT_LIST_HEAD(&urbp->td_list);
609         INIT_LIST_HEAD(&urbp->queue_list);
610         INIT_LIST_HEAD(&urbp->urb_list);
611
612         list_add_tail(&urbp->urb_list, &uhci->urb_list);
613
614         urb->hcpriv = urbp;
615
616         return urbp;
617 }
618
619 static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
620 {
621         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
622
623         td->urb = urb;
624
625         list_add_tail(&td->list, &urbp->td_list);
626 }
627
628 static void uhci_remove_td_from_urb(struct uhci_td *td)
629 {
630         if (list_empty(&td->list))
631                 return;
632
633         list_del_init(&td->list);
634
635         td->urb = NULL;
636 }
637
638 static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
639 {
640         struct list_head *head, *tmp;
641         struct urb_priv *urbp;
642         unsigned int age;
643
644         urbp = (struct urb_priv *)urb->hcpriv;
645         if (!urbp)
646                 return;
647
648         if (!list_empty(&urbp->urb_list))
649                 dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
650                                 "or uhci->remove_list!\n", urb);
651
652         age = uhci_get_current_frame_number(uhci);
653         if (age != uhci->td_remove_age) {
654                 uhci_free_pending_tds(uhci);
655                 uhci->td_remove_age = age;
656         }
657
658         /* Check to see if the remove list is empty. Set the IOC bit */
659         /* to force an interrupt so we can remove the TD's */
660         if (list_empty(&uhci->td_remove_list))
661                 uhci_set_next_interrupt(uhci);
662
663         head = &urbp->td_list;
664         tmp = head->next;
665         while (tmp != head) {
666                 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
667
668                 tmp = tmp->next;
669
670                 uhci_remove_td_from_urb(td);
671                 uhci_remove_td(uhci, td);
672                 list_add(&td->remove_list, &uhci->td_remove_list);
673         }
674
675         urb->hcpriv = NULL;
676         kmem_cache_free(uhci_up_cachep, urbp);
677 }
678
679 static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
680 {
681         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
682
683         if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
684                 urbp->fsbr = 1;
685                 if (!uhci->fsbr++ && !uhci->fsbrtimeout)
686                         uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
687         }
688 }
689
690 static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
691 {
692         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
693
694         if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
695                 urbp->fsbr = 0;
696                 if (!--uhci->fsbr)
697                         uhci->fsbrtimeout = jiffies + FSBR_DELAY;
698         }
699 }
700
701 /*
702  * Map status to standard result codes
703  *
704  * <status> is (td->status & 0xF60000) [a.k.a. uhci_status_bits(td->status)]
705  * Note: status does not include the TD_CTRL_NAK bit.
706  * <dir_out> is True for output TDs and False for input TDs.
707  */
708 static int uhci_map_status(int status, int dir_out)
709 {
710         if (!status)
711                 return 0;
712         if (status & TD_CTRL_BITSTUFF)                  /* Bitstuff error */
713                 return -EPROTO;
714         if (status & TD_CTRL_CRCTIMEO) {                /* CRC/Timeout */
715                 if (dir_out)
716                         return -EPROTO;
717                 else
718                         return -EILSEQ;
719         }
720         if (status & TD_CTRL_BABBLE)                    /* Babble */
721                 return -EOVERFLOW;
722         if (status & TD_CTRL_DBUFERR)                   /* Buffer error */
723                 return -ENOSR;
724         if (status & TD_CTRL_STALLED)                   /* Stalled */
725                 return -EPIPE;
726         WARN_ON(status & TD_CTRL_ACTIVE);               /* Active */
727         return 0;
728 }
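
/*
 * Typical use, mirroring the td_error: paths later in this file:
 *
 *      ret = uhci_map_status(uhci_status_bits(td_status(td)),
 *                      uhci_packetout(td_token(td)));
 *
 * so that, for instance, a stalled endpoint is reported as -EPIPE.
 */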
729
730 /*
731  * Control transfers
732  */
733 static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
734 {
735         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
736         struct uhci_td *td;
737         struct uhci_qh *qh, *skelqh;
738         unsigned long destination, status;
739         int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
740         int len = urb->transfer_buffer_length;
741         dma_addr_t data = urb->transfer_dma;
742
743         /* The "pipe" thing contains the destination in bits 8--18 */
744         destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
745
746         /* 3 errors */
747         status = TD_CTRL_ACTIVE | uhci_maxerr(3);
748         if (urb->dev->speed == USB_SPEED_LOW)
749                 status |= TD_CTRL_LS;
750
751         /*
752          * Build the TD for the control request setup packet
753          */
754         td = uhci_alloc_td(uhci, urb->dev);
755         if (!td)
756                 return -ENOMEM;
757
758         uhci_add_td_to_urb(urb, td);
759         uhci_fill_td(td, status, destination | uhci_explen(7),
760                 urb->setup_dma);
761
762         /*
763          * If direction is "send", change the packet ID from SETUP (0x2D)
764          * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
765          * set Short Packet Detect (SPD) for all data packets.
766          */
767         if (usb_pipeout(urb->pipe))
768                 destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
769         else {
770                 destination ^= (USB_PID_SETUP ^ USB_PID_IN);
771                 status |= TD_CTRL_SPD;
772         }
773
774         /*
775          * Build the DATA TD's
776          */
777         while (len > 0) {
778                 int pktsze = len;
779
780                 if (pktsze > maxsze)
781                         pktsze = maxsze;
782
783                 td = uhci_alloc_td(uhci, urb->dev);
784                 if (!td)
785                         return -ENOMEM;
786
787                 /* Alternate Data0/1 (start with Data1) */
788                 destination ^= TD_TOKEN_TOGGLE;
789         
790                 uhci_add_td_to_urb(urb, td);
791                 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
792                         data);
793
794                 data += pktsze;
795                 len -= pktsze;
796         }
797
798         /*
799          * Build the final TD for control status 
800          */
801         td = uhci_alloc_td(uhci, urb->dev);
802         if (!td)
803                 return -ENOMEM;
804
805         /*
806          * It's IN if the pipe is an output pipe or we're not expecting
807          * data back.
808          */
809         destination &= ~TD_TOKEN_PID_MASK;
810         if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
811                 destination |= USB_PID_IN;
812         else
813                 destination |= USB_PID_OUT;
814
815         destination |= TD_TOKEN_TOGGLE;         /* End in Data1 */
816
817         status &= ~TD_CTRL_SPD;
818
819         uhci_add_td_to_urb(urb, td);
820         uhci_fill_td(td, status | TD_CTRL_IOC,
821                 destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);
822
823         qh = uhci_alloc_qh(uhci, urb->dev);
824         if (!qh)
825                 return -ENOMEM;
826
827         urbp->qh = qh;
828         qh->urbp = urbp;
829
830         uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
831
832         /* Low-speed transfers get a different queue, and won't hog the bus */
833         if (urb->dev->speed == USB_SPEED_LOW)
834                 skelqh = uhci->skel_ls_control_qh;
835         else {
836                 skelqh = uhci->skel_fs_control_qh;
837                 uhci_inc_fsbr(uhci, urb);
838         }
839
840         if (eurb)
841                 uhci_append_queued_urb(uhci, eurb, urb);
842         else
843                 uhci_insert_qh(uhci, skelqh, urb);
844
845         return -EINPROGRESS;
846 }
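
/*
 * To recap the schedule entry built above: a single QH whose element
 * pointer leads to one SETUP TD (8 bytes, DATA0), zero or more DATA TDs
 * alternating DATA1/DATA0 and capped at maxsze, and a final STATUS TD in
 * the opposite direction with DATA1 and IOC set, all linked breadth-first.
 */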
847
848 /*
849  * If control-IN transfer was short, the status packet wasn't sent.
850  * This routine changes the element pointer in the QH to point at the
851  * status TD.  It's safe to do this even while the QH is live, because
852  * the hardware only updates the element pointer following a successful
853  * transfer.  The inactive TD for the short packet won't cause an update,
854  * so the pointer won't get overwritten.  The next time the controller
855  * sees this QH, it will send the status packet.
856  */
857 static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
858 {
859         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
860         struct uhci_td *td;
861
862         urbp->short_control_packet = 1;
863
864         td = list_entry(urbp->td_list.prev, struct uhci_td, list);
865         urbp->qh->element = cpu_to_le32(td->dma_handle);
866
867         return -EINPROGRESS;
868 }
869
870
871 static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
872 {
873         struct list_head *tmp, *head;
874         struct urb_priv *urbp = urb->hcpriv;
875         struct uhci_td *td;
876         unsigned int status;
877         int ret = 0;
878
879         if (list_empty(&urbp->td_list))
880                 return -EINVAL;
881
882         head = &urbp->td_list;
883
884         if (urbp->short_control_packet) {
885                 tmp = head->prev;
886                 goto status_stage;
887         }
888
889         tmp = head->next;
890         td = list_entry(tmp, struct uhci_td, list);
891
892         /* The first TD is the SETUP stage; check the status, but don't */
893         /*  count its length */
894         status = uhci_status_bits(td_status(td));
895         if (status & TD_CTRL_ACTIVE)
896                 return -EINPROGRESS;
897
898         if (status)
899                 goto td_error;
900
901         urb->actual_length = 0;
902
903         /* The rest of the TD's (except the last) are data */
904         tmp = tmp->next;
905         while (tmp != head && tmp->next != head) {
906                 td = list_entry(tmp, struct uhci_td, list);
907
908                 tmp = tmp->next;
909
910                 status = uhci_status_bits(td_status(td));
911                 if (status & TD_CTRL_ACTIVE)
912                         return -EINPROGRESS;
913
914                 urb->actual_length += uhci_actual_length(td_status(td));
915
916                 if (status)
917                         goto td_error;
918
919                 /* Check to see if we received a short packet */
920                 if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
921                         if (urb->transfer_flags & URB_SHORT_NOT_OK) {
922                                 ret = -EREMOTEIO;
923                                 goto err;
924                         }
925
926                         if (uhci_packetid(td_token(td)) == USB_PID_IN)
927                                 return usb_control_retrigger_status(uhci, urb);
928                         else
929                                 return 0;
930                 }
931         }
932
933 status_stage:
934         td = list_entry(tmp, struct uhci_td, list);
935
936         /* Control status stage */
937         status = td_status(td);
938
939 #ifdef I_HAVE_BUGGY_APC_BACKUPS
940         /* APC BackUPS Pro kludge */
941         /* It tries to send all of the descriptor instead of the amount */
942         /*  we requested */
943         if (status & TD_CTRL_IOC &&     /* IOC is masked out by uhci_status_bits */
944             status & TD_CTRL_ACTIVE &&
945             status & TD_CTRL_NAK)
946                 return 0;
947 #endif
948
949         status = uhci_status_bits(status);
950         if (status & TD_CTRL_ACTIVE)
951                 return -EINPROGRESS;
952
953         if (status)
954                 goto td_error;
955
956         return 0;
957
958 td_error:
959         ret = uhci_map_status(status, uhci_packetout(td_token(td)));
960
961 err:
962         if ((debug == 1 && ret != -EPIPE) || debug > 1) {
963                 /* Some debugging code */
964                 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
965                                 __FUNCTION__, status);
966
967                 if (errbuf) {
968                         /* Print the chain for debugging purposes */
969                         uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
970
971                         lprintk(errbuf);
972                 }
973         }
974
975         return ret;
976 }
977
978 /*
979  * Common submit for bulk and interrupt
980  */
981 static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
982 {
983         struct uhci_td *td;
984         struct uhci_qh *qh;
985         unsigned long destination, status;
986         int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
987         int len = urb->transfer_buffer_length;
988         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
989         dma_addr_t data = urb->transfer_dma;
990
991         if (len < 0)
992                 return -EINVAL;
993
994         /* The "pipe" thing contains the destination in bits 8--18 */
995         destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
996
997         status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
998         if (urb->dev->speed == USB_SPEED_LOW)
999                 status |= TD_CTRL_LS;
1000         if (usb_pipein(urb->pipe))
1001                 status |= TD_CTRL_SPD;
1002
1003         /*
1004          * Build the DATA TD's
1005          */
1006         do {    /* Allow zero length packets */
1007                 int pktsze = maxsze;
1008
1009                 if (pktsze >= len) {
1010                         pktsze = len;
1011                         if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
1012                                 status &= ~TD_CTRL_SPD;
1013                 }
1014
1015                 td = uhci_alloc_td(uhci, urb->dev);
1016                 if (!td)
1017                         return -ENOMEM;
1018
1019                 uhci_add_td_to_urb(urb, td);
1020                 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
1021                         (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1022                          usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
1023                         data);
1024
1025                 data += pktsze;
1026                 len -= maxsze;
1027
1028                 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1029                         usb_pipeout(urb->pipe));
1030         } while (len > 0);
1031
1032         /*
1033          * URB_ZERO_PACKET means adding a 0-length packet, if direction
1034          * is OUT and the transfer_length was an exact multiple of maxsze,
1035          * hence (len = transfer_length - N * maxsze) == 0.
1036          * However, if transfer_length == 0, the zero packet was already
1037          * prepared above.
1038          */
1039         if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
1040             !len && urb->transfer_buffer_length) {
1041                 td = uhci_alloc_td(uhci, urb->dev);
1042                 if (!td)
1043                         return -ENOMEM;
1044
1045                 uhci_add_td_to_urb(urb, td);
1046                 uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
1047                         (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1048                          usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
1049                         data);
1050
1051                 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1052                         usb_pipeout(urb->pipe));
1053         }
1054
1055         /* Set the interrupt-on-completion flag on the last packet.
1056          * A more-or-less typical 4 KB URB (= size of one memory page)
1057          * will require about 3 ms to transfer; that's a little on the
1058          * fast side but not enough to justify delaying an interrupt
1059          * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
1060          * flag setting. */
1061         td->status |= cpu_to_le32(TD_CTRL_IOC);
1062
1063         qh = uhci_alloc_qh(uhci, urb->dev);
1064         if (!qh)
1065                 return -ENOMEM;
1066
1067         urbp->qh = qh;
1068         qh->urbp = urbp;
1069
1070         /* Always breadth first */
1071         uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
1072
1073         if (eurb)
1074                 uhci_append_queued_urb(uhci, eurb, urb);
1075         else
1076                 uhci_insert_qh(uhci, skelqh, urb);
1077
1078         return -EINPROGRESS;
1079 }
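
/*
 * A note on the length encoding used above: the MaxLen field of a UHCI TD
 * holds the transfer length minus one, which is why the DATA TDs are
 * filled with uhci_explen(pktsze - 1) and why zero-length packets use
 * UHCI_NULL_DATA_SIZE rather than 0.
 */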
1080
1081 /*
1082  * Common result for bulk and interrupt
1083  */
1084 static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
1085 {
1086         struct list_head *tmp, *head;
1087         struct urb_priv *urbp = urb->hcpriv;
1088         struct uhci_td *td;
1089         unsigned int status = 0;
1090         int ret = 0;
1091
1092         urb->actual_length = 0;
1093
1094         head = &urbp->td_list;
1095         tmp = head->next;
1096         while (tmp != head) {
1097                 td = list_entry(tmp, struct uhci_td, list);
1098
1099                 tmp = tmp->next;
1100
1101                 status = uhci_status_bits(td_status(td));
1102                 if (status & TD_CTRL_ACTIVE)
1103                         return -EINPROGRESS;
1104
1105                 urb->actual_length += uhci_actual_length(td_status(td));
1106
1107                 if (status)
1108                         goto td_error;
1109
1110                 if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
1111                         if (urb->transfer_flags & URB_SHORT_NOT_OK) {
1112                                 ret = -EREMOTEIO;
1113                                 goto err;
1114                         } else
1115                                 return 0;
1116                 }
1117         }
1118
1119         return 0;
1120
1121 td_error:
1122         ret = uhci_map_status(status, uhci_packetout(td_token(td)));
1123
1124 err:
1125         /* 
1126          * Enable this chunk of code if you want to see some more debugging.
1127          * But be careful, it has the tendency to starve out khubd and prevent
1128          * disconnects from happening successfully if you have a slow debug
1129          * log interface (like a serial console).
1130          */
1131 #if 0
1132         if ((debug == 1 && ret != -EPIPE) || debug > 1) {
1133                 /* Some debugging code */
1134                 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
1135                                 __FUNCTION__, status);
1136
1137                 if (errbuf) {
1138                         /* Print the chain for debugging purposes */
1139                         uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
1140
1141                         lprintk(errbuf);
1142                 }
1143         }
1144 #endif
1145         return ret;
1146 }
1147
1148 static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1149 {
1150         int ret;
1151
1152         /* Can't have low-speed bulk transfers */
1153         if (urb->dev->speed == USB_SPEED_LOW)
1154                 return -EINVAL;
1155
1156         ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
1157         if (ret == -EINPROGRESS)
1158                 uhci_inc_fsbr(uhci, urb);
1159
1160         return ret;
1161 }
1162
1163 static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1164 {
1165         /* USB 1.1 interrupt transfers only involve one packet per interval;
1166          * that's the uhci_submit_common() "breadth first" policy.  Drivers
1167          * can submit urbs of any length, but longer ones might need many
1168          * intervals to complete.
1169          */
1170         return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
1171 }
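
/*
 * __interval_to_skel() (defined in uhci-hcd.h) effectively rounds
 * urb->interval down to one of the power-of-two skeleton interrupt
 * queues, so the endpoint ends up being polled every 1, 2, 4, ... or 128
 * frames.
 */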
1172
1173 /*
1174  * Isochronous transfers
1175  */
1176 static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
1177 {
1178         struct urb *last_urb = NULL;
1179         struct list_head *tmp, *head;
1180         int ret = 0;
1181
1182         head = &uhci->urb_list;
1183         tmp = head->next;
1184         while (tmp != head) {
1185                 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1186                 struct urb *u = up->urb;
1187
1188                 tmp = tmp->next;
1189
1190                 /* look for pending URB's with identical pipe handle */
1191                 if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
1192                     (u->status == -EINPROGRESS) && (u != urb)) {
1193                         if (!last_urb)
1194                                 *start = u->start_frame;
1195                         last_urb = u;
1196                 }
1197         }
1198
1199         if (last_urb) {
1200                 *end = (last_urb->start_frame + last_urb->number_of_packets *
1201                                 last_urb->interval) & (UHCI_NUMFRAMES-1);
1202                 ret = 0;
1203         } else
1204                 ret = -1;       /* no previous urb found */
1205
1206         return ret;
1207 }
1208
1209 static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
1210 {
1211         int limits;
1212         unsigned int start = 0, end = 0;
1213
1214         if (urb->number_of_packets > 900)       /* 900? Why? */
1215                 return -EFBIG;
1216
1217         limits = isochronous_find_limits(uhci, urb, &start, &end);
1218
1219         if (urb->transfer_flags & URB_ISO_ASAP) {
1220                 if (limits) {
1221                         int curframe;
1222
1223                         curframe = uhci_get_current_frame_number(uhci) % UHCI_NUMFRAMES;
1224                         urb->start_frame = (curframe + 10) % UHCI_NUMFRAMES;
1225                 } else
1226                         urb->start_frame = end;
1227         } else {
1228                 urb->start_frame %= UHCI_NUMFRAMES;
1229                 /* FIXME: Sanity check */
1230         }
1231
1232         return 0;
1233 }
1234
1235 /*
1236  * Isochronous transfers
1237  */
1238 static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1239 {
1240         struct uhci_td *td;
1241         int i, ret, frame;
1242         int status, destination;
1243
1244         status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
1245         destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
1246
1247         ret = isochronous_find_start(uhci, urb);
1248         if (ret)
1249                 return ret;
1250
1251         frame = urb->start_frame;
1252         for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
1253                 if (!urb->iso_frame_desc[i].length)
1254                         continue;
1255
1256                 td = uhci_alloc_td(uhci, urb->dev);
1257                 if (!td)
1258                         return -ENOMEM;
1259
1260                 uhci_add_td_to_urb(urb, td);
1261                 uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
1262                         urb->transfer_dma + urb->iso_frame_desc[i].offset);
1263
1264                 if (i + 1 >= urb->number_of_packets)
1265                         td->status |= cpu_to_le32(TD_CTRL_IOC);
1266
1267                 uhci_insert_td_frame_list(uhci, td, frame);
1268         }
1269
1270         return -EINPROGRESS;
1271 }
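
/*
 * Unlike the other transfer types, isochronous TDs are not queued under a
 * QH: uhci_insert_td_frame_list() links each one directly into the frame
 * list, one TD per scheduled frame at urb->interval spacing, and only the
 * last TD carries IOC so a single interrupt marks the whole URB as done.
 */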
1272
1273 static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1274 {
1275         struct list_head *tmp, *head;
1276         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1277         int status;
1278         int i, ret = 0;
1279
1280         urb->actual_length = 0;
1281
1282         i = 0;
1283         head = &urbp->td_list;
1284         tmp = head->next;
1285         while (tmp != head) {
1286                 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1287                 int actlength;
1288
1289                 tmp = tmp->next;
1290
1291                 if (td_status(td) & TD_CTRL_ACTIVE)
1292                         return -EINPROGRESS;
1293
1294                 actlength = uhci_actual_length(td_status(td));
1295                 urb->iso_frame_desc[i].actual_length = actlength;
1296                 urb->actual_length += actlength;
1297
1298                 status = uhci_map_status(uhci_status_bits(td_status(td)),
1299                                 usb_pipeout(urb->pipe));
1300                 urb->iso_frame_desc[i].status = status;
1301                 if (status) {
1302                         urb->error_count++;
1303                         ret = status;
1304                 }
1305
1306                 i++;
1307         }
1308
1309         return ret;
1310 }
1311
1312 static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
1313 {
1314         struct list_head *tmp, *head;
1315
1316         /* We don't match Isoc transfers since they are special */
1317         if (usb_pipeisoc(urb->pipe))
1318                 return NULL;
1319
1320         head = &uhci->urb_list;
1321         tmp = head->next;
1322         while (tmp != head) {
1323                 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1324                 struct urb *u = up->urb;
1325
1326                 tmp = tmp->next;
1327
1328                 if (u->dev == urb->dev && u->status == -EINPROGRESS) {
1329                         /* For control, ignore the direction */
1330                         if (usb_pipecontrol(urb->pipe) &&
1331                             (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
1332                                 return u;
1333                         else if (u->pipe == urb->pipe)
1334                                 return u;
1335                 }
1336         }
1337
1338         return NULL;
1339 }
1340
1341 static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, int mem_flags)
1342 {
1343         int ret;
1344         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1345         unsigned long flags;
1346         struct urb *eurb;
1347         int bustime;
1348
1349         spin_lock_irqsave(&uhci->schedule_lock, flags);
1350
1351         ret = urb->status;
1352         if (ret != -EINPROGRESS)                /* URB already unlinked! */
1353                 goto out;
1354
1355         eurb = uhci_find_urb_ep(uhci, urb);
1356
1357         if (!uhci_alloc_urb_priv(uhci, urb)) {
1358                 ret = -ENOMEM;
1359                 goto out;
1360         }
1361
1362         switch (usb_pipetype(urb->pipe)) {
1363         case PIPE_CONTROL:
1364                 ret = uhci_submit_control(uhci, urb, eurb);
1365                 break;
1366         case PIPE_INTERRUPT:
1367                 if (!eurb) {
1368                         bustime = usb_check_bandwidth(urb->dev, urb);
1369                         if (bustime < 0)
1370                                 ret = bustime;
1371                         else {
1372                                 ret = uhci_submit_interrupt(uhci, urb, eurb);
1373                                 if (ret == -EINPROGRESS)
1374                                         usb_claim_bandwidth(urb->dev, urb, bustime, 0);
1375                         }
1376                 } else {        /* inherit from parent */
1377                         urb->bandwidth = eurb->bandwidth;
1378                         ret = uhci_submit_interrupt(uhci, urb, eurb);
1379                 }
1380                 break;
1381         case PIPE_BULK:
1382                 ret = uhci_submit_bulk(uhci, urb, eurb);
1383                 break;
1384         case PIPE_ISOCHRONOUS:
1385                 bustime = usb_check_bandwidth(urb->dev, urb);
1386                 if (bustime < 0) {
1387                         ret = bustime;
1388                         break;
1389                 }
1390
1391                 ret = uhci_submit_isochronous(uhci, urb);
1392                 if (ret == -EINPROGRESS)
1393                         usb_claim_bandwidth(urb->dev, urb, bustime, 1);
1394                 break;
1395         }
1396
1397         if (ret != -EINPROGRESS) {
1398                 /* Submit failed, so delete it from the urb_list */
1399                 struct urb_priv *urbp = urb->hcpriv;
1400
1401                 list_del_init(&urbp->urb_list);
1402                 uhci_destroy_urb_priv(uhci, urb);
1403         } else
1404                 ret = 0;
1405
1406 out:
1407         spin_unlock_irqrestore(&uhci->schedule_lock, flags);
1408         return ret;
1409 }
1410
1411 /*
1412  * Return the result of a transfer
1413  */
1414 static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
1415 {
1416         int ret = -EINPROGRESS;
1417         struct urb_priv *urbp;
1418
1419         spin_lock(&urb->lock);
1420
1421         urbp = (struct urb_priv *)urb->hcpriv;
1422
1423         if (urb->status != -EINPROGRESS)        /* URB already dequeued */
1424                 goto out;
1425
1426         switch (usb_pipetype(urb->pipe)) {
1427         case PIPE_CONTROL:
1428                 ret = uhci_result_control(uhci, urb);
1429                 break;
1430         case PIPE_BULK:
1431         case PIPE_INTERRUPT:
1432                 ret = uhci_result_common(uhci, urb);
1433                 break;
1434         case PIPE_ISOCHRONOUS:
1435                 ret = uhci_result_isochronous(uhci, urb);
1436                 break;
1437         }
1438
1439         if (ret == -EINPROGRESS)
1440                 goto out;
1441         urb->status = ret;
1442
1443         switch (usb_pipetype(urb->pipe)) {
1444         case PIPE_CONTROL:
1445         case PIPE_BULK:
1446         case PIPE_ISOCHRONOUS:
1447                 /* Release bandwidth for Interrupt or Isoc. transfers */
1448                 if (urb->bandwidth)
1449                         usb_release_bandwidth(urb->dev, urb, 1);
1450                 uhci_unlink_generic(uhci, urb);
1451                 break;
1452         case PIPE_INTERRUPT:
1453                 /* Release bandwidth for Interrupt or Isoc. transfers */
1454                 /* Make sure we don't release if we have a queued URB */
1455                 if (list_empty(&urbp->queue_list) && urb->bandwidth)
1456                         usb_release_bandwidth(urb->dev, urb, 0);
1457                 else
1458                         /* bandwidth was passed on to queued URB, */
1459                         /* so don't let usb_unlink_urb() release it */
1460                         urb->bandwidth = 0;
1461                 uhci_unlink_generic(uhci, urb);
1462                 break;
1463         default:
1464                 dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
1465                                 "for urb %p\n",
1466                                 __FUNCTION__, usb_pipetype(urb->pipe), urb);
1467         }
1468
1469         /* Move it from uhci->urb_list to uhci->complete_list */
1470         uhci_moveto_complete(uhci, urbp);
1471
1472 out:
1473         spin_unlock(&urb->lock);
1474 }
1475
1476 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
1477 {
1478         struct list_head *head, *tmp;
1479         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1480         int prevactive = 1;
1481
1482         uhci_dec_fsbr(uhci, urb);       /* Safe since it checks */
1483
1484         /*
1485          * Now we need to find out what the last successful toggle was
1486          * so we can update the local data toggle for the next transfer
1487          *
1488          * There are 3 ways the last successfully completed TD is found:
1489          *
1490          * 1) The TD is NOT active and the actual length < expected length
1491          * 2) The TD is NOT active and it's the last TD in the chain
1492          * 3) The TD is active and the previous TD is NOT active
1493          *
1494          * Control and Isochronous ignore the toggle, so this is safe
1495          * for all types
1496          */
1497         head = &urbp->td_list;
1498         tmp = head->next;
1499         while (tmp != head) {
1500                 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1501
1502                 tmp = tmp->next;
1503
1504                 if (!(td_status(td) & TD_CTRL_ACTIVE) &&
1505                     (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td)) ||
1506                     tmp == head))
1507                         usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1508                                 uhci_packetout(td_token(td)),
1509                                 uhci_toggle(td_token(td)) ^ 1);
1510                 else if ((td_status(td) & TD_CTRL_ACTIVE) && !prevactive)
1511                         usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1512                                 uhci_packetout(td_token(td)),
1513                                 uhci_toggle(td_token(td)));
1514
1515                 prevactive = td_status(td) & TD_CTRL_ACTIVE;
1516         }
1517
1518         uhci_delete_queued_urb(uhci, urb);
1519
1520         /* The interrupt loop will reclaim the QH's */
1521         uhci_remove_qh(uhci, urbp->qh);
1522         urbp->qh = NULL;
1523 }
1524
1525 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
1526 {
1527         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1528         unsigned long flags;
1529         struct urb_priv *urbp;
1530         unsigned int age;
1531
1532         spin_lock_irqsave(&uhci->schedule_lock, flags);
1533         urbp = urb->hcpriv;
1534         if (!urbp)                      /* URB was never linked! */
1535                 goto done;
1536         list_del_init(&urbp->urb_list);
1537
1538         uhci_unlink_generic(uhci, urb);
1539
1540         age = uhci_get_current_frame_number(uhci);
1541         if (age != uhci->urb_remove_age) {
1542                 uhci_remove_pending_urbps(uhci);
1543                 uhci->urb_remove_age = age;
1544         }
1545
1546         /* If we're the first, set the next interrupt bit */
1547         if (list_empty(&uhci->urb_remove_list))
1548                 uhci_set_next_interrupt(uhci);
1549         list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);
1550
1551 done:
1552         spin_unlock_irqrestore(&uhci->schedule_lock, flags);
1553         return 0;
1554 }
1555
1556 static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
1557 {
1558         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1559         struct list_head *head, *tmp;
1560         int count = 0;
1561
1562         uhci_dec_fsbr(uhci, urb);
1563
1564         urbp->fsbr_timeout = 1;
1565
1566         /*
1567          * Ideally we would want to fix qh->element as well, but it's
1568          * read/write by the HC, so that can introduce a race. It's not
1569          * really worth the hassle
1570          */
1571
1572         head = &urbp->td_list;
1573         tmp = head->next;
1574         while (tmp != head) {
1575                 struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
1576
1577                 tmp = tmp->next;
1578
1579                 /*
1580                  * Skip the last TD (it will have the TERM bit set), and only
1581                  * mark every DEPTH_INTERVAL-th TD for depth-first traversal,
1582                  * so that this queue doesn't hog the bandwidth
1583                  */
1584                 if (tmp != head && (count % DEPTH_INTERVAL) == (DEPTH_INTERVAL - 1))
1585                         td->link |= UHCI_PTR_DEPTH;
1586
1587                 count++;
1588         }
1589
1590         return 0;
1591 }
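
/*
 * Illustrative sketch, not part of the driver: which TD indices the loop
 * in uhci_fsbr_timeout() above marks with UHCI_PTR_DEPTH.  The helper
 * name and parameters are invented for illustration; for example, with
 * ten TDs and a depth interval of 5, only index 4 is marked, since
 * index 9 is skipped for being the last (terminating) TD.
 */
static inline int sketch_fsbr_marks_td(int index, int n_tds, int depth_interval)
{
	return index != n_tds - 1 &&
		(index % depth_interval) == depth_interval - 1;
}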
1592
1593 /*
1594  * uhci_get_current_frame_number()
1595  *
1596  * returns the current frame number for a USB bus/controller.
1597  */
1598 static int uhci_get_current_frame_number(struct uhci_hcd *uhci)
1599 {
1600         return inw(uhci->io_addr + USBFRNUM);
1601 }
1602
1603 static int init_stall_timer(struct usb_hcd *hcd);
1604
1605 static void stall_callback(unsigned long ptr)
1606 {
1607         struct usb_hcd *hcd = (struct usb_hcd *)ptr;
1608         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1609         struct list_head list, *tmp, *head;
1610         unsigned long flags;
1611         int called_uhci_finish_completion = 0;
1612
1613         INIT_LIST_HEAD(&list);
1614
1615         spin_lock_irqsave(&uhci->schedule_lock, flags);
1616         if (!list_empty(&uhci->urb_remove_list) &&
1617             uhci_get_current_frame_number(uhci) != uhci->urb_remove_age) {
1618                 uhci_remove_pending_urbps(uhci);
1619                 uhci_finish_completion(hcd, NULL);
1620                 called_uhci_finish_completion = 1;
1621         }
1622
1623         head = &uhci->urb_list;
1624         tmp = head->next;
1625         while (tmp != head) {
1626                 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1627                 struct urb *u = up->urb;
1628
1629                 tmp = tmp->next;
1630
1631                 spin_lock(&u->lock);
1632
1633                 /* Check if the FSBR timed out */
1634                 if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
1635                         uhci_fsbr_timeout(uhci, u);
1636
1637                 spin_unlock(&u->lock);
1638         }
1639         spin_unlock_irqrestore(&uhci->schedule_lock, flags);
1640
1641         /* Wake up anyone waiting for an URB to complete */
1642         if (called_uhci_finish_completion)
1643                 wake_up_all(&uhci->waitqh);
1644
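	/* Nothing in this function adds entries to the local 'list', so the
	 * dequeue loop below never executes; it appears to be left over from
	 * earlier URB-timeout handling. */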
1645         head = &list;
1646         tmp = head->next;
1647         while (tmp != head) {
1648                 struct urb_priv *up = list_entry(tmp, struct urb_priv, urb_list);
1649                 struct urb *u = up->urb;
1650
1651                 tmp = tmp->next;
1652
1653                 uhci_urb_dequeue(hcd, u);
1654         }
1655
1656         /* Really disable FSBR */
1657         if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
1658                 uhci->fsbrtimeout = 0;
1659                 uhci->skel_term_qh->link = UHCI_PTR_TERM;
1660         }
1661
1662         /* Poll for and perform state transitions */
1663         hc_state_transitions(uhci);
1664
1665         init_stall_timer(hcd);
1666 }
1667
1668 static int init_stall_timer(struct usb_hcd *hcd)
1669 {
1670         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1671
1672         init_timer(&uhci->stall_timer);
1673         uhci->stall_timer.function = stall_callback;
1674         uhci->stall_timer.data = (unsigned long)hcd;
1675         uhci->stall_timer.expires = jiffies + msecs_to_jiffies(100);
1676         add_timer(&uhci->stall_timer);
1677
1678         return 0;
1679 }
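
/*
 * Note that stall_callback() ends by calling init_stall_timer() again,
 * so once uhci_start() arms the timer the controller is polled roughly
 * every 100 ms (FSBR timeouts, pending removals, state transitions)
 * until uhci_stop() or the error path calls del_timer_sync().
 */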
1680
1681 static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
1682 {
1683         struct list_head *tmp, *head;
1684
1685         head = &uhci->qh_remove_list;
1686         tmp = head->next;
1687         while (tmp != head) {
1688                 struct uhci_qh *qh = list_entry(tmp, struct uhci_qh, remove_list);
1689
1690                 tmp = tmp->next;
1691
1692                 list_del_init(&qh->remove_list);
1693
1694                 uhci_free_qh(uhci, qh);
1695         }
1696 }
1697
1698 static void uhci_free_pending_tds(struct uhci_hcd *uhci)
1699 {
1700         struct list_head *tmp, *head;
1701
1702         head = &uhci->td_remove_list;
1703         tmp = head->next;
1704         while (tmp != head) {
1705                 struct uhci_td *td = list_entry(tmp, struct uhci_td, remove_list);
1706
1707                 tmp = tmp->next;
1708
1709                 list_del_init(&td->remove_list);
1710
1711                 uhci_free_td(uhci, td);
1712         }
1713 }
1714
1715 static void uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
1716 {
1717         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1718
1719         uhci_destroy_urb_priv(uhci, urb);
1720
1721         spin_unlock(&uhci->schedule_lock);
1722         usb_hcd_giveback_urb(hcd, urb, regs);
1723         spin_lock(&uhci->schedule_lock);
1724 }
1725
1726 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs)
1727 {
1728         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1729         struct list_head *tmp, *head;
1730
1731         head = &uhci->complete_list;
1732         tmp = head->next;
1733         while (tmp != head) {
1734                 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
1735                 struct urb *urb = urbp->urb;
1736
1737                 list_del_init(&urbp->urb_list);
1738                 uhci_finish_urb(hcd, urb, regs);
1739
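		/* uhci_finish_urb() drops and re-acquires schedule_lock around
		 * the giveback, so complete_list may have changed; restart from
		 * the list head rather than trusting a saved iterator. */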
1740                 head = &uhci->complete_list;
1741                 tmp = head->next;
1742         }
1743 }
1744
1745 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
1746 {
1747
1748         /* Splice the urb_remove_list onto the end of the complete_list */
1749         list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
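	/* list_splice_init() inserts the entries just after the node it is
	 * given, so splicing at complete_list.prev (the current tail)
	 * appends the removed URBs to the end of complete_list. */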
1750 }
1751
1752 static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
1753 {
1754         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1755         unsigned long io_addr = uhci->io_addr;
1756         unsigned short status;
1757         struct list_head *tmp, *head;
1758         unsigned int age;
1759
1760         /*
1761          * Read the interrupt status, and write it back to clear the
1762          * interrupt cause.  Contrary to the UHCI specification, the
1763          * "HC Halted" status bit is persistent: it is RO, not R/WC.
1764          */
1765         status = inw(io_addr + USBSTS);
1766         if (!(status & ~USBSTS_HCH))    /* shared interrupt, not mine */
1767                 return IRQ_NONE;
1768         outw(status, io_addr + USBSTS);         /* Clear it */
1769
1770         if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
1771                 if (status & USBSTS_HSE)
1772                         dev_err(uhci_dev(uhci), "host system error, "
1773                                         "PCI problems?\n");
1774                 if (status & USBSTS_HCPE)
1775                         dev_err(uhci_dev(uhci), "host controller process "
1776                                         "error, something bad happened!\n");
1777                 if ((status & USBSTS_HCH) && uhci->state > 0) {
1778                         dev_err(uhci_dev(uhci), "host controller halted, "
1779                                         "very bad!\n");
1780                         /* FIXME: Reset the controller, fix the offending TD */
1781                 }
1782         }
1783
1784         if (status & USBSTS_RD)
1785                 uhci->resume_detect = 1;
1786
1787         spin_lock(&uhci->schedule_lock);
1788
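	/* QHs, TDs, and unlinked URBs queued for removal during frame N are
	 * reclaimed only after the frame counter has advanced past N, which
	 * guarantees the controller is no longer traversing them. */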
1789         age = uhci_get_current_frame_number(uhci);
1790         if (age != uhci->qh_remove_age)
1791                 uhci_free_pending_qhs(uhci);
1792         if (age != uhci->td_remove_age)
1793                 uhci_free_pending_tds(uhci);
1794         if (age != uhci->urb_remove_age)
1795                 uhci_remove_pending_urbps(uhci);
1796
1797         if (list_empty(&uhci->urb_remove_list) &&
1798             list_empty(&uhci->td_remove_list) &&
1799             list_empty(&uhci->qh_remove_list))
1800                 uhci_clear_next_interrupt(uhci);
1801         else
1802                 uhci_set_next_interrupt(uhci);
1803
1804         /* Walk the list of pending URB's to see which ones completed */
1805         head = &uhci->urb_list;
1806         tmp = head->next;
1807         while (tmp != head) {
1808                 struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
1809                 struct urb *urb = urbp->urb;
1810
1811                 tmp = tmp->next;
1812
1813                 /* Checks the status and does all of the magic necessary */
1814                 uhci_transfer_result(uhci, urb);
1815         }
1816         uhci_finish_completion(hcd, regs);
1817
1818         spin_unlock(&uhci->schedule_lock);
1819
1820         /* Wake up anyone waiting for an URB to complete */
1821         wake_up_all(&uhci->waitqh);
1822
1823         return IRQ_HANDLED;
1824 }
1825
1826 static void reset_hc(struct uhci_hcd *uhci)
1827 {
1828         unsigned long io_addr = uhci->io_addr;
1829
1830         /* Turn off PIRQ, SMI, and all interrupts.  This also turns off
1831          * the BIOS's USB Legacy Support.
1832          */
1833         pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0);
1834         outw(0, uhci->io_addr + USBINTR);
1835
1836         /* Global reset for 50ms */
1837         uhci->state = UHCI_RESET;
1838         outw(USBCMD_GRESET, io_addr + USBCMD);
1839         msleep(50);
1840         outw(0, io_addr + USBCMD);
1841
1842         /* Another 10ms delay */
1843         msleep(10);
1844         uhci->resume_detect = 0;
1845 }
1846
1847 static void suspend_hc(struct uhci_hcd *uhci)
1848 {
1849         unsigned long io_addr = uhci->io_addr;
1850
1851         dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
1852         uhci->state = UHCI_SUSPENDED;
1853         uhci->resume_detect = 0;
1854         outw(USBCMD_EGSM, io_addr + USBCMD);
1855 }
1856
1857 static void wakeup_hc(struct uhci_hcd *uhci)
1858 {
1859         unsigned long io_addr = uhci->io_addr;
1860
1861         switch (uhci->state) {
1862                 case UHCI_SUSPENDED:            /* Start the resume */
1863                         dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
1864
1865                         /* Global resume for >= 20ms */
1866                         outw(USBCMD_FGR | USBCMD_EGSM, io_addr + USBCMD);
1867                         uhci->state = UHCI_RESUMING_1;
1868                         uhci->state_end = jiffies + msecs_to_jiffies(20);
1869                         break;
1870
1871                 case UHCI_RESUMING_1:           /* End global resume */
1872                         uhci->state = UHCI_RESUMING_2;
1873                         outw(0, io_addr + USBCMD);
1874                         /* Falls through */
1875
1876                 case UHCI_RESUMING_2:           /* Wait for EOP to be sent */
1877                         if (inw(io_addr + USBCMD) & USBCMD_FGR)
1878                                 break;
1879
1880                         /* Run for at least 1 second, and
1881                          * mark it configured with a 64-byte max packet */
1882                         uhci->state = UHCI_RUNNING_GRACE;
1883                         uhci->state_end = jiffies + HZ;
1884                         outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP,
1885                                         io_addr + USBCMD);
1886                         break;
1887
1888                 case UHCI_RUNNING_GRACE:        /* Now allowed to suspend */
1889                         uhci->state = UHCI_RUNNING;
1890                         break;
1891
1892                 default:
1893                         break;
1894         }
1895 }
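
/*
 * A complete resume therefore steps through:
 *   UHCI_SUSPENDED      drive global resume (FGR|EGSM) for >= 20 ms
 *   UHCI_RESUMING_1     stop driving resume, fall through
 *   UHCI_RESUMING_2     wait for FGR to clear (EOP sent), then run
 *   UHCI_RUNNING_GRACE  run for at least 1 second
 *   UHCI_RUNNING
 * with hc_state_transitions() calling back into wakeup_hc() whenever
 * state_end expires in one of the intermediate states.
 */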
1896
1897 static int ports_active(struct uhci_hcd *uhci)
1898 {
1899         unsigned long io_addr = uhci->io_addr;
1900         int connection = 0;
1901         int i;
1902
1903         for (i = 0; i < uhci->rh_numports; i++)
1904                 connection |= (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_CCS);
1905
1906         return connection;
1907 }
1908
1909 static int suspend_allowed(struct uhci_hcd *uhci)
1910 {
1911         unsigned long io_addr = uhci->io_addr;
1912         int i;
1913
1914         if (to_pci_dev(uhci_dev(uhci))->vendor != PCI_VENDOR_ID_INTEL)
1915                 return 1;
1916
1917         /* Some of Intel's USB controllers have a bug that causes false
1918          * resume indications if any port has an over current condition.
1919          * To prevent problems, we will not allow a global suspend if
1920          * any ports are OC.
1921          *
1922          * Some motherboards using Intel's chipsets (but not using all
1923          * the USB ports) appear to hardwire the over current inputs active
1924          * to disable the USB ports.
1925          */
1926
1927         /* check for over current condition on any port */
1928         for (i = 0; i < uhci->rh_numports; i++) {
1929                 if (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_OC)
1930                         return 0;
1931         }
1932
1933         return 1;
1934 }
1935
1936 static void hc_state_transitions(struct uhci_hcd *uhci)
1937 {
1938         switch (uhci->state) {
1939                 case UHCI_RUNNING:
1940
1941                         /* global suspend if nothing connected for 1 second */
1942                         if (!ports_active(uhci) && suspend_allowed(uhci)) {
1943                                 uhci->state = UHCI_SUSPENDING_GRACE;
1944                                 uhci->state_end = jiffies + HZ;
1945                         }
1946                         break;
1947
1948                 case UHCI_SUSPENDING_GRACE:
1949                         if (ports_active(uhci))
1950                                 uhci->state = UHCI_RUNNING;
1951                         else if (time_after_eq(jiffies, uhci->state_end))
1952                                 suspend_hc(uhci);
1953                         break;
1954
1955                 case UHCI_SUSPENDED:
1956
1957                         /* wakeup if requested by a device */
1958                         if (uhci->resume_detect)
1959                                 wakeup_hc(uhci);
1960                         break;
1961
1962                 case UHCI_RESUMING_1:
1963                 case UHCI_RESUMING_2:
1964                 case UHCI_RUNNING_GRACE:
1965                         if (time_after_eq(jiffies, uhci->state_end))
1966                                 wakeup_hc(uhci);
1967                         break;
1968
1969                 default:
1970                         break;
1971         }
1972 }
1973
1974 static void start_hc(struct uhci_hcd *uhci)
1975 {
1976         unsigned long io_addr = uhci->io_addr;
1977         int timeout = 1000;
1978
1979         /*
1980          * Reset the HC - this will force us to get a
1981          * new notification of any already connected
1982          * ports due to the virtual disconnect that it
1983          * implies.
1984          */
1985         outw(USBCMD_HCRESET, io_addr + USBCMD);
1986         while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
1987                 if (!--timeout) {
1988                         dev_err(uhci_dev(uhci), "USBCMD_HCRESET timed out!\n");
1989                         break;
1990                 }
1991         }
1992
1993         /* Turn on PIRQ and all interrupts */
1994         pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
1995                         USBLEGSUP_DEFAULT);
1996         outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
1997                 io_addr + USBINTR);
1998
1999         /* Start at frame 0 */
2000         outw(0, io_addr + USBFRNUM);
2001         outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);
2002
2003         /* Run and mark it configured with a 64-byte max packet */
2004         uhci->state = UHCI_RUNNING_GRACE;
2005         uhci->state_end = jiffies + HZ;
2006         outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);
2007
2008         uhci->hcd.state = USB_STATE_RUNNING;
2009 }
2010
2011 /*
2012  * De-allocate all resources.
2013  */
2014 static void release_uhci(struct uhci_hcd *uhci)
2015 {
2016         int i;
2017
2018         for (i = 0; i < UHCI_NUM_SKELQH; i++)
2019                 if (uhci->skelqh[i]) {
2020                         uhci_free_qh(uhci, uhci->skelqh[i]);
2021                         uhci->skelqh[i] = NULL;
2022                 }
2023
2024         if (uhci->term_td) {
2025                 uhci_free_td(uhci, uhci->term_td);
2026                 uhci->term_td = NULL;
2027         }
2028
2029         if (uhci->qh_pool) {
2030                 dma_pool_destroy(uhci->qh_pool);
2031                 uhci->qh_pool = NULL;
2032         }
2033
2034         if (uhci->td_pool) {
2035                 dma_pool_destroy(uhci->td_pool);
2036                 uhci->td_pool = NULL;
2037         }
2038
2039         if (uhci->fl) {
2040                 dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2041                                 uhci->fl, uhci->fl->dma_handle);
2042                 uhci->fl = NULL;
2043         }
2044
2045 #ifdef CONFIG_PROC_FS
2046         if (uhci->proc_entry) {
2047                 remove_proc_entry(uhci->hcd.self.bus_name, uhci_proc_root);
2048                 uhci->proc_entry = NULL;
2049         }
2050 #endif
2051 }
2052
2053 static int uhci_reset(struct usb_hcd *hcd)
2054 {
2055         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2056
2057         uhci->io_addr = (unsigned long) hcd->regs;
2058
2059         /* Kick BIOS off this hardware and reset, so we won't get
2060          * interrupts from any previous setup.
2061          */
2062         reset_hc(uhci);
2063         return 0;
2064 }
2065
2066 /*
2067  * Allocate a frame list, and then setup the skeleton
2068  *
2069  * The hardware doesn't really know any difference
2070  * in the queues, but the order does matter for the
2071  * protocols higher up. The order is:
2072  *
2073          *  - any isochronous transfers are handled before any
2074          *    of the queues; we don't build those entries here
2075          *    because the actual TDs are created on demand.
2076  *  - The first queue is the interrupt queue.
2077  *  - The second queue is the control queue, split into low- and full-speed
2078          *  - The third queue is the bulk queue.
2079  *  - The fourth queue is the bandwidth reclamation queue, which loops back
2080  *    to the full-speed control queue.
2081  */
2082 static int uhci_start(struct usb_hcd *hcd)
2083 {
2084         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2085         int retval = -EBUSY;
2086         int i, port;
2087         unsigned io_size;
2088         dma_addr_t dma_handle;
2089         struct usb_device *udev;
2090 #ifdef CONFIG_PROC_FS
2091         struct proc_dir_entry *ent;
2092 #endif
2093
2094         io_size = pci_resource_len(to_pci_dev(uhci_dev(uhci)), hcd->region);
2095
2096 #ifdef CONFIG_PROC_FS
2097         ent = create_proc_entry(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
2098         if (!ent) {
2099                 dev_err(uhci_dev(uhci), "couldn't create uhci proc entry\n");
2100                 retval = -ENOMEM;
2101                 goto err_create_proc_entry;
2102         }
2103
2104         ent->data = uhci;
2105         ent->proc_fops = &uhci_proc_operations;
2106         ent->size = 0;
2107         uhci->proc_entry = ent;
2108 #endif
2109
2110         uhci->fsbr = 0;
2111         uhci->fsbrtimeout = 0;
2112
2113         spin_lock_init(&uhci->schedule_lock);
2114         INIT_LIST_HEAD(&uhci->qh_remove_list);
2115
2116         INIT_LIST_HEAD(&uhci->td_remove_list);
2117
2118         INIT_LIST_HEAD(&uhci->urb_remove_list);
2119
2120         INIT_LIST_HEAD(&uhci->urb_list);
2121
2122         INIT_LIST_HEAD(&uhci->complete_list);
2123
2124         init_waitqueue_head(&uhci->waitqh);
2125
2126         uhci->fl = dma_alloc_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2127                         &dma_handle, 0);
2128         if (!uhci->fl) {
2129                 dev_err(uhci_dev(uhci), "unable to allocate "
2130                                 "consistent memory for frame list\n");
2131                 goto err_alloc_fl;
2132         }
2133
2134         memset((void *)uhci->fl, 0, sizeof(*uhci->fl));
2135
2136         uhci->fl->dma_handle = dma_handle;
2137
2138         uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
2139                         sizeof(struct uhci_td), 16, 0);
2140         if (!uhci->td_pool) {
2141                 dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
2142                 goto err_create_td_pool;
2143         }
2144
2145         uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
2146                         sizeof(struct uhci_qh), 16, 0);
2147         if (!uhci->qh_pool) {
2148                 dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
2149                 goto err_create_qh_pool;
2150         }
2151
2152         /* Initialize the root hub */
2153
2154         /* The UHCI spec says devices must have 2 ports, and that they */
2155         /*  may have more, but it gives no way to determine how many. */
2156         /*  However, the spec also says bit 7 of the port status register */
2157         /*  is always set to 1, so we use that to count the ports. */
2158         for (port = 0; port < (io_size - 0x10) / 2; port++) {
2159                 unsigned int portstatus;
2160
2161                 portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
2162                 if (!(portstatus & 0x0080))
2163                         break;
2164         }
2165         if (debug)
2166                 dev_info(uhci_dev(uhci), "detected %d ports\n", port);
2167
2168         /* This detection is experimental; treat anything less than 2 or */
2169         /*  greater than UHCI_RH_MAXCHILD ports as a misdetection */
2170         if (port < 2 || port > UHCI_RH_MAXCHILD) {
2171                 dev_info(uhci_dev(uhci), "port count misdetected? "
2172                                 "forcing to 2 ports\n");
2173                 port = 2;
2174         }
2175
2176         uhci->rh_numports = port;
2177
2178         udev = usb_alloc_dev(NULL, &hcd->self, 0);
2179         if (!udev) {
2180                 dev_err(uhci_dev(uhci), "unable to allocate root hub\n");
2181                 goto err_alloc_root_hub;
2182         }
2183
2184         uhci->term_td = uhci_alloc_td(uhci, udev);
2185         if (!uhci->term_td) {
2186                 dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
2187                 goto err_alloc_term_td;
2188         }
2189
2190         for (i = 0; i < UHCI_NUM_SKELQH; i++) {
2191                 uhci->skelqh[i] = uhci_alloc_qh(uhci, udev);
2192                 if (!uhci->skelqh[i]) {
2193                         dev_err(uhci_dev(uhci), "unable to allocate QH\n");
2194                         goto err_alloc_skelqh;
2195                 }
2196         }
2197
2198         /*
2199          * 8 Interrupt queues; link all higher int queues to int1,
2200          * then link int1 to control and control to bulk
2201          */
2202         uhci->skel_int128_qh->link =
2203                         uhci->skel_int64_qh->link =
2204                         uhci->skel_int32_qh->link =
2205                         uhci->skel_int16_qh->link =
2206                         uhci->skel_int8_qh->link =
2207                         uhci->skel_int4_qh->link =
2208                         uhci->skel_int2_qh->link =
2209                         cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
2210         uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;
2211
2212         uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
2213         uhci->skel_fs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
2214         uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;
2215
2216         /* This dummy TD is to work around a bug in Intel PIIX controllers */
2217         uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
2218                 (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
2219         uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);
2220
2221         uhci->skel_term_qh->link = UHCI_PTR_TERM;
2222         uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);
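	/* The links above give each frame the QH chain
	 *   interrupt queues -> int1 -> ls_control -> fs_control -> bulk -> term;
	 * skel_term_qh normally terminates the schedule and, while FSBR is
	 * active, is relinked to the full-speed control queue so the HC keeps
	 * reclaiming leftover bandwidth. */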
2223
2224         /*
2225          * Fill the frame list: make all entries point to the proper
2226          * interrupt queue.
2227          *
2228          * The interrupt queues will be interleaved as evenly as possible.
2229          * There's not much to be done about period-1 interrupts; they have
2230          * to occur in every frame.  But we can schedule period-2 interrupts
2231          * in odd-numbered frames, period-4 interrupts in frames congruent
2232          * to 2 (mod 4), and so on.  This way each frame only has two
2233          * interrupt QHs, which will help spread out bandwidth utilization.
2234          */
2235         for (i = 0; i < UHCI_NUMFRAMES; i++) {
2236                 int irq;
2237
2238                 /*
2239                  * __ffs (find first set bit, zero-based) does exactly what we need:
2240                  * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[6],
2241                  * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc.
2242                  * ffs > 6 => not on any high-period queue, so use
2243                  *      skel_int1_qh = skelqh[7].
2244          * Add UHCI_NUMFRAMES to ensure at least one bit is set.
2245                  */
2246                 irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES);
2247                 if (irq < 0)
2248                         irq = 7;
2249
2250                 /* Only place we don't use the frame list routines */
2251                 uhci->fl->frame[i] = UHCI_PTR_QH |
2252                                 cpu_to_le32(uhci->skelqh[irq]->dma_handle);
2253         }
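	/*
	 * Worked example of the mapping above (UHCI_NUMFRAMES is 1024):
	 *   frame 0:  __ffs(1024) = 10, irq = -4 -> clamped to 7 (skel_int1_qh)
	 *   frame 1:  __ffs(1025) = 0,  irq = 6  (skel_int2_qh)
	 *   frame 2:  __ffs(1026) = 1,  irq = 5  (skel_int4_qh)
	 *   frame 64: __ffs(1088) = 6,  irq = 0  (the longest-period queue)
	 */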
2254
2255         /*
2256          * Some architectures require a full mb() to enforce completion of
2257          * the memory writes above before the I/O transfers in start_hc().
2258          */
2259         mb();
2260         start_hc(uhci);
2261
2262         init_stall_timer(hcd);
2263
2264         udev->speed = USB_SPEED_FULL;
2265
2266         if (hcd_register_root(udev, &uhci->hcd) != 0) {
2267                 dev_err(uhci_dev(uhci), "unable to start root hub\n");
2268                 retval = -ENOMEM;
2269                 goto err_start_root_hub;
2270         }
2271
2272         return 0;
2273
2274 /*
2275  * error exits:
2276  */
2277 err_start_root_hub:
2278         reset_hc(uhci);
2279
2280         del_timer_sync(&uhci->stall_timer);
2281
2282 err_alloc_skelqh:
2283         for (i = 0; i < UHCI_NUM_SKELQH; i++)
2284                 if (uhci->skelqh[i]) {
2285                         uhci_free_qh(uhci, uhci->skelqh[i]);
2286                         uhci->skelqh[i] = NULL;
2287                 }
2288
2289         uhci_free_td(uhci, uhci->term_td);
2290         uhci->term_td = NULL;
2291
2292 err_alloc_term_td:
2293         usb_put_dev(udev);
2294
2295 err_alloc_root_hub:
2296         dma_pool_destroy(uhci->qh_pool);
2297         uhci->qh_pool = NULL;
2298
2299 err_create_qh_pool:
2300         dma_pool_destroy(uhci->td_pool);
2301         uhci->td_pool = NULL;
2302
2303 err_create_td_pool:
2304         dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
2305                         uhci->fl, uhci->fl->dma_handle);
2306         uhci->fl = NULL;
2307
2308 err_alloc_fl:
2309 #ifdef CONFIG_PROC_FS
2310         remove_proc_entry(hcd->self.bus_name, uhci_proc_root);
2311         uhci->proc_entry = NULL;
2312
2313 err_create_proc_entry:
2314 #endif
2315
2316         return retval;
2317 }
2318
2319 static void uhci_stop(struct usb_hcd *hcd)
2320 {
2321         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2322
2323         del_timer_sync(&uhci->stall_timer);
2324
2325         /*
2326          * At this point, we're guaranteed that no new connects can be made
2327          * to this bus since there are no more parents
2328          */
2329
2330         reset_hc(uhci);
2331
2332         spin_lock_irq(&uhci->schedule_lock);
2333         uhci_free_pending_qhs(uhci);
2334         uhci_free_pending_tds(uhci);
2335         uhci_remove_pending_urbps(uhci);
2336         uhci_finish_completion(hcd, NULL);
2337
2338         uhci_free_pending_qhs(uhci);
2339         uhci_free_pending_tds(uhci);
2340         spin_unlock_irq(&uhci->schedule_lock);
2341
2342         /* Wake up anyone waiting for an URB to complete */
2343         wake_up_all(&uhci->waitqh);
2344         
2345         release_uhci(uhci);
2346 }
2347
2348 #ifdef CONFIG_PM
2349 static int uhci_suspend(struct usb_hcd *hcd, u32 state)
2350 {
2351         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2352
2353         /* Don't try to suspend broken motherboards, reset instead */
2354         if (suspend_allowed(uhci)) {
2355                 suspend_hc(uhci);
2356                 uhci->saved_framenumber =
2357                                 inw(uhci->io_addr + USBFRNUM) & 0x3ff;
2358         } else
2359                 reset_hc(uhci);
2360         return 0;
2361 }
2362
2363 static int uhci_resume(struct usb_hcd *hcd)
2364 {
2365         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2366
2367         pci_set_master(to_pci_dev(uhci_dev(uhci)));
2368
2369         if (uhci->state == UHCI_SUSPENDED) {
2370
2371                 /*
2372                  * Some systems don't maintain the UHCI register values
2373                  * during a PM suspend/resume cycle, so reinitialize
2374                  * the Frame Number, Framelist Base Address, Interrupt
2375                  * Enable, and Legacy Support registers.
2376                  */
2377                 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
2378                                 0);
2379                 outw(uhci->saved_framenumber, uhci->io_addr + USBFRNUM);
2380                 outl(uhci->fl->dma_handle, uhci->io_addr + USBFLBASEADD);
2381                 outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC |
2382                                 USBINTR_SP, uhci->io_addr + USBINTR);
2383                 uhci->resume_detect = 1;
2384                 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
2385                                 USBLEGSUP_DEFAULT);
2386         } else {
2387                 reset_hc(uhci);
2388                 start_hc(uhci);
2389         }
2390         uhci->hcd.state = USB_STATE_RUNNING;
2391         return 0;
2392 }
2393 #endif
2394
2395 static struct usb_hcd *uhci_hcd_alloc(void)
2396 {
2397         struct uhci_hcd *uhci;
2398
2399         uhci = (struct uhci_hcd *)kmalloc(sizeof(*uhci), GFP_KERNEL);
2400         if (!uhci)
2401                 return NULL;
2402
2403         memset(uhci, 0, sizeof(*uhci));
2404         uhci->hcd.product_desc = "UHCI Host Controller";
2405         return &uhci->hcd;
2406 }
2407
2408 static void uhci_hcd_free(struct usb_hcd *hcd)
2409 {
2410         kfree(hcd_to_uhci(hcd));
2411 }
2412
2413 /* Are there any URBs for a particular device/endpoint on a given list? */
2414 static int urbs_for_ep_list(struct list_head *head,
2415                 struct hcd_dev *hdev, int ep)
2416 {
2417         struct urb_priv *urbp;
2418
2419         list_for_each_entry(urbp, head, urb_list) {
2420                 struct urb *urb = urbp->urb;
2421
2422                 if (hdev == urb->dev->hcpriv && ep ==
2423                                 (usb_pipeendpoint(urb->pipe) |
2424                                  usb_pipein(urb->pipe)))
2425                         return 1;
2426         }
2427         return 0;
2428 }
2429
2430 /* Are there any URBs for a particular device/endpoint? */
2431 static int urbs_for_ep(struct uhci_hcd *uhci, struct hcd_dev *hdev, int ep)
2432 {
2433         int rc;
2434
2435         spin_lock_irq(&uhci->schedule_lock);
2436         rc = (urbs_for_ep_list(&uhci->urb_list, hdev, ep) ||
2437                         urbs_for_ep_list(&uhci->complete_list, hdev, ep) ||
2438                         urbs_for_ep_list(&uhci->urb_remove_list, hdev, ep));
2439         spin_unlock_irq(&uhci->schedule_lock);
2440         return rc;
2441 }
2442
2443 /* Wait until all the URBs for a particular device/endpoint are gone */
2444 static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
2445                 struct hcd_dev *hdev, int endpoint)
2446 {
2447         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
2448
2449         wait_event_interruptible(uhci->waitqh,
2450                         !urbs_for_ep(uhci, hdev, endpoint));
2451 }
2452
2453 static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
2454 {
2455         return uhci_get_current_frame_number(hcd_to_uhci(hcd));
2456 }
2457
2458 static const char hcd_name[] = "uhci_hcd";
2459
2460 static const struct hc_driver uhci_driver = {
2461         .description =          hcd_name,
2462
2463         /* Generic hardware linkage */
2464         .irq =                  uhci_irq,
2465         .flags =                HCD_USB11,
2466
2467         /* Basic lifecycle operations */
2468         .reset =                uhci_reset,
2469         .start =                uhci_start,
2470 #ifdef CONFIG_PM
2471         .suspend =              uhci_suspend,
2472         .resume =               uhci_resume,
2473 #endif
2474         .stop =                 uhci_stop,
2475
2476         .hcd_alloc =            uhci_hcd_alloc,
2477         .hcd_free =             uhci_hcd_free,
2478
2479         .urb_enqueue =          uhci_urb_enqueue,
2480         .urb_dequeue =          uhci_urb_dequeue,
2481
2482         .endpoint_disable =     uhci_hcd_endpoint_disable,
2483         .get_frame_number =     uhci_hcd_get_frame_number,
2484
2485         .hub_status_data =      uhci_hub_status_data,
2486         .hub_control =          uhci_hub_control,
2487 };
2488
2489 static const struct pci_device_id uhci_pci_ids[] = { {
2490         /* handle any USB UHCI controller */
2491         PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
2492         .driver_data =  (unsigned long) &uhci_driver,
2493         }, { /* end: all zeroes */ }
2494 };
2495
2496 MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
2497
2498 static struct pci_driver uhci_pci_driver = {
2499         .name =         (char *)hcd_name,
2500         .id_table =     uhci_pci_ids,
2501
2502         .probe =        usb_hcd_pci_probe,
2503         .remove =       usb_hcd_pci_remove,
2504
2505 #ifdef  CONFIG_PM
2506         .suspend =      usb_hcd_pci_suspend,
2507         .resume =       usb_hcd_pci_resume,
2508 #endif  /* PM */
2509 };
2510  
2511 static int __init uhci_hcd_init(void)
2512 {
2513         int retval = -ENOMEM;
2514
2515         printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");
2516
2517         if (usb_disabled())
2518                 return -ENODEV;
2519
2520         if (debug) {
2521                 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
2522                 if (!errbuf)
2523                         goto errbuf_failed;
2524         }
2525
2526 #ifdef CONFIG_PROC_FS
2527         uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, NULL);
2528         if (!uhci_proc_root)
2529                 goto proc_failed;
2530 #endif
2531
2532         uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
2533                 sizeof(struct urb_priv), 0, 0, NULL, NULL);
2534         if (!uhci_up_cachep)
2535                 goto up_failed;
2536
2537         retval = pci_module_init(&uhci_pci_driver);
2538         if (retval)
2539                 goto init_failed;
2540
2541         return 0;
2542
2543 init_failed:
2544         if (kmem_cache_destroy(uhci_up_cachep))
2545                 warn("not all urb_priv's were freed!");
2546
2547 up_failed:
2548
2549 #ifdef CONFIG_PROC_FS
2550         remove_proc_entry("driver/uhci", NULL);
2551
2552 proc_failed:
2553 #endif
2554         if (errbuf)
2555                 kfree(errbuf);
2556
2557 errbuf_failed:
2558
2559         return retval;
2560 }
2561
2562 static void __exit uhci_hcd_cleanup(void) 
2563 {
2564         pci_unregister_driver(&uhci_pci_driver);
2565         
2566         if (kmem_cache_destroy(uhci_up_cachep))
2567                 warn("not all urb_priv's were freed!");
2568
2569 #ifdef CONFIG_PROC_FS
2570         remove_proc_entry("driver/uhci", NULL);
2571 #endif
2572
2573         if (errbuf)
2574                 kfree(errbuf);
2575 }
2576
2577 module_init(uhci_hcd_init);
2578 module_exit(uhci_hcd_cleanup);
2579
2580 MODULE_AUTHOR(DRIVER_AUTHOR);
2581 MODULE_DESCRIPTION(DRIVER_DESC);
2582 MODULE_LICENSE("GPL");