upgrade to linux 2.6.10-1.12_FC2
[linux-2.6.git] / drivers / usb / host / uhci-hcd.c
1 /*
2  * Universal Host Controller Interface driver for USB.
3  *
4  * Maintainer: Alan Stern <stern@rowland.harvard.edu>
5  *
6  * (C) Copyright 1999 Linus Torvalds
7  * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8  * (C) Copyright 1999 Randy Dunlap
9  * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10  * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11  * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12  * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13  * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14  *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15  * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
16  * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
17  *
18  * Intel documents this fairly well, and as far as I know there
19  * are no royalties or anything like that, but even so there are
20  * people who decided that they want to do the same thing in a
21  * completely different way.
22  *
23  * WARNING! The USB documentation is downright evil. Most of it
24  * is just crap, written by a committee. You're better off ignoring
25  * most of it, the important stuff is:
26  *  - the low-level protocol (fairly simple but lots of small details)
27  *  - working around the horridness of the rest
28  */
29
30 #include <linux/config.h>
31 #ifdef CONFIG_USB_DEBUG
32 #define DEBUG
33 #else
34 #undef DEBUG
35 #endif
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/kernel.h>
39 #include <linux/init.h>
40 #include <linux/delay.h>
41 #include <linux/ioport.h>
42 #include <linux/sched.h>
43 #include <linux/slab.h>
44 #include <linux/smp_lock.h>
45 #include <linux/errno.h>
46 #include <linux/unistd.h>
47 #include <linux/interrupt.h>
48 #include <linux/spinlock.h>
49 #include <linux/proc_fs.h>
50 #include <linux/pm.h>
51 #include <linux/dmapool.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/usb.h>
54 #include <linux/bitops.h>
55
56 #include <asm/uaccess.h>
57 #include <asm/io.h>
58 #include <asm/irq.h>
59 #include <asm/system.h>
60
61 #include "../core/hcd.h"
62 #include "uhci-hcd.h"
63
64 /*
65  * Version Information
66  */
67 #define DRIVER_VERSION "v2.2"
68 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
69 Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
70 Alan Stern"
71 #define DRIVER_DESC "USB Universal Host Controller Interface driver"
72
73 /*
74  * debug = 0, no debugging messages
75  * debug = 1, dump failed URB's except for stalls
76  * debug = 2, dump all failed URB's (including stalls)
77  *            show all queues in /proc/driver/uhci/[pci_addr]
78  * debug = 3, show all TD's in URB's when dumping
79  */
80 #ifdef DEBUG
81 static int debug = 1;
82 #else
83 static int debug = 0;
84 #endif
85 module_param(debug, int, S_IRUGO | S_IWUSR);
86 MODULE_PARM_DESC(debug, "Debug level");
87 static char *errbuf;
88 #define ERRBUF_LEN    (32 * 1024)
89
90 #include "uhci-hub.c"
91 #include "uhci-debug.c"
92
93 static kmem_cache_t *uhci_up_cachep;    /* urb_priv */
94
95 static unsigned int uhci_get_current_frame_number(struct uhci_hcd *uhci);
96 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
97 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
98 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
99 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs);
100 static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
101 static void uhci_free_pending_tds(struct uhci_hcd *uhci);
102
103 static void hc_state_transitions(struct uhci_hcd *uhci);
104
105 /* If a transfer is still active after this much time, turn off FSBR */
106 #define IDLE_TIMEOUT    msecs_to_jiffies(50)
107 #define FSBR_DELAY      msecs_to_jiffies(50)
108
109 /* When we timeout an idle transfer for FSBR, we'll switch it over to */
110 /* depth first traversal. We'll do it in groups of this number of TD's */
111 /* to make sure it doesn't hog all of the bandwidth */
112 #define DEPTH_INTERVAL 5
113
114 /*
115  * Technically, updating td->status here is a race, but it's not really a
116  * problem. The worst that can happen is that we set the IOC bit again
117  * generating a spurious interrupt. We could fix this by creating another
118  * QH and leaving the IOC bit always set, but then we would have to play
119  * games with the FSBR code to make sure we get the correct order in all
120  * the cases. I don't think it's worth the effort
121  */
/* Request an interrupt at the end of the next frame by setting the
 * Interrupt-On-Complete bit in the terminating TD (see the race note
 * above: worst case is one spurious interrupt). */
static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
        uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}
126
/* Stop requesting per-frame interrupts: clear the IOC bit again in the
 * terminating TD. */
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
        uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}
131
/* Move an URB from whatever list it is currently on to the tail of the
 * complete list, where uhci_finish_completion() will pick it up. */
static inline void uhci_moveto_complete(struct uhci_hcd *uhci,
                                        struct urb_priv *urbp)
{
        list_move_tail(&urbp->urb_list, &uhci->complete_list);
}
137
138 static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
139 {
140         dma_addr_t dma_handle;
141         struct uhci_td *td;
142
143         td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
144         if (!td)
145                 return NULL;
146
147         td->dma_handle = dma_handle;
148
149         td->link = UHCI_PTR_TERM;
150         td->buffer = 0;
151
152         td->frame = -1;
153         td->dev = dev;
154
155         INIT_LIST_HEAD(&td->list);
156         INIT_LIST_HEAD(&td->remove_list);
157         INIT_LIST_HEAD(&td->fl_list);
158
159         usb_get_dev(dev);
160
161         return td;
162 }
163
/* Fill in the three hardware fields of a TD, converting each value to
 * the little-endian format the controller expects. */
static inline void uhci_fill_td(struct uhci_td *td, u32 status,
                u32 token, u32 buffer)
{
        td->status = cpu_to_le32(status);
        td->token = cpu_to_le32(token);
        td->buffer = cpu_to_le32(buffer);
}
171
172 /*
173  * We insert Isochronous URB's directly into the frame list at the beginning
174  */
static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
{
        /* UHCI_NUMFRAMES is a power of two, so this wraps the frame number */
        framenum &= (UHCI_NUMFRAMES - 1);

        td->frame = framenum;

        /* Is there a TD already mapped there? */
        if (uhci->fl->frame_cpu[framenum]) {
                struct uhci_td *ftd, *ltd;

                /* ftd = first TD in this frame, ltd = last one
                 * (fl_list is a ring, so ftd's prev is the tail) */
                ftd = uhci->fl->frame_cpu[framenum];
                ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

                list_add_tail(&td->fl_list, &ftd->fl_list);

                /* Set our onward link before the HC can reach us */
                td->link = ltd->link;
                wmb();
                ltd->link = cpu_to_le32(td->dma_handle);
        } else {
                /* Empty slot: splice the TD in front of whatever the
                 * frame pointer currently addresses */
                td->link = uhci->fl->frame[framenum];
                wmb();          /* TD must be fully set up before going live */
                uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
                uhci->fl->frame_cpu[framenum] = td;
        }
}
200
/* Unlink an isochronous TD from the frame list.  Safe to call on a TD
 * that was never inserted. */
static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
        /* If it's not inserted, don't remove it */
        if (td->frame == -1 && list_empty(&td->fl_list))
                return;

        if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
                /* TD is the first one in its frame */
                if (list_empty(&td->fl_list)) {
                        /* ...and the only one: restore the saved link */
                        uhci->fl->frame[td->frame] = td->link;
                        uhci->fl->frame_cpu[td->frame] = NULL;
                } else {
                        struct uhci_td *ntd;

                        /* Promote the next TD to head of the frame */
                        ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
                        uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
                        uhci->fl->frame_cpu[td->frame] = ntd;
                }
        } else {
                struct uhci_td *ptd;

                /* Mid-list: make the previous TD skip over us */
                ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
                ptd->link = td->link;
        }

        wmb();
        td->link = UHCI_PTR_TERM;

        list_del_init(&td->fl_list);
        td->frame = -1;
}
231
232 /*
233  * Inserts a td list into qh.
234  */
235 static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, __le32 breadth)
236 {
237         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
238         struct uhci_td *td;
239         u32 *plink;
240
241         /* Ordering isn't important here yet since the QH hasn't been */
242         /* inserted into the schedule yet */
243         plink = &qh->element;
244         list_for_each_entry(td, &urbp->td_list, list) {
245                 *plink = cpu_to_le32(td->dma_handle) | breadth;
246                 plink = &td->link;
247         }
248         *plink = UHCI_PTR_TERM;
249 }
250
/* Return a TD to the DMA pool, dropping its device reference.  Warns
 * (but proceeds) if the TD still appears to be linked anywhere. */
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
        if (!list_empty(&td->list))
                dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
        if (!list_empty(&td->remove_list))
                dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
        if (!list_empty(&td->fl_list))
                dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

        /* Balance the usb_get_dev() in uhci_alloc_td() */
        if (td->dev)
                usb_put_dev(td->dev);

        dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
265
266 static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
267 {
268         dma_addr_t dma_handle;
269         struct uhci_qh *qh;
270
271         qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
272         if (!qh)
273                 return NULL;
274
275         qh->dma_handle = dma_handle;
276
277         qh->element = UHCI_PTR_TERM;
278         qh->link = UHCI_PTR_TERM;
279
280         qh->dev = dev;
281         qh->urbp = NULL;
282
283         INIT_LIST_HEAD(&qh->list);
284         INIT_LIST_HEAD(&qh->remove_list);
285
286         usb_get_dev(dev);
287
288         return qh;
289 }
290
/* Return a QH to the DMA pool, dropping its device reference.  Warns
 * (but proceeds) if the QH still appears to be linked anywhere. */
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        if (!list_empty(&qh->list))
                dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
        if (!list_empty(&qh->remove_list))
                dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);

        /* Balance the usb_get_dev() in uhci_alloc_qh() */
        if (qh->dev)
                usb_put_dev(qh->dev);

        dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
303
304 /*
305  * Append this urb's qh after the last qh in skelqh->list
306  *
307  * Note that urb_priv.queue_list doesn't have a separate queue head;
308  * it's a ring with every element "live".
309  */
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        struct urb_priv *turbp;
        struct uhci_qh *lqh;

        /* Grab the last QH */
        lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);

        /* Point to the next skelqh */
        urbp->qh->link = lqh->link;
        wmb();                          /* Ordering is important */

        /*
         * Patch QHs for previous endpoint's queued URBs?  HC goes
         * here next, not to the next skelqh it now points to.
         *
         *    lqh --> td ... --> qh ... --> td --> qh ... --> td
         *     |                 |                 |
         *     v                 v                 v
         *     +<----------------+-----------------+
         *     v
         *    newqh --> td ... --> td
         *     |
         *     v
         *    ...
         *
         * The HC could see (and use!) any of these as we write them.
         */
        lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
        if (lqh->urbp) {
                /* Every URB queued behind lqh shares its link value, so
                 * each of them must be patched to the new target too */
                list_for_each_entry(turbp, &lqh->urbp->queue_list, queue_list)
                        turbp->qh->link = lqh->link;
        }

        /* Finally record the new QH on the software-side skeleton list */
        list_add_tail(&urbp->qh->list, &skelqh->list);
}
347
348 /*
349  * Start removal of QH from schedule; it finishes next frame.
350  * TDs should be unlinked before this is called.
351  */
static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;
        __le32 newlink;
        unsigned int age;

        if (!qh)
                return;

        /*
         * Only go through the hoops if it's actually linked in
         */
        if (!list_empty(&qh->list)) {

                /* If our queue is nonempty, make the next URB the head */
                if (!list_empty(&qh->urbp->queue_list)) {
                        struct urb_priv *nurbp;

                        nurbp = list_entry(qh->urbp->queue_list.next,
                                        struct urb_priv, queue_list);
                        nurbp->queued = 0;
                        list_add(&nurbp->qh->list, &qh->list);
                        newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
                } else
                        newlink = qh->link;

                /* Fix up the previous QH's queue to link to either
                 * the new head of this queue or the start of the
                 * next endpoint's queue. */
                pqh = list_entry(qh->list.prev, struct uhci_qh, list);
                pqh->link = newlink;
                if (pqh->urbp) {
                        struct urb_priv *turbp;

                        /* URBs queued behind pqh share its link value */
                        list_for_each_entry(turbp, &pqh->urbp->queue_list,
                                        queue_list)
                                turbp->qh->link = newlink;
                }
                wmb();

                /* Leave qh->link in case the HC is on the QH now, it will */
                /* continue the rest of the schedule */
                qh->element = UHCI_PTR_TERM;

                list_del_init(&qh->list);
        }

        list_del_init(&qh->urbp->queue_list);
        qh->urbp = NULL;

        /* QHs queued for removal in an earlier frame can be freed now;
         * the HC is guaranteed to be done with them */
        age = uhci_get_current_frame_number(uhci);
        if (age != uhci->qh_remove_age) {
                uhci_free_pending_qhs(uhci);
                uhci->qh_remove_age = age;
        }

        /* Check to see if the remove list is empty. Set the IOC bit */
        /* to force an interrupt so we can remove the QH */
        if (list_empty(&uhci->qh_remove_list))
                uhci_set_next_interrupt(uhci);

        list_add(&qh->remove_list, &uhci->qh_remove_list);
}
415
416 static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
417 {
418         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
419         struct uhci_td *td;
420
421         list_for_each_entry(td, &urbp->td_list, list) {
422                 if (toggle)
423                         td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
424                 else
425                         td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
426
427                 toggle ^= 1;
428         }
429
430         return toggle;
431 }
432
433 /* This function will append one URB's QH to another URB's QH. This is for */
434 /* queuing interrupt, control or bulk transfers */
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
        struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
        struct uhci_td *lltd;

        eurbp = eurb->hcpriv;
        urbp = urb->hcpriv;

        /* Find the first URB in the queue */
        furbp = eurbp;
        if (eurbp->queued) {
                /* eurb is somewhere mid-queue: walk the ring until we
                 * reach the entry marked as the queue head (!queued) */
                list_for_each_entry(furbp, &eurbp->queue_list, queue_list)
                        if (!furbp->queued)
                                break;
        }

        /* Last URB in the queue, and that URB's final TD */
        lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);

        lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);

        /* Control transfers always start with toggle 0 */
        if (!usb_pipecontrol(urb->pipe))
                /* Continue the toggle sequence from the last TD queued */
                usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                                usb_pipeout(urb->pipe),
                                uhci_fixup_toggle(urb,
                                        uhci_toggle(td_token(lltd)) ^ 1));

        /* All qh's in the queue need to link to the next queue */
        urbp->qh->link = eurbp->qh->link;

        wmb();                  /* Make sure we flush everything */

        /* Route the previous URB's last TD into our QH */
        lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

        list_add_tail(&urbp->queue_list, &furbp->queue_list);

        urbp->queued = 1;
}
473
/* Unlink an URB from its endpoint queue: fix up data toggles for the
 * URBs that follow it and repair the hardware links around it.  The
 * queue-head case and queue_list removal are finished in
 * uhci_remove_qh(). */
static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp, *nurbp, *purbp, *turbp;
        struct uhci_td *pltd;
        unsigned int toggle;

        urbp = urb->hcpriv;

        /* Not part of a queue at all */
        if (list_empty(&urbp->queue_list))
                return;

        nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);

        /*
         * Fix up the toggle for the following URBs in the queue.
         * Only needed for bulk and interrupt: control and isochronous
         * endpoints don't propagate toggles between messages.
         */
        if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
                if (!urbp->queued)
                        /* We just set the toggle in uhci_unlink_generic */
                        toggle = usb_gettoggle(urb->dev,
                                        usb_pipeendpoint(urb->pipe),
                                        usb_pipeout(urb->pipe));
                else {
                        /* If we're in the middle of the queue, grab the */
                        /* toggle from the TD previous to us */
                        purbp = list_entry(urbp->queue_list.prev,
                                        struct urb_priv, queue_list);
                        pltd = list_entry(purbp->td_list.prev,
                                        struct uhci_td, list);
                        toggle = uhci_toggle(td_token(pltd)) ^ 1;
                }

                /* Re-run the toggle sequence over every URB behind us,
                 * stopping when we wrap around to the queue head */
                list_for_each_entry(turbp, &urbp->queue_list, queue_list) {
                        if (!turbp->queued)
                                break;
                        toggle = uhci_fixup_toggle(turbp->urb, toggle);
                }

                /* Remember where the sequence ended for the next submit */
                usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                                usb_pipeout(urb->pipe), toggle);
        }

        if (urbp->queued) {
                /* We're somewhere in the middle (or end).  The case where
                 * we're at the head is handled in uhci_remove_qh(). */
                purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
                                queue_list);

                pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
                if (nurbp->queued)
                        pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
                else
                        /* The next URB happens to be the beginning, so */
                        /*  we're the last, end the chain */
                        pltd->link = UHCI_PTR_TERM;
        }

        /* urbp->queue_list is handled in uhci_remove_qh() */
}
535
536 static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
537 {
538         struct urb_priv *urbp;
539
540         urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
541         if (!urbp)
542                 return NULL;
543
544         memset((void *)urbp, 0, sizeof(*urbp));
545
546         urbp->inserttime = jiffies;
547         urbp->fsbrtime = jiffies;
548         urbp->urb = urb;
549         
550         INIT_LIST_HEAD(&urbp->td_list);
551         INIT_LIST_HEAD(&urbp->queue_list);
552         INIT_LIST_HEAD(&urbp->urb_list);
553
554         list_add_tail(&urbp->urb_list, &uhci->urb_list);
555
556         urb->hcpriv = urbp;
557
558         return urbp;
559 }
560
/* Append a TD to the tail of its URB's TD list and point the TD back
 * at the URB. */
static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

        td->urb = urb;

        list_add_tail(&td->list, &urbp->td_list);
}
569
570 static void uhci_remove_td_from_urb(struct uhci_td *td)
571 {
572         if (list_empty(&td->list))
573                 return;
574
575         list_del_init(&td->list);
576
577         td->urb = NULL;
578 }
579
/* Tear down an URB's private state: unlink all of its TDs, queue them
 * for deferred freeing, and release the urb_priv itself. */
static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
        struct uhci_td *td, *tmp;
        struct urb_priv *urbp;
        unsigned int age;

        urbp = (struct urb_priv *)urb->hcpriv;
        if (!urbp)
                return;

        if (!list_empty(&urbp->urb_list))
                dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
                                "or uhci->remove_list!\n", urb);

        /* TDs queued for removal in an earlier frame can be freed now;
         * the HC is guaranteed to be done with them */
        age = uhci_get_current_frame_number(uhci);
        if (age != uhci->td_remove_age) {
                uhci_free_pending_tds(uhci);
                uhci->td_remove_age = age;
        }

        /* Check to see if the remove list is empty. Set the IOC bit */
        /* to force an interrupt so we can remove the TD's*/
        if (list_empty(&uhci->td_remove_list))
                uhci_set_next_interrupt(uhci);

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                uhci_remove_td_from_urb(td);
                uhci_remove_td(uhci, td);
                /* Actual freeing is deferred until the next frame, when
                 * the HC can no longer be using the TD */
                list_add(&td->remove_list, &uhci->td_remove_list);
        }

        urb->hcpriv = NULL;
        kmem_cache_free(uhci_up_cachep, urbp);
}
614
/* Enable Full Speed Bandwidth Reclamation for this URB unless the
 * caller opted out via URB_NO_FSBR.  The first user links the
 * terminating skeleton QH back to the full-speed control queue. */
static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

        if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
                urbp->fsbr = 1;
                /* Only patch the link when going 0->1 users and no
                 * delayed turn-off (fsbrtimeout) is still pending */
                if (!uhci->fsbr++ && !uhci->fsbrtimeout)
                        uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
        }
}
625
/* Drop this URB's FSBR claim.  When the last user goes away, FSBR is
 * not switched off immediately; fsbrtimeout delays it by FSBR_DELAY. */
static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

        if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
                urbp->fsbr = 0;
                if (!--uhci->fsbr)
                        uhci->fsbrtimeout = jiffies + FSBR_DELAY;
        }
}
636
637 /*
638  * Map status to standard result codes
639  *
640  * <status> is (td->status & 0xF60000) [a.k.a. uhci_status_bits(td->status)]
641  * Note: status does not include the TD_CTRL_NAK bit.
642  * <dir_out> is True for output TDs and False for input TDs.
643  */
644 static int uhci_map_status(int status, int dir_out)
645 {
646         if (!status)
647                 return 0;
648         if (status & TD_CTRL_BITSTUFF)                  /* Bitstuff error */
649                 return -EPROTO;
650         if (status & TD_CTRL_CRCTIMEO) {                /* CRC/Timeout */
651                 if (dir_out)
652                         return -EPROTO;
653                 else
654                         return -EILSEQ;
655         }
656         if (status & TD_CTRL_BABBLE)                    /* Babble */
657                 return -EOVERFLOW;
658         if (status & TD_CTRL_DBUFERR)                   /* Buffer error */
659                 return -ENOSR;
660         if (status & TD_CTRL_STALLED)                   /* Stalled */
661                 return -EPIPE;
662         WARN_ON(status & TD_CTRL_ACTIVE);               /* Active */
663         return 0;
664 }
665
666 /*
667  * Control transfers
668  */
/* Build and schedule the TD chain for a control transfer:
 * SETUP TD, zero or more DATA TDs, then a final status TD with IOC set.
 * Queues behind @eurb when one is in flight on the same endpoint,
 * otherwise links onto the appropriate control skeleton QH.
 * Returns -EINPROGRESS on success, -ENOMEM on allocation failure
 * (already-built TDs remain on urbp->td_list for the caller's cleanup
 * path to reclaim). */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        struct uhci_td *td;
        struct uhci_qh *qh, *skelqh;
        unsigned long destination, status;
        int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
        int len = urb->transfer_buffer_length;
        dma_addr_t data = urb->transfer_dma;

        /* The "pipe" thing contains the destination in bits 8--18 */
        destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

        /* 3 errors */
        status = TD_CTRL_ACTIVE | uhci_maxerr(3);
        if (urb->dev->speed == USB_SPEED_LOW)
                status |= TD_CTRL_LS;

        /*
         * Build the TD for the control request setup packet
         */
        td = uhci_alloc_td(uhci, urb->dev);
        if (!td)
                return -ENOMEM;

        /* A SETUP packet is always 8 bytes -> explen field of 7 */
        uhci_add_td_to_urb(urb, td);
        uhci_fill_td(td, status, destination | uhci_explen(7),
                urb->setup_dma);

        /*
         * If direction is "send", change the packet ID from SETUP (0x2D)
         * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
         * set Short Packet Detect (SPD) for all data packets.
         */
        if (usb_pipeout(urb->pipe))
                destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
        else {
                destination ^= (USB_PID_SETUP ^ USB_PID_IN);
                status |= TD_CTRL_SPD;
        }

        /*
         * Build the DATA TD's
         */
        while (len > 0) {
                int pktsze = len;

                if (pktsze > maxsze)
                        pktsze = maxsze;

                td = uhci_alloc_td(uhci, urb->dev);
                if (!td)
                        return -ENOMEM;

                /* Alternate Data0/1 (start with Data1) */
                destination ^= TD_TOKEN_TOGGLE;

                uhci_add_td_to_urb(urb, td);
                uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
                        data);

                data += pktsze;
                len -= pktsze;
        }

        /*
         * Build the final TD for control status
         */
        td = uhci_alloc_td(uhci, urb->dev);
        if (!td)
                return -ENOMEM;

        /*
         * It's IN if the pipe is an output pipe or we're not expecting
         * data back.
         */
        destination &= ~TD_TOKEN_PID_MASK;
        if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
                destination |= USB_PID_IN;
        else
                destination |= USB_PID_OUT;

        destination |= TD_TOKEN_TOGGLE;         /* End in Data1 */

        /* Status stage: no short-packet detect, but interrupt on完 --
         * completion so we can finish the URB */
        status &= ~TD_CTRL_SPD;

        uhci_add_td_to_urb(urb, td);
        uhci_fill_td(td, status | TD_CTRL_IOC,
                destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);

        qh = uhci_alloc_qh(uhci, urb->dev);
        if (!qh)
                return -ENOMEM;

        urbp->qh = qh;
        qh->urbp = urbp;

        uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

        /* Low-speed transfers get a different queue, and won't hog the bus.
         * Also, some devices enumerate better without FSBR; the easiest way
         * to do that is to put URBs on the low-speed queue while the device
         * is in the DEFAULT state. */
        if (urb->dev->speed == USB_SPEED_LOW ||
                        urb->dev->state == USB_STATE_DEFAULT)
                skelqh = uhci->skel_ls_control_qh;
        else {
                skelqh = uhci->skel_fs_control_qh;
                uhci_inc_fsbr(uhci, urb);
        }

        if (eurb)
                uhci_append_queued_urb(uhci, eurb, urb);
        else
                uhci_insert_qh(uhci, skelqh, urb);

        return -EINPROGRESS;
}
787
788 /*
789  * If control-IN transfer was short, the status packet wasn't sent.
790  * This routine changes the element pointer in the QH to point at the
791  * status TD.  It's safe to do this even while the QH is live, because
792  * the hardware only updates the element pointer following a successful
793  * transfer.  The inactive TD for the short packet won't cause an update,
794  * so the pointer won't get overwritten.  The next time the controller
795  * sees this QH, it will send the status packet.
796  */
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        struct uhci_td *td;

        /* Remember we did this so uhci_result_control() jumps straight
         * to the status stage next time around */
        urbp->short_control_packet = 1;

        /* The status TD is always the last one on the URB's TD list */
        td = list_entry(urbp->td_list.prev, struct uhci_td, list);
        urbp->qh->element = cpu_to_le32(td->dma_handle);

        return -EINPROGRESS;
}
809
810
/* Examine a control URB's TDs and decide its fate: -EINPROGRESS while
 * any stage is still active, 0 on success (possibly after retriggering
 * the status stage for a short control-IN), or a mapped error code. */
static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
        struct list_head *tmp, *head;
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_td *td;
        unsigned int status;
        int ret = 0;

        if (list_empty(&urbp->td_list))
                return -EINVAL;

        head = &urbp->td_list;

        /* After usb_control_retrigger_status() only the status TD
         * (the last one) still matters */
        if (urbp->short_control_packet) {
                tmp = head->prev;
                goto status_stage;
        }

        tmp = head->next;
        td = list_entry(tmp, struct uhci_td, list);

        /* The first TD is the SETUP stage, check the status, but skip */
        /*  the count */
        status = uhci_status_bits(td_status(td));
        if (status & TD_CTRL_ACTIVE)
                return -EINPROGRESS;

        if (status)
                goto td_error;

        urb->actual_length = 0;

        /* The rest of the TD's (but the last) are data */
        tmp = tmp->next;
        while (tmp != head && tmp->next != head) {
                td = list_entry(tmp, struct uhci_td, list);

                tmp = tmp->next;

                status = uhci_status_bits(td_status(td));
                if (status & TD_CTRL_ACTIVE)
                        return -EINPROGRESS;

                urb->actual_length += uhci_actual_length(td_status(td));

                if (status)
                        goto td_error;

                /* Check to see if we received a short packet */
                if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
                        if (urb->transfer_flags & URB_SHORT_NOT_OK) {
                                ret = -EREMOTEIO;
                                goto err;
                        }

                        /* Short IN data means the status packet wasn't
                         * sent yet; point the QH at it and wait */
                        if (uhci_packetid(td_token(td)) == USB_PID_IN)
                                return usb_control_retrigger_status(uhci, urb);
                        else
                                return 0;
                }
        }

status_stage:
        td = list_entry(tmp, struct uhci_td, list);

        /* Control status stage */
        status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
        /* APC BackUPS Pro kludge */
        /* It tries to send all of the descriptor instead of the amount */
        /*  we requested */
        if (status & TD_CTRL_IOC &&     /* IOC is masked out by uhci_status_bits */
            status & TD_CTRL_ACTIVE &&
            status & TD_CTRL_NAK)
                return 0;
#endif

        status = uhci_status_bits(status);
        if (status & TD_CTRL_ACTIVE)
                return -EINPROGRESS;

        if (status)
                goto td_error;

        return 0;

td_error:
        ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
        if ((debug == 1 && ret != -EPIPE) || debug > 1) {
                /* Some debugging code */
                dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
                                __FUNCTION__, status);

                if (errbuf) {
                        /* Print the chain for debugging purposes */
                        uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

                        lprintk(errbuf);
                }
        }

        return ret;
}
917
918 /*
919  * Common submit for bulk and interrupt
920  */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
{
	struct uhci_td *td;
	struct uhci_qh *qh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	dma_addr_t data = urb->transfer_dma;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	/* Allow up to 3 errors per TD; short-packet detect (SPD) is only
	 * meaningful for IN transfers. */
	status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TD's
	 */
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (pktsze >= len) {
			pktsze = len;
			/* Last packet: a short read is not an error unless
			 * the caller asked for URB_SHORT_NOT_OK. */
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		/* Explicit-length field is encoded as (size - 1) */
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		data += pktsze;
		len -= maxsze;

		/* Advance the local data-toggle state for each packet */
		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
	    !len && urb->transfer_buffer_length) {
		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		/* UHCI_NULL_DATA_SIZE is the explen encoding of 0 bytes */
		uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	/* Always breadth first */
	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	/* Queue behind an earlier URB on the same endpoint if there is
	 * one; otherwise hang the QH off the schedule skeleton. */
	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
1020
1021 /*
1022  * Common result for bulk and interrupt
1023  */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status = 0;
	int ret = 0;

	urb->actual_length = 0;

	/* Walk the TD chain in schedule order, accumulating the byte
	 * count of every finished TD. */
	list_for_each_entry(td, &urbp->td_list, list) {
		status = uhci_status_bits(td_status(td));
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;	/* HC hasn't finished this TD */

		urb->actual_length += uhci_actual_length(td_status(td));

		if (status)
			goto td_error;

		/* A short packet ends the transfer early; it's only an
		 * error if the caller set URB_SHORT_NOT_OK. */
		if (uhci_actual_length(td_status(td)) < uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			} else
				return 0;
		}
	}

	return 0;

td_error:
	/* Translate the raw TD status bits into a -Exxx error code */
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	/*
	 * Enable this chunk of code if you want to see some more debugging.
	 * But be careful, it has the tendency to starve out khubd and prevent
	 * disconnects from happening successfully if you have a slow debug
	 * log interface (like a serial console).
	 */
#if 0
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}
#endif
	return ret;
}
1080
1081 static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1082 {
1083         int ret;
1084
1085         /* Can't have low-speed bulk transfers */
1086         if (urb->dev->speed == USB_SPEED_LOW)
1087                 return -EINVAL;
1088
1089         ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
1090         if (ret == -EINPROGRESS)
1091                 uhci_inc_fsbr(uhci, urb);
1092
1093         return ret;
1094 }
1095
1096 static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
1097 {
1098         /* USB 1.1 interrupt transfers only involve one packet per interval;
1099          * that's the uhci_submit_common() "breadth first" policy.  Drivers
1100          * can submit urbs of any length, but longer ones might need many
1101          * intervals to complete.
1102          */
1103         return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
1104 }
1105
1106 /*
1107  * Isochronous transfers
1108  */
1109 static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
1110 {
1111         struct urb *last_urb = NULL;
1112         struct urb_priv *up;
1113         int ret = 0;
1114
1115         list_for_each_entry(up, &uhci->urb_list, urb_list) {
1116                 struct urb *u = up->urb;
1117
1118                 /* look for pending URB's with identical pipe handle */
1119                 if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
1120                     (u->status == -EINPROGRESS) && (u != urb)) {
1121                         if (!last_urb)
1122                                 *start = u->start_frame;
1123                         last_urb = u;
1124                 }
1125         }
1126
1127         if (last_urb) {
1128                 *end = (last_urb->start_frame + last_urb->number_of_packets *
1129                                 last_urb->interval) & (UHCI_NUMFRAMES-1);
1130                 ret = 0;
1131         } else
1132                 ret = -1;       /* no previous urb found */
1133
1134         return ret;
1135 }
1136
1137 static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
1138 {
1139         int limits;
1140         unsigned int start = 0, end = 0;
1141
1142         if (urb->number_of_packets > 900)       /* 900? Why? */
1143                 return -EFBIG;
1144
1145         limits = isochronous_find_limits(uhci, urb, &start, &end);
1146
1147         if (urb->transfer_flags & URB_ISO_ASAP) {
1148                 if (limits)
1149                         urb->start_frame =
1150                                         (uhci_get_current_frame_number(uhci) +
1151                                                 10) & (UHCI_NUMFRAMES - 1);
1152                 else
1153                         urb->start_frame = end;
1154         } else {
1155                 urb->start_frame &= (UHCI_NUMFRAMES - 1);
1156                 /* FIXME: Sanity check */
1157         }
1158
1159         return 0;
1160 }
1161
1162 /*
1163  * Isochronous transfers
1164  */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	int i, ret, frame;
	int status, destination;

	/* TD_CTRL_IOS marks the TD as isochronous; no error counter
	 * is set (isoc packets are never retried). */
	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	ret = isochronous_find_start(uhci, urb);
	if (ret)
		return ret;

	/* One TD per packet, inserted directly into the frame list at
	 * urb->interval frame spacing. */
	frame = urb->start_frame;
	for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
		/* NOTE: zero-length frames get no TD at all, so the TD
		 * list may be shorter than number_of_packets (the result
		 * routine must account for this). */
		if (!urb->iso_frame_desc[i].length)
			continue;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		/* Explicit-length field is encoded as (size - 1) */
		uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
			urb->transfer_dma + urb->iso_frame_desc[i].offset);

		/* Interrupt only when the final packet completes */
		if (i + 1 >= urb->number_of_packets)
			td->status |= cpu_to_le32(TD_CTRL_IOC);

		uhci_insert_td_frame_list(uhci, td, frame);
	}

	return -EINPROGRESS;
}
1199
1200 static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1201 {
1202         struct uhci_td *td;
1203         struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1204         int status;
1205         int i, ret = 0;
1206
1207         urb->actual_length = 0;
1208
1209         i = 0;
1210         list_for_each_entry(td, &urbp->td_list, list) {
1211                 int actlength;
1212
1213                 if (td_status(td) & TD_CTRL_ACTIVE)
1214                         return -EINPROGRESS;
1215
1216                 actlength = uhci_actual_length(td_status(td));
1217                 urb->iso_frame_desc[i].actual_length = actlength;
1218                 urb->actual_length += actlength;
1219
1220                 status = uhci_map_status(uhci_status_bits(td_status(td)),
1221                                 usb_pipeout(urb->pipe));
1222                 urb->iso_frame_desc[i].status = status;
1223                 if (status) {
1224                         urb->error_count++;
1225                         ret = status;
1226                 }
1227
1228                 i++;
1229         }
1230
1231         return ret;
1232 }
1233
1234 static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
1235 {
1236         struct urb_priv *up;
1237
1238         /* We don't match Isoc transfers since they are special */
1239         if (usb_pipeisoc(urb->pipe))
1240                 return NULL;
1241
1242         list_for_each_entry(up, &uhci->urb_list, urb_list) {
1243                 struct urb *u = up->urb;
1244
1245                 if (u->dev == urb->dev && u->status == -EINPROGRESS) {
1246                         /* For control, ignore the direction */
1247                         if (usb_pipecontrol(urb->pipe) &&
1248                             (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
1249                                 return u;
1250                         else if (u->pipe == urb->pipe)
1251                                 return u;
1252                 }
1253         }
1254
1255         return NULL;
1256 }
1257
static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, int mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb *eurb;
	int bustime;

	spin_lock_irqsave(&uhci->schedule_lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto out;

	/* An in-flight URB on the same endpoint (if any); the new URB
	 * will be queued behind it rather than scheduled directly. */
	eurb = uhci_find_urb_ep(uhci, urb);

	if (!uhci_alloc_urb_priv(uhci, urb)) {
		ret = -ENOMEM;
		goto out;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(uhci, urb, eurb);
		break;
	case PIPE_INTERRUPT:
		if (!eurb) {
			/* First URB on this endpoint: reserve bus time */
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, eurb);
				if (ret == -EINPROGRESS)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			urb->bandwidth = eurb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, eurb);
		}
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(uhci, urb, eurb);
		break;
	case PIPE_ISOCHRONOUS:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb);
		if (ret == -EINPROGRESS)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}

	/* The submit routines return -EINPROGRESS on success */
	if (ret != -EINPROGRESS) {
		/* Submit failed, so delete it from the urb_list */
		struct urb_priv *urbp = urb->hcpriv;

		list_del_init(&urbp->urb_list);
		uhci_destroy_urb_priv(uhci, urb);
	} else
		ret = 0;

out:
	spin_unlock_irqrestore(&uhci->schedule_lock, flags);
	return ret;
}
1327
1328 /*
1329  * Return the result of a transfer
1330  */
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
	int ret = -EINPROGRESS;
	struct urb_priv *urbp;

	spin_lock(&urb->lock);

	urbp = (struct urb_priv *)urb->hcpriv;

	if (urb->status != -EINPROGRESS)	/* URB already dequeued */
		goto out;

	/* Poll the TD chain for completion status */
	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_result_control(uhci, urb);
		break;
	case PIPE_BULK:
	case PIPE_INTERRUPT:
		ret = uhci_result_common(uhci, urb);
		break;
	case PIPE_ISOCHRONOUS:
		ret = uhci_result_isochronous(uhci, urb);
		break;
	}

	if (ret == -EINPROGRESS)
		goto out;		/* hardware isn't finished yet */
	urb->status = ret;

	/* Transfer is done (or failed): unschedule it and settle any
	 * reserved bandwidth. */
	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		uhci_unlink_generic(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&urbp->queue_list) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		uhci_unlink_generic(uhci, urb);
		break;
	default:
		dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
				"for urb %p\n",
				__FUNCTION__, usb_pipetype(urb->pipe), urb);
	}

	/* Move it from uhci->urb_list to uhci->complete_list */
	uhci_moveto_complete(uhci, urbp);

out:
	spin_unlock(&urb->lock);
}
1392
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head;
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int prevactive = 0;

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */

	/*
	 * Now we need to find out what the last successful toggle was
	 * so we can update the local data toggle for the next transfer
	 *
	 * There are 2 ways the last successful completed TD is found:
	 *
	 * 1) The TD is NOT active and the actual length < expected length
	 * 2) The TD is NOT active and it's the last TD in the chain
	 *
	 * and a third way the first uncompleted TD is found:
	 *
	 * 3) The TD is active and the previous TD is NOT active
	 *
	 * Control and Isochronous ignore the toggle, so this is safe
	 * for all types
	 *
	 * FIXME: The toggle fixups won't be 100% reliable until we
	 * change over to using a single queue for each endpoint and
	 * stop the queue before unlinking.
	 */
	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		if (!(td_status(td) & TD_CTRL_ACTIVE) &&
				(uhci_actual_length(td_status(td)) <
				 uhci_expected_length(td_token(td)) ||
				td->list.next == head))
			/* Cases 1 & 2: this TD completed, so the next
			 * transfer starts with the opposite toggle */
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)) ^ 1);
		else if ((td_status(td) & TD_CTRL_ACTIVE) && !prevactive)
			/* Case 3: first unfinished TD, so its own toggle
			 * is the one to resume with */
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)));

		prevactive = td_status(td) & TD_CTRL_ACTIVE;
	}

	uhci_delete_queued_urb(uhci, urb);

	/* The interrupt loop will reclaim the QH's */
	uhci_remove_qh(uhci, urbp->qh);
	urbp->qh = NULL;
}
1445
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	unsigned int age;

	spin_lock_irqsave(&uhci->schedule_lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;
	list_del_init(&urbp->urb_list);

	/* Take the URB's QH off the schedule and fix up data toggles */
	uhci_unlink_generic(uhci, urb);

	/* If the frame counter has advanced since the pending removals
	 * were queued, flush them now (same age check as uhci_irq()). */
	age = uhci_get_current_frame_number(uhci);
	if (age != uhci->urb_remove_age) {
		uhci_remove_pending_urbps(uhci);
		uhci->urb_remove_age = age;
	}

	/* If we're the first, set the next interrupt bit */
	if (list_empty(&uhci->urb_remove_list))
		uhci_set_next_interrupt(uhci);
	list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);

done:
	spin_unlock_irqrestore(&uhci->schedule_lock, flags);
	return 0;
}
1476
static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *head;
	struct uhci_td *td;
	int count = 0;

	uhci_dec_fsbr(uhci, urb);

	/* Remember that FSBR was already turned off for this URB */
	urbp->fsbr_timeout = 1;

	/*
	 * Ideally we would want to fix qh->element as well, but it's
	 * read/write by the HC, so that can introduce a race. It's not
	 * really worth the hassle
	 */

	/* Set the depth-first bit on every DEPTH_INTERVAL'th TD link */
	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		/*
		 * Make sure we don't do the last one (since it'll have the
		 * TERM bit set) as well as we skip every so many TD's to
		 * make sure it doesn't hog the bandwidth
		 */
		if (td->list.next != head && (count % DEPTH_INTERVAL) ==
				(DEPTH_INTERVAL - 1))
			td->link |= UHCI_PTR_DEPTH;

		count++;
	}

	return 0;
}
1510
1511 /*
1512  * uhci_get_current_frame_number()
1513  *
1514  * returns the current frame number for a USB bus/controller.
1515  */
static unsigned int uhci_get_current_frame_number(struct uhci_hcd *uhci)
{
	/* Direct read of the controller's frame-number register */
	return inw(uhci->io_addr + USBFRNUM);
}
1520
1521 static int init_stall_timer(struct usb_hcd *hcd);
1522
static void stall_callback(unsigned long ptr)
{
	struct usb_hcd *hcd = (struct usb_hcd *)ptr;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	struct urb_priv *up;
	unsigned long flags;
	int called_uhci_finish_completion = 0;

	spin_lock_irqsave(&uhci->schedule_lock, flags);

	/* Reclaim unlinked URBs once the frame counter has moved past
	 * the frame they were removed in (see uhci_urb_dequeue()). */
	if (!list_empty(&uhci->urb_remove_list) &&
	    uhci_get_current_frame_number(uhci) != uhci->urb_remove_age) {
		uhci_remove_pending_urbps(uhci);
		uhci_finish_completion(hcd, NULL);
		called_uhci_finish_completion = 1;
	}

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		spin_lock(&u->lock);

		/* Check if the FSBR timed out */
		if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
			uhci_fsbr_timeout(uhci, u);

		spin_unlock(&u->lock);
	}
	spin_unlock_irqrestore(&uhci->schedule_lock, flags);

	/* Wake up anyone waiting for an URB to complete */
	if (called_uhci_finish_completion)
		wake_up_all(&uhci->waitqh);

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}

	/* Poll for and perform state transitions */
	hc_state_transitions(uhci);
	if (unlikely(uhci->suspended_ports && uhci->state != UHCI_SUSPENDED))
		uhci_check_resume(uhci);

	/* Re-arm ourselves; this timer runs for the life of the HC */
	init_stall_timer(hcd);
}
1569
1570 static int init_stall_timer(struct usb_hcd *hcd)
1571 {
1572         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1573
1574         init_timer(&uhci->stall_timer);
1575         uhci->stall_timer.function = stall_callback;
1576         uhci->stall_timer.data = (unsigned long)hcd;
1577         uhci->stall_timer.expires = jiffies + msecs_to_jiffies(100);
1578         add_timer(&uhci->stall_timer);
1579
1580         return 0;
1581 }
1582
1583 static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
1584 {
1585         struct uhci_qh *qh, *tmp;
1586
1587         list_for_each_entry_safe(qh, tmp, &uhci->qh_remove_list, remove_list) {
1588                 list_del_init(&qh->remove_list);
1589
1590                 uhci_free_qh(uhci, qh);
1591         }
1592 }
1593
1594 static void uhci_free_pending_tds(struct uhci_hcd *uhci)
1595 {
1596         struct uhci_td *td, *tmp;
1597
1598         list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
1599                 list_del_init(&td->remove_list);
1600
1601                 uhci_free_td(uhci, td);
1602         }
1603 }
1604
static void
uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
__releases(uhci->schedule_lock)
__acquires(uhci->schedule_lock)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	uhci_destroy_urb_priv(uhci, urb);

	/* Drop the schedule lock around the completion callback (the
	 * sparse annotations above document this); the handler may
	 * call back into the driver, e.g. to resubmit the URB. */
	spin_unlock(&uhci->schedule_lock);
	usb_hcd_giveback_urb(hcd, urb, regs);
	spin_lock(&uhci->schedule_lock);
}
1618
1619 static void uhci_finish_completion(struct usb_hcd *hcd, struct pt_regs *regs)
1620 {
1621         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1622         struct urb_priv *urbp, *tmp;
1623
1624         list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
1625                 struct urb *urb = urbp->urb;
1626
1627                 list_del_init(&urbp->urb_list);
1628                 uhci_finish_urb(hcd, urb, regs);
1629         }
1630 }
1631
static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
{

	/* Splice the urb_remove_list onto the end of the complete_list.
	 * complete_list.prev is the list tail, so the spliced entries
	 * land after all currently completed URBs. */
	list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
}
1638
static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long io_addr = uhci->io_addr;
	unsigned short status;
	struct urb_priv *urbp, *tmp;
	unsigned int age;

	/*
	 * Read the interrupt status, and write it back to clear the
	 * interrupt cause.  Contrary to the UHCI specification, the
	 * "HC Halted" status bit is persistent: it is RO, not R/WC.
	 */
	status = inw(io_addr + USBSTS);
	if (!(status & ~USBSTS_HCH))	/* shared interrupt, not mine */
		return IRQ_NONE;
	outw(status, io_addr + USBSTS);		/* Clear it */

	/* Report fatal/unexpected conditions before normal processing */
	if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
		if (status & USBSTS_HSE)
			dev_err(uhci_dev(uhci), "host system error, "
					"PCI problems?\n");
		if (status & USBSTS_HCPE)
			dev_err(uhci_dev(uhci), "host controller process "
					"error, something bad happened!\n");
		if ((status & USBSTS_HCH) && uhci->state > 0) {
			dev_err(uhci_dev(uhci), "host controller halted, "
					"very bad!\n");
			/* FIXME: Reset the controller, fix the offending TD */
		}
	}

	if (status & USBSTS_RD)
		uhci->resume_detect = 1;

	spin_lock(&uhci->schedule_lock);

	/* Free deferred removals only after the frame counter has moved
	 * past the frame in which they were queued. */
	age = uhci_get_current_frame_number(uhci);
	if (age != uhci->qh_remove_age)
		uhci_free_pending_qhs(uhci);
	if (age != uhci->td_remove_age)
		uhci_free_pending_tds(uhci);
	if (age != uhci->urb_remove_age)
		uhci_remove_pending_urbps(uhci);

	/* Keep the frame interrupt armed only while work remains */
	if (list_empty(&uhci->urb_remove_list) &&
	    list_empty(&uhci->td_remove_list) &&
	    list_empty(&uhci->qh_remove_list))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);

	/* Walk the list of pending URBs to see which ones completed
	 * (must be _safe because uhci_transfer_result() dequeues URBs) */
	list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
		struct urb *urb = urbp->urb;

		/* Checks the status and does all of the magic necessary */
		uhci_transfer_result(uhci, urb);
	}
	uhci_finish_completion(hcd, regs);

	spin_unlock(&uhci->schedule_lock);

	/* Wake up anyone waiting for an URB to complete */
	wake_up_all(&uhci->waitqh);

	return IRQ_HANDLED;
}
1708
1709 static void reset_hc(struct uhci_hcd *uhci)
1710 {
1711         unsigned long io_addr = uhci->io_addr;
1712
1713         /* Turn off PIRQ, SMI, and all interrupts.  This also turns off
1714          * the BIOS's USB Legacy Support.
1715          */
1716         pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0);
1717         outw(0, uhci->io_addr + USBINTR);
1718
1719         /* Global reset for 50ms */
1720         uhci->state = UHCI_RESET;
1721         outw(USBCMD_GRESET, io_addr + USBCMD);
1722         msleep(50);
1723         outw(0, io_addr + USBCMD);
1724
1725         /* Another 10ms delay */
1726         msleep(10);
1727         uhci->resume_detect = 0;
1728 }
1729
/*
 * Stop the controller by entering Global Suspend Mode (EGSM).
 * Any stale resume-detect indication is cleared first.
 */
static void suspend_hc(struct uhci_hcd *uhci)
{
	unsigned long io_addr = uhci->io_addr;

	dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);
	uhci->state = UHCI_SUSPENDED;
	uhci->resume_detect = 0;
	outw(USBCMD_EGSM, io_addr + USBCMD);
}
1739
/*
 * Advance the controller through the resume sequence.  Called once to
 * start a global resume and then again (after uhci->state_end expires)
 * to finish it; uhci->state records how far we've gotten.
 */
static void wakeup_hc(struct uhci_hcd *uhci)
{
	unsigned long io_addr = uhci->io_addr;

	switch (uhci->state) {
		case UHCI_SUSPENDED:		/* Start the resume */
			dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);

			/* Global resume for >= 20ms */
			outw(USBCMD_FGR | USBCMD_EGSM, io_addr + USBCMD);
			uhci->state = UHCI_RESUMING_1;
			uhci->state_end = jiffies + msecs_to_jiffies(20);
			break;

		case UHCI_RESUMING_1:		/* End global resume */
			uhci->state = UHCI_RESUMING_2;
			outw(0, io_addr + USBCMD);
			/* Falls through */

		case UHCI_RESUMING_2:		/* Wait for EOP to be sent */
			if (inw(io_addr + USBCMD) & USBCMD_FGR)
				break;

			/* Run for at least 1 second, and
			 * mark it configured with a 64-byte max packet */
			uhci->state = UHCI_RUNNING_GRACE;
			uhci->state_end = jiffies + HZ;
			outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP,
					io_addr + USBCMD);
			break;

		case UHCI_RUNNING_GRACE:	/* Now allowed to suspend */
			uhci->state = UHCI_RUNNING;
			break;

		default:
			break;
	}
}
1779
1780 static int ports_active(struct uhci_hcd *uhci)
1781 {
1782         unsigned long io_addr = uhci->io_addr;
1783         int connection = 0;
1784         int i;
1785
1786         for (i = 0; i < uhci->rh_numports; i++)
1787                 connection |= (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_CCS);
1788
1789         return connection;
1790 }
1791
1792 static int suspend_allowed(struct uhci_hcd *uhci)
1793 {
1794         unsigned long io_addr = uhci->io_addr;
1795         int i;
1796
1797         if (to_pci_dev(uhci_dev(uhci))->vendor != PCI_VENDOR_ID_INTEL)
1798                 return 1;
1799
1800         /* Some of Intel's USB controllers have a bug that causes false
1801          * resume indications if any port has an over current condition.
1802          * To prevent problems, we will not allow a global suspend if
1803          * any ports are OC.
1804          *
1805          * Some motherboards using Intel's chipsets (but not using all
1806          * the USB ports) appear to hardwire the over current inputs active
1807          * to disable the USB ports.
1808          */
1809
1810         /* check for over current condition on any port */
1811         for (i = 0; i < uhci->rh_numports; i++) {
1812                 if (inw(io_addr + USBPORTSC1 + i * 2) & USBPORTSC_OC)
1813                         return 0;
1814         }
1815
1816         return 1;
1817 }
1818
/*
 * Walk the controller's power-state machine: auto-suspend after one
 * second with no active ports, wake up on a device-requested resume,
 * and step the timed resume states along once their deadline passes.
 */
static void hc_state_transitions(struct uhci_hcd *uhci)
{
	switch (uhci->state) {
		case UHCI_RUNNING:

			/* global suspend if nothing connected for 1 second */
			if (!ports_active(uhci) && suspend_allowed(uhci)) {
				uhci->state = UHCI_SUSPENDING_GRACE;
				uhci->state_end = jiffies + HZ;
			}
			break;

		case UHCI_SUSPENDING_GRACE:
			if (ports_active(uhci))
				uhci->state = UHCI_RUNNING;
			else if (time_after_eq(jiffies, uhci->state_end))
				suspend_hc(uhci);
			break;

		case UHCI_SUSPENDED:

			/* wakeup if requested by a device */
			if (uhci->resume_detect)
				wakeup_hc(uhci);
			break;

		case UHCI_RESUMING_1:
		case UHCI_RESUMING_2:
		case UHCI_RUNNING_GRACE:
			if (time_after_eq(jiffies, uhci->state_end))
				wakeup_hc(uhci);
			break;

		default:
			break;
	}
}
1856
/*
 * Reset the controller and start it running: program the frame number
 * and frame-list base address, re-enable PIRQ and all interrupt
 * sources, then set the Run/Stop bit.
 *
 * Returns 0 on success or -ETIMEDOUT if HCRESET never self-clears.
 */
static int start_hc(struct uhci_hcd *uhci)
{
	unsigned long io_addr = uhci->io_addr;
	int timeout = 10;	/* ~10ms worth of 1ms polls for HCRESET */

	/*
	 * Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(USBCMD_HCRESET, io_addr + USBCMD);
	while (inw(io_addr + USBCMD) & USBCMD_HCRESET) {
		if (--timeout < 0) {
			dev_err(uhci_dev(uhci), "USBCMD_HCRESET timed out!\n");
			return -ETIMEDOUT;
		}
		msleep(1);
	}

	/* Turn on PIRQ and all interrupts */
	pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
			USBLEGSUP_DEFAULT);
	outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
		io_addr + USBINTR);

	/* Start at frame 0 */
	outw(0, io_addr + USBFRNUM);
	outl(uhci->fl->dma_handle, io_addr + USBFLBASEADD);

	/* Run and mark it configured with a 64-byte max packet */
	uhci->state = UHCI_RUNNING_GRACE;
	uhci->state_end = jiffies + HZ;
	outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, io_addr + USBCMD);

	uhci->hcd.state = USB_STATE_RUNNING;
	return 0;
}
1895
/*
 * De-allocate all resources: skeleton QHs, the terminating TD, both
 * DMA pools, the frame list, and the procfs entry.  Each pointer is
 * tested and NULLed so this is safe to call after a partial setup.
 */
static void release_uhci(struct uhci_hcd *uhci)
{
	int i;

	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		if (uhci->skelqh[i]) {
			uhci_free_qh(uhci, uhci->skelqh[i]);
			uhci->skelqh[i] = NULL;
		}

	if (uhci->term_td) {
		uhci_free_td(uhci, uhci->term_td);
		uhci->term_td = NULL;
	}

	if (uhci->qh_pool) {
		dma_pool_destroy(uhci->qh_pool);
		uhci->qh_pool = NULL;
	}

	if (uhci->td_pool) {
		dma_pool_destroy(uhci->td_pool);
		uhci->td_pool = NULL;
	}

	if (uhci->fl) {
		dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
				uhci->fl, uhci->fl->dma_handle);
		uhci->fl = NULL;
	}

#ifdef CONFIG_PROC_FS
	if (uhci->proc_entry) {
		remove_proc_entry(uhci->hcd.self.bus_name, uhci_proc_root);
		uhci->proc_entry = NULL;
	}
#endif
}
1937
/*
 * hc_driver .reset hook: record the controller's I/O base address and
 * reset the hardware so we start from a clean, interrupt-free state.
 */
static int uhci_reset(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	uhci->io_addr = (unsigned long) hcd->regs;

	/* Kick BIOS off this hardware and reset, so we won't get
	 * interrupts from any previous setup.
	 */
	reset_hc(uhci);
	return 0;
}
1950
1951 /*
1952  * Allocate a frame list, and then setup the skeleton
1953  *
1954  * The hardware doesn't really know any difference
1955  * in the queues, but the order does matter for the
1956  * protocols higher up. The order is:
1957  *
1958  *  - any isochronous events handled before any
1959  *    of the queues. We don't do that here, because
1960  *    we'll create the actual TD entries on demand.
1961  *  - The first queue is the interrupt queue.
1962  *  - The second queue is the control queue, split into low- and full-speed
1963  *  - The third queue is bulk queue.
1964  *  - The fourth queue is the bandwidth reclamation queue, which loops back
1965  *    to the full-speed control queue.
1966  */
static int uhci_start(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int retval = -EBUSY;
	int i, port;
	unsigned io_size;
	dma_addr_t dma_handle;
	struct usb_device *udev;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *ent;
#endif

	io_size = pci_resource_len(to_pci_dev(uhci_dev(uhci)), hcd->region);

#ifdef CONFIG_PROC_FS
	/* Per-controller debug file, named after the bus */
	ent = create_proc_entry(hcd->self.bus_name, S_IFREG|S_IRUGO|S_IWUSR, uhci_proc_root);
	if (!ent) {
		dev_err(uhci_dev(uhci), "couldn't create uhci proc entry\n");
		retval = -ENOMEM;
		goto err_create_proc_entry;
	}

	ent->data = uhci;
	ent->proc_fops = &uhci_proc_operations;
	ent->size = 0;
	uhci->proc_entry = ent;
#endif

	uhci->fsbr = 0;
	uhci->fsbrtimeout = 0;

	spin_lock_init(&uhci->schedule_lock);
	INIT_LIST_HEAD(&uhci->qh_remove_list);

	INIT_LIST_HEAD(&uhci->td_remove_list);

	INIT_LIST_HEAD(&uhci->urb_remove_list);

	INIT_LIST_HEAD(&uhci->urb_list);

	INIT_LIST_HEAD(&uhci->complete_list);

	init_waitqueue_head(&uhci->waitqh);

	/* The frame list must be DMA-coherent: the HC reads it directly */
	uhci->fl = dma_alloc_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
			&dma_handle, 0);
	if (!uhci->fl) {
		dev_err(uhci_dev(uhci), "unable to allocate "
				"consistent memory for frame list\n");
		goto err_alloc_fl;
	}

	memset((void *)uhci->fl, 0, sizeof(*uhci->fl));

	uhci->fl->dma_handle = dma_handle;

	uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
			sizeof(struct uhci_td), 16, 0);
	if (!uhci->td_pool) {
		dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
		goto err_create_td_pool;
	}

	uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
			sizeof(struct uhci_qh), 16, 0);
	if (!uhci->qh_pool) {
		dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
		goto err_create_qh_pool;
	}

	/* Initialize the root hub */

	/* UHCI specs says devices must have 2 ports, but goes on to say */
	/*  they may have more but give no way to determine how many they */
	/*  have. However, according to the UHCI spec, Bit 7 is always set */
	/*  to 1. So we try to use this to our advantage */
	for (port = 0; port < (io_size - 0x10) / 2; port++) {
		unsigned int portstatus;

		portstatus = inw(uhci->io_addr + 0x10 + (port * 2));
		if (!(portstatus & 0x0080))
			break;
	}
	if (debug)
		dev_info(uhci_dev(uhci), "detected %d ports\n", port);

	/* This is experimental so anything less than 2 or greater than 8 is */
	/*  something weird and we'll ignore it */
	if (port < 2 || port > UHCI_RH_MAXCHILD) {
		dev_info(uhci_dev(uhci), "port count misdetected? "
				"forcing to 2 ports\n");
		port = 2;
	}

	uhci->rh_numports = port;

	udev = usb_alloc_dev(NULL, &hcd->self, 0);
	if (!udev) {
		dev_err(uhci_dev(uhci), "unable to allocate root hub\n");
		goto err_alloc_root_hub;
	}

	uhci->term_td = uhci_alloc_td(uhci, udev);
	if (!uhci->term_td) {
		dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
		goto err_alloc_term_td;
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		uhci->skelqh[i] = uhci_alloc_qh(uhci, udev);
		if (!uhci->skelqh[i]) {
			dev_err(uhci_dev(uhci), "unable to allocate QH\n");
			goto err_alloc_skelqh;
		}
	}

	/*
	 * 8 Interrupt queues; link all higher int queues to int1,
	 * then link int1 to control and control to bulk
	 */
	uhci->skel_int128_qh->link =
			uhci->skel_int64_qh->link =
			uhci->skel_int32_qh->link =
			uhci->skel_int16_qh->link =
			uhci->skel_int8_qh->link =
			uhci->skel_int4_qh->link =
			uhci->skel_int2_qh->link =
			cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;

	uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_fs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
	uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;

	/* This dummy TD is to work around a bug in Intel PIIX controllers */
	uhci_fill_td(uhci->term_td, 0, (UHCI_NULL_DATA_SIZE << 21) |
		(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
	uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);

	uhci->skel_term_qh->link = UHCI_PTR_TERM;
	uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);

	/*
	 * Fill the frame list: make all entries point to the proper
	 * interrupt queue.
	 *
	 * The interrupt queues will be interleaved as evenly as possible.
	 * There's not much to be done about period-1 interrupts; they have
	 * to occur in every frame.  But we can schedule period-2 interrupts
	 * in odd-numbered frames, period-4 interrupts in frames congruent
	 * to 2 (mod 4), and so on.  This way each frame only has two
	 * interrupt QHs, which will help spread out bandwidth utilization.
	 */
	for (i = 0; i < UHCI_NUMFRAMES; i++) {
		int irq;

		/*
		 * ffs (Find First bit Set) does exactly what we need:
		 * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[6],
		 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc.
		 * ffs > 6 => not on any high-period queue, so use
		 *	skel_int1_qh = skelqh[7].
		 * Add UHCI_NUMFRAMES to insure at least one bit is set.
		 */
		irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES);
		if (irq < 0)
			irq = 7;

		/* Only place we don't use the frame list routines */
		uhci->fl->frame[i] = UHCI_PTR_QH |
				cpu_to_le32(uhci->skelqh[irq]->dma_handle);
	}

	/*
	 * Some architectures require a full mb() to enforce completion of
	 * the memory writes above before the I/O transfers in start_hc().
	 */
	mb();
	if ((retval = start_hc(uhci)) != 0)
		goto err_alloc_skelqh;

	init_stall_timer(hcd);

	udev->speed = USB_SPEED_FULL;

	if (hcd_register_root(udev, &uhci->hcd) != 0) {
		dev_err(uhci_dev(uhci), "unable to start root hub\n");
		retval = -ENOMEM;
		goto err_start_root_hub;
	}

	return 0;

/*
 * error exits: unwind in reverse order of acquisition
 */
err_start_root_hub:
	reset_hc(uhci);

	del_timer_sync(&uhci->stall_timer);

err_alloc_skelqh:
	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		if (uhci->skelqh[i]) {
			uhci_free_qh(uhci, uhci->skelqh[i]);
			uhci->skelqh[i] = NULL;
		}

	uhci_free_td(uhci, uhci->term_td);
	uhci->term_td = NULL;

err_alloc_term_td:
	usb_put_dev(udev);

err_alloc_root_hub:
	dma_pool_destroy(uhci->qh_pool);
	uhci->qh_pool = NULL;

err_create_qh_pool:
	dma_pool_destroy(uhci->td_pool);
	uhci->td_pool = NULL;

err_create_td_pool:
	dma_free_coherent(uhci_dev(uhci), sizeof(*uhci->fl),
			uhci->fl, uhci->fl->dma_handle);
	uhci->fl = NULL;

err_alloc_fl:
#ifdef CONFIG_PROC_FS
	remove_proc_entry(hcd->self.bus_name, uhci_proc_root);
	uhci->proc_entry = NULL;

err_create_proc_entry:
#endif

	return retval;
}
2204
/*
 * hc_driver .stop hook: halt the hardware, flush every pending URB,
 * wake any waiters, and free all driver resources.
 */
static void uhci_stop(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	del_timer_sync(&uhci->stall_timer);

	/*
	 * At this point, we're guaranteed that no new connects can be made
	 * to this bus since there are no more parents
	 */

	reset_hc(uhci);

	spin_lock_irq(&uhci->schedule_lock);
	uhci_free_pending_qhs(uhci);
	uhci_free_pending_tds(uhci);
	uhci_remove_pending_urbps(uhci);
	uhci_finish_completion(hcd, NULL);

	/* NOTE(review): second pass presumably frees QHs/TDs queued by the
	 * completion handling just above -- confirm before removing */
	uhci_free_pending_qhs(uhci);
	uhci_free_pending_tds(uhci);
	spin_unlock_irq(&uhci->schedule_lock);

	/* Wake up anyone waiting for an URB to complete */
	wake_up_all(&uhci->waitqh);

	release_uhci(uhci);
}
2233
2234 #ifdef CONFIG_PM
/*
 * PM suspend callback.  Saves the current frame number so that
 * uhci_resume() can restore it after register contents are lost.
 * @state: requested PM state (unused here).
 */
static int uhci_suspend(struct usb_hcd *hcd, u32 state)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	/* Don't try to suspend broken motherboards, reset instead */
	if (suspend_allowed(uhci)) {
		suspend_hc(uhci);
		/* Mask to the low 10 bits (frame-list index) */
		uhci->saved_framenumber =
				inw(uhci->io_addr + USBFRNUM) & 0x3ff;
	} else
		reset_hc(uhci);
	return 0;
}
2248
/*
 * PM resume callback.  If the controller was cleanly suspended,
 * reprogram the registers some systems lose across suspend and rely on
 * the resume-detect path to restart; otherwise do a full reset/restart.
 */
static int uhci_resume(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int rc;

	pci_set_master(to_pci_dev(uhci_dev(uhci)));

	if (uhci->state == UHCI_SUSPENDED) {

		/*
		 * Some systems don't maintain the UHCI register values
		 * during a PM suspend/resume cycle, so reinitialize
		 * the Frame Number, Framelist Base Address, Interrupt
		 * Enable, and Legacy Support registers.
		 */
		pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
				0);
		outw(uhci->saved_framenumber, uhci->io_addr + USBFRNUM);
		outl(uhci->fl->dma_handle, uhci->io_addr + USBFLBASEADD);
		outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC |
				USBINTR_SP, uhci->io_addr + USBINTR);
		uhci->resume_detect = 1;
		pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
				USBLEGSUP_DEFAULT);
	} else {
		reset_hc(uhci);
		if ((rc = start_hc(uhci)) != 0)
			return rc;
	}
	uhci->hcd.state = USB_STATE_RUNNING;
	return 0;
}
2281 #endif
2282
2283 static struct usb_hcd *uhci_hcd_alloc(void)
2284 {
2285         struct uhci_hcd *uhci;
2286
2287         uhci = (struct uhci_hcd *)kmalloc(sizeof(*uhci), GFP_KERNEL);
2288         if (!uhci)
2289                 return NULL;
2290
2291         memset(uhci, 0, sizeof(*uhci));
2292         uhci->hcd.product_desc = "UHCI Host Controller";
2293         return &uhci->hcd;
2294 }
2295
2296 /* Are there any URBs for a particular device/endpoint on a given list? */
2297 static int urbs_for_ep_list(struct list_head *head,
2298                 struct hcd_dev *hdev, int ep)
2299 {
2300         struct urb_priv *urbp;
2301
2302         list_for_each_entry(urbp, head, urb_list) {
2303                 struct urb *urb = urbp->urb;
2304
2305                 if (hdev == urb->dev->hcpriv && ep ==
2306                                 (usb_pipeendpoint(urb->pipe) |
2307                                  usb_pipein(urb->pipe)))
2308                         return 1;
2309         }
2310         return 0;
2311 }
2312
2313 /* Are there any URBs for a particular device/endpoint? */
2314 static int urbs_for_ep(struct uhci_hcd *uhci, struct hcd_dev *hdev, int ep)
2315 {
2316         int rc;
2317
2318         spin_lock_irq(&uhci->schedule_lock);
2319         rc = (urbs_for_ep_list(&uhci->urb_list, hdev, ep) ||
2320                         urbs_for_ep_list(&uhci->complete_list, hdev, ep) ||
2321                         urbs_for_ep_list(&uhci->urb_remove_list, hdev, ep));
2322         spin_unlock_irq(&uhci->schedule_lock);
2323         return rc;
2324 }
2325
/* Wait until all the URBs for a particular device/endpoint are gone,
 * so the endpoint can be safely disabled.  Sleeps interruptibly on
 * uhci->waitqh, which is woken whenever URBs complete. */
static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
		struct hcd_dev *hdev, int endpoint)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	wait_event_interruptible(uhci->waitqh,
			!urbs_for_ep(uhci, hdev, endpoint));
}
2335
/* hc_driver hook: report the controller's current frame number */
static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
	return uhci_get_current_frame_number(hcd_to_uhci(hcd));
}
2340
static const char hcd_name[] = "uhci_hcd";

/*
 * Operations table tying this driver into the generic USB HCD layer.
 */
static const struct hc_driver uhci_driver = {
	.description =		hcd_name,

	/* Generic hardware linkage */
	.irq =			uhci_irq,
	.flags =		HCD_USB11,

	/* Basic lifecycle operations */
	.reset =		uhci_reset,
	.start =		uhci_start,
#ifdef CONFIG_PM
	.suspend =		uhci_suspend,
	.resume =		uhci_resume,
#endif
	.stop =			uhci_stop,

	.hcd_alloc =		uhci_hcd_alloc,

	.urb_enqueue =		uhci_urb_enqueue,
	.urb_dequeue =		uhci_urb_dequeue,

	.endpoint_disable =	uhci_hcd_endpoint_disable,
	.get_frame_number =	uhci_hcd_get_frame_number,

	.hub_status_data =	uhci_hub_status_data,
	.hub_control =		uhci_hub_control,
};
2370
/* Match by PCI class code (Serial Bus / USB, UHCI prog-if 0x00) rather
 * than by vendor/device ID, so any compliant controller binds. */
static const struct pci_device_id uhci_pci_ids[] = { {
	/* handle any USB UHCI controller */
	PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
	.driver_data =	(unsigned long) &uhci_driver,
	}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
2379
/* PCI glue: probe/remove (and PM hooks) are the generic usb_hcd_pci_*
 * helpers, which dispatch into uhci_driver via the id table's data. */
static struct pci_driver uhci_pci_driver = {
	.name =		(char *)hcd_name,
	.id_table =	uhci_pci_ids,

	.probe =	usb_hcd_pci_probe,
	.remove =	usb_hcd_pci_remove,

#ifdef	CONFIG_PM
	.suspend =	usb_hcd_pci_suspend,
	.resume =	usb_hcd_pci_resume,
#endif	/* PM */
};
2392  
2393 static int __init uhci_hcd_init(void)
2394 {
2395         int retval = -ENOMEM;
2396
2397         printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");
2398
2399         if (usb_disabled())
2400                 return -ENODEV;
2401
2402         if (debug) {
2403                 errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
2404                 if (!errbuf)
2405                         goto errbuf_failed;
2406         }
2407
2408 #ifdef CONFIG_PROC_FS
2409         uhci_proc_root = create_proc_entry("driver/uhci", S_IFDIR, NULL);
2410         if (!uhci_proc_root)
2411                 goto proc_failed;
2412 #endif
2413
2414         uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
2415                 sizeof(struct urb_priv), 0, 0, NULL, NULL);
2416         if (!uhci_up_cachep)
2417                 goto up_failed;
2418
2419         retval = pci_register_driver(&uhci_pci_driver);
2420         if (retval)
2421                 goto init_failed;
2422
2423         return 0;
2424
2425 init_failed:
2426         if (kmem_cache_destroy(uhci_up_cachep))
2427                 warn("not all urb_priv's were freed!");
2428
2429 up_failed:
2430
2431 #ifdef CONFIG_PROC_FS
2432         remove_proc_entry("driver/uhci", NULL);
2433
2434 proc_failed:
2435 #endif
2436         if (errbuf)
2437                 kfree(errbuf);
2438
2439 errbuf_failed:
2440
2441         return retval;
2442 }
2443
2444 static void __exit uhci_hcd_cleanup(void) 
2445 {
2446         pci_unregister_driver(&uhci_pci_driver);
2447         
2448         if (kmem_cache_destroy(uhci_up_cachep))
2449                 warn("not all urb_priv's were freed!");
2450
2451 #ifdef CONFIG_PROC_FS
2452         remove_proc_entry("driver/uhci", NULL);
2453 #endif
2454
2455         if (errbuf)
2456                 kfree(errbuf);
2457 }
2458
/* Module entry/exit points and metadata */
module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");