/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licensed under the GPL.
 */

static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
{
        int             last = urb_priv->length - 1;

        if (last >= 0) {
                int             i;
                struct td       *td;

                for (i = 0; i <= last; i++) {
                        td = urb_priv->td [i];
                        if (td)
                                td_free (hc, td);
                }
        }

        list_del (&urb_priv->pending);
        kfree (urb_priv);
}

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION:  ohci lock held, irqs blocked.
 */
static void
finish_urb (struct ohci_hcd *ohci, struct urb *urb, struct pt_regs *regs)
{
        // ASSERT (urb->hcpriv != 0);

        urb_free_priv (ohci, urb->hcpriv);
        urb->hcpriv = NULL;

        spin_lock (&urb->lock);
        if (likely (urb->status == -EINPROGRESS))
                urb->status = 0;
        /* report short control reads correctly even though the data TD
         * always has TD_R set.  (much simpler, but creates the 1-td limit.)
         */
        if (unlikely (urb->transfer_flags & URB_SHORT_NOT_OK)
                        && unlikely (usb_pipecontrol (urb->pipe))
                        && urb->actual_length < urb->transfer_buffer_length
                        && usb_pipein (urb->pipe)
                        && urb->status == 0) {
                urb->status = -EREMOTEIO;
        }
        spin_unlock (&urb->lock);

        switch (usb_pipetype (urb->pipe)) {
        case PIPE_ISOCHRONOUS:
                hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs--;
                break;
        case PIPE_INTERRUPT:
                hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs--;
                break;
        }

#ifdef OHCI_VERBOSE_DEBUG
        urb_print (urb, "RET", usb_pipeout (urb->pipe));
#endif

        /* urb->complete() can reenter this HCD */
        spin_unlock (&ohci->lock);
        usb_hcd_giveback_urb (&ohci->hcd, urb, regs);
        spin_lock (&ohci->lock);

        /* stop periodic dma if it's not needed */
        if (hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs == 0
                        && hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs == 0) {
                ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
                writel (ohci->hc_control, &ohci->regs->control);
        }
}

/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
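/* Worked example (illustration only):  with the 32-entry interrupt
 * schedule, an ed with interval 8 may start in branches 0..7; choosing
 * branch 2 reserves its load in frames 2, 10, 18, 26 ... of the table.
 */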
static int balance (struct ohci_hcd *ohci, int interval, int load)
{
        int     i, branch = -ENOSPC;

        /* iso periods can be huge; iso tds specify frame numbers */
        if (interval > NUM_INTS)
                interval = NUM_INTS;

        /* search for the least loaded schedule branch of that period
         * that has enough bandwidth left unreserved.
         */
        for (i = 0; i < interval ; i++) {
                if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
#if 1   /* CONFIG_USB_BANDWIDTH */
                        int     j;

                        /* usb 1.1 says 90% of one frame */
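                        /* note: load is tracked in microseconds (ed_get()
                         * divides usb_calc_bus_time()'s nanoseconds by
                         * 1000), so 900 is 90% of a 1 msec frame.
                         */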
                        for (j = i; j < NUM_INTS; j += interval) {
                                if ((ohci->load [j] + load) > 900)
                                        break;
                        }
                        if (j < NUM_INTS)
                                continue;
#endif
                        branch = i;
                }
        }
        return branch;
}

/*-------------------------------------------------------------------------*/

/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place.  most iso devices use
 * 1msec periods, but that's not required.
 */
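/* For instance, an interval-32 ed occupies exactly one of the 32 branches,
 * while an interval-1 ed ends up at the tail of every branch; sorting each
 * branch slow-before-fast is what lets all branches share those fast eds.
 */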
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
        unsigned        i;

        ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
                (ed->hwINFO & ED_ISO) ? "iso " : "",
                ed, ed->branch, ed->load, ed->interval);

        for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
                struct ed       **prev = &ohci->periodic [i];
                __le32          *prev_p = &ohci->hcca->int_table [i];
                struct ed       *here = *prev;

                /* sorting each branch by period (slow before fast)
                 * lets us share the faster parts of the tree.
                 * (plus maybe: put interrupt eds before iso)
                 */
                while (here && ed != here) {
                        if (ed->interval > here->interval)
                                break;
                        prev = &here->ed_next;
                        prev_p = &here->hwNextED;
                        here = *prev;
                }
                if (ed != here) {
                        ed->ed_next = here;
                        if (here)
                                ed->hwNextED = *prev_p;
                        wmb ();
                        *prev = ed;
                        *prev_p = cpu_to_le32(ed->dma);
                        wmb();
                }
                ohci->load [i] += ed->load;
        }
        hcd_to_bus (&ohci->hcd)->bandwidth_allocated += ed->load / ed->interval;
}

/* link an ed into one of the HC chains */

static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
        int     branch;

        if (ohci->hcd.state == USB_STATE_QUIESCING)
                return -EAGAIN;

        ed->state = ED_OPER;
        ed->ed_prev = NULL;
        ed->ed_next = NULL;
        ed->hwNextED = 0;
        wmb ();

        /* we care about rm_list when setting CLE/BLE in case the HC was at
         * work on some TD when CLE/BLE was turned off, and isn't quiesced
         * yet.  finish_unlinks() restarts as needed, on some upcoming INTR_SF.
         *
         * control and bulk EDs are doubly linked (ed_next, ed_prev), but
         * periodic ones are singly linked (ed_next). that's because the
         * periodic schedule encodes a tree like figure 3-5 in the ohci
         * spec:  each qh can have several "previous" nodes, and the tree
         * doesn't have unused/idle descriptors.
         */
        switch (ed->type) {
        case PIPE_CONTROL:
                if (ohci->ed_controltail == NULL) {
                        WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
                        writel (ed->dma, &ohci->regs->ed_controlhead);
                } else {
                        ohci->ed_controltail->ed_next = ed;
                        ohci->ed_controltail->hwNextED = cpu_to_le32 (ed->dma);
                }
                ed->ed_prev = ohci->ed_controltail;
                if (!ohci->ed_controltail && !ohci->ed_rm_list) {
                        wmb();
                        ohci->hc_control |= OHCI_CTRL_CLE;
                        writel (0, &ohci->regs->ed_controlcurrent);
                        writel (ohci->hc_control, &ohci->regs->control);
                }
                ohci->ed_controltail = ed;
                break;

        case PIPE_BULK:
                if (ohci->ed_bulktail == NULL) {
                        WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
                        writel (ed->dma, &ohci->regs->ed_bulkhead);
                } else {
                        ohci->ed_bulktail->ed_next = ed;
                        ohci->ed_bulktail->hwNextED = cpu_to_le32 (ed->dma);
                }
                ed->ed_prev = ohci->ed_bulktail;
                if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
                        wmb();
                        ohci->hc_control |= OHCI_CTRL_BLE;
                        writel (0, &ohci->regs->ed_bulkcurrent);
                        writel (ohci->hc_control, &ohci->regs->control);
                }
                ohci->ed_bulktail = ed;
                break;

        // case PIPE_INTERRUPT:
        // case PIPE_ISOCHRONOUS:
        default:
                branch = balance (ohci, ed->interval, ed->load);
                if (branch < 0) {
                        ohci_dbg (ohci,
                                "ERR %d, interval %d msecs, load %d\n",
                                branch, ed->interval, ed->load);
                        // FIXME if there are TDs queued, fail them!
                        return branch;
                }
                ed->branch = branch;
                periodic_link (ohci, ed);
        }

        /* the HC may not see the schedule updates yet, but if it does
         * then they'll be properly ordered.
         */
        return 0;
}

/*-------------------------------------------------------------------------*/

/* scan the periodic table to find and unlink this ED */
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
        int     i;

        for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
                struct ed       *temp;
                struct ed       **prev = &ohci->periodic [i];
                __le32          *prev_p = &ohci->hcca->int_table [i];

                while (*prev && (temp = *prev) != ed) {
                        prev_p = &temp->hwNextED;
                        prev = &temp->ed_next;
                }
                if (*prev) {
                        *prev_p = ed->hwNextED;
                        *prev = ed->ed_next;
                }
                ohci->load [i] -= ed->load;
        }
        hcd_to_bus (&ohci->hcd)->bandwidth_allocated -= ed->load / ed->interval;

        ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
                (ed->hwINFO & ED_ISO) ? "iso " : "",
                ed, ed->branch, ed->load, ed->interval);
}

/* unlink an ed from one of the HC chains.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't.  ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new.  The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 *  - ED_OPER: when there's any request queued, the ED gets rescheduled
 *    immediately.  HC should be working on them.
 *
 *  - ED_IDLE:  when there's no TD queue. there's no reason for the HC
 *    to care about this ED; safe to disable the endpoint.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
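/* In short:  ED_OPER --ed_deschedule()--> ED_UNLINK --finish_unlinks()-->
 * ED_OPER (if TDs are still queued) or ED_IDLE (if the queue is empty).
 */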
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
        ed->hwINFO |= ED_SKIP;
        wmb ();
        ed->state = ED_UNLINK;

        /* To deschedule something from the control or bulk list, just
         * clear CLE/BLE and wait.  There's no safe way to scrub out list
         * head/current registers until later, and "later" isn't very
         * tightly specified.  Figure 6-5 and Section 6.4.2.2 show how
         * the HC is reading the ED queues (while we modify them).
         *
         * For now, ed_schedule() is "later".  It might be good paranoia
         * to scrub those registers in finish_unlinks(), in case of bugs
         * that make the HC try to use them.
         */
        switch (ed->type) {
        case PIPE_CONTROL:
                /* remove ED from the HC's list: */
                if (ed->ed_prev == NULL) {
                        if (!ed->hwNextED) {
                                ohci->hc_control &= ~OHCI_CTRL_CLE;
                                writel (ohci->hc_control, &ohci->regs->control);
                                // an ohci_readl() later syncs CLE with the HC
                        } else
                                writel (le32_to_cpup (&ed->hwNextED),
                                        &ohci->regs->ed_controlhead);
                } else {
                        ed->ed_prev->ed_next = ed->ed_next;
                        ed->ed_prev->hwNextED = ed->hwNextED;
                }
                /* remove ED from the HCD's list: */
                if (ohci->ed_controltail == ed) {
                        ohci->ed_controltail = ed->ed_prev;
                        if (ohci->ed_controltail)
                                ohci->ed_controltail->ed_next = NULL;
                } else if (ed->ed_next) {
                        ed->ed_next->ed_prev = ed->ed_prev;
                }
                break;

        case PIPE_BULK:
                /* remove ED from the HC's list: */
                if (ed->ed_prev == NULL) {
                        if (!ed->hwNextED) {
                                ohci->hc_control &= ~OHCI_CTRL_BLE;
                                writel (ohci->hc_control, &ohci->regs->control);
                                // an ohci_readl() later syncs BLE with the HC
                        } else
                                writel (le32_to_cpup (&ed->hwNextED),
                                        &ohci->regs->ed_bulkhead);
                } else {
                        ed->ed_prev->ed_next = ed->ed_next;
                        ed->ed_prev->hwNextED = ed->hwNextED;
                }
                /* remove ED from the HCD's list: */
                if (ohci->ed_bulktail == ed) {
                        ohci->ed_bulktail = ed->ed_prev;
                        if (ohci->ed_bulktail)
                                ohci->ed_bulktail->ed_next = NULL;
                } else if (ed->ed_next) {
                        ed->ed_next->ed_prev = ed->ed_prev;
                }
                break;

        // case PIPE_INTERRUPT:
        // case PIPE_ISOCHRONOUS:
        default:
                periodic_unlink (ohci, ed);
                break;
        }
}

/*-------------------------------------------------------------------------*/

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of usb_set_configuration() or usb_set_interface() ... but the USB stack
 * isn't very stateful, so we re-init whenever the HC isn't looking.
 */
static struct ed *ed_get (
        struct ohci_hcd         *ohci,
        struct usb_device       *udev,
        unsigned int            pipe,
        int                     interval
) {
        int                     is_out = !usb_pipein (pipe);
        int                     type = usb_pipetype (pipe);
        struct hcd_dev          *dev = (struct hcd_dev *) udev->hcpriv;
        struct ed               *ed;
        unsigned                ep;
        unsigned long           flags;

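        /* dev->ep [] is indexed by (endpoint number << 1), with bit 0 set
         * for non-control OUT endpoints; control endpoints carry both
         * directions, so they only use the even slot.
         */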
        ep = usb_pipeendpoint (pipe) << 1;
        if (type != PIPE_CONTROL && is_out)
                ep |= 1;

        spin_lock_irqsave (&ohci->lock, flags);

        if (!(ed = dev->ep [ep])) {
                struct td       *td;

                ed = ed_alloc (ohci, GFP_ATOMIC);
                if (!ed) {
                        /* out of memory */
                        goto done;
                }
                dev->ep [ep] = ed;

                /* dummy td; end of td list for ed */
                td = td_alloc (ohci, GFP_ATOMIC);
                if (!td) {
                        /* out of memory */
                        ed_free (ohci, ed);
                        ed = NULL;
                        goto done;
                }
                ed->dummy = td;
                ed->hwTailP = cpu_to_le32 (td->td_dma);
                ed->hwHeadP = ed->hwTailP;      /* ED_C, ED_H zeroed */
                ed->state = ED_IDLE;
                ed->type = type;
        }

        /* NOTE: only ep0 currently needs this "re"init logic, during
         * enumeration (after set_address).
         */
        if (ed->state == ED_IDLE) {
                u32     info;
                __le32  hw_info;

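                /* build ED dword 0 (ohci spec 4.2):  function address in
                 * bits 6:0, endpoint number in bits 10:7, maxpacket in
                 * bits 26:16, with speed/direction/format flags between.
                 */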
                info = usb_pipedevice (pipe);
                info |= (ep >> 1) << 7;
                info |= usb_maxpacket (udev, pipe, is_out) << 16;
                hw_info = cpu_to_le32 (info);
                if (udev->speed == USB_SPEED_LOW)
                        hw_info |= ED_LOWSPEED;
                /* only control transfers store pids in tds */
                if (type != PIPE_CONTROL) {
                        hw_info |= is_out ? ED_OUT : ED_IN;
                        if (type != PIPE_BULK) {
                                /* periodic transfers... */
                                if (type == PIPE_ISOCHRONOUS)
                                        hw_info |= ED_ISO;
                                else if (interval > 32) /* iso can be bigger */
                                        interval = 32;
                                ed->interval = interval;
                                ed->load = usb_calc_bus_time (
                                        udev->speed, !is_out,
                                        type == PIPE_ISOCHRONOUS,
                                        usb_maxpacket (udev, pipe, is_out))
                                                / 1000;
                        }
                }
                ed->hwINFO = hw_info;
        }

done:
        spin_unlock_irqrestore (&ohci->lock, flags);
        return ed;
}

/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SF) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
        ed->hwINFO |= ED_DEQUEUE;
        ed_deschedule (ohci, ed);

        /* rm_list is just singly linked, for simplicity */
        ed->ed_next = ohci->ed_rm_list;
        ed->ed_prev = NULL;
        ohci->ed_rm_list = ed;

        /* enable SOF interrupt */
        writel (OHCI_INTR_SF, &ohci->regs->intrstatus);
        writel (OHCI_INTR_SF, &ohci->regs->intrenable);
        // flush those writes, and get latest HCCA contents
        (void) ohci_readl (&ohci->regs->control);

        /* SF interrupt might get delayed; record the frame counter value
         * after which the HC can no longer see this ED, so concurrent
         * unlinks behave.  frame_no wraps every 2^16 msec, and changes
         * right before SF is triggered.
         */
        ed->tick = OHCI_FRAME_NO(ohci->hcca) + 1;
}

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */

static void
td_fill (struct ohci_hcd *ohci, u32 info,
        dma_addr_t data, int len,
        struct urb *urb, int index)
{
        struct td               *td, *td_pt;
        struct urb_priv         *urb_priv = urb->hcpriv;
        int                     is_iso = info & TD_ISO;
        int                     hash;

        // ASSERT (index < urb_priv->length);

        /* aim for only one interrupt per urb.  mostly applies to control
         * and iso; other urbs rarely need more than one TD per urb.
         * this way, only final tds (or ones with an error) cause IRQs.
         * at least immediately; use DI=6 in case any control request is
         * tempted to die part way through.  (and to force the hc to flush
         * its donelist soonish, even on unlink paths.)
         *
         * NOTE: could delay interrupts even for the last TD, and get fewer
         * interrupts ... increasing per-urb latency by sharing interrupts.
         * Drivers that queue bulk urbs may request that behavior.
         */
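        /* DI is the TD's three-bit "DelayInterrupt" field:  the HC may
         * wait up to that many frames before raising WDH, and the
         * all-ones value (7) means no interrupt for this TD at all.
         */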
        if (index != (urb_priv->length - 1)
                        || (urb->transfer_flags & URB_NO_INTERRUPT))
                info |= TD_DI_SET (6);

        /* use this td as the next dummy */
        td_pt = urb_priv->td [index];

        /* fill the old dummy TD */
        td = urb_priv->td [index] = urb_priv->ed->dummy;
        urb_priv->ed->dummy = td_pt;

        td->ed = urb_priv->ed;
        td->next_dl_td = NULL;
        td->index = index;
        td->urb = urb;
        td->data_dma = data;
        if (!len)
                data = 0;

        td->hwINFO = cpu_to_le32 (info);
        if (is_iso) {
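                /* iso TDs address their buffer page-relative:  hwCBP holds
                 * the 4 KB page, PSW [0] the offset into it, and 0xE000
                 * presets the PSW condition code to "not accessed" (0xE).
                 */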
                td->hwCBP = cpu_to_le32 (data & 0xFFFFF000);
                td->hwPSW [0] = cpu_to_le16 ((data & 0x0FFF) | 0xE000);
                td->ed->last_iso = info & 0xffff;
        } else {
                td->hwCBP = cpu_to_le32 (data);
        }
        if (data)
                td->hwBE = cpu_to_le32 (data + len - 1);
        else
                td->hwBE = 0;
        td->hwNextTD = cpu_to_le32 (td_pt->td_dma);

        /* append to queue */
        list_add_tail (&td->td_list, &td->ed->td_list);

        /* hash it for later reverse mapping */
        hash = TD_HASH_FUNC (td->td_dma);
        td->td_hash = ohci->td_hash [hash];
        ohci->td_hash [hash] = td;

        /* HC might read the TD (or cachelines) right away ... */
        wmb ();
        td->ed->hwTailP = td->hwNextTD;
}

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb (
        struct ohci_hcd *ohci,
        struct urb      *urb
) {
        struct urb_priv *urb_priv = urb->hcpriv;
        dma_addr_t      data;
        int             data_len = urb->transfer_buffer_length;
        int             cnt = 0;
        u32             info = 0;
        int             is_out = usb_pipeout (urb->pipe);
        int             periodic = 0;

        /* OHCI handles the bulk/interrupt data toggles itself.  We just
         * use the device toggle bits for resetting, and rely on the fact
         * that resetting toggle is meaningless if the endpoint is active.
         */
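        /* (clearing ED_C in hwHeadP clears the ed's toggleCarry bit, so
         * the endpoint restarts at DATA0.)
         */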
        if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
                usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
                        is_out, 1);
                urb_priv->ed->hwHeadP &= ~ED_C;
        }

        urb_priv->td_cnt = 0;
        list_add (&urb_priv->pending, &ohci->pending);

        if (data_len)
                data = urb->transfer_dma;
        else
                data = 0;

        /* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
         * using TD_CC_GET, as well as by seeing them on the done list.
         * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
         */
        switch (urb_priv->ed->type) {

        /* Bulk and interrupt are identical except for where in the schedule
         * their EDs live.
         */
        case PIPE_INTERRUPT:
                /* ... and periodic urbs have extra accounting */
                periodic = hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs++ == 0
                        && hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs == 0;
                /* FALLTHROUGH */
        case PIPE_BULK:
                info = is_out
                        ? TD_T_TOGGLE | TD_CC | TD_DP_OUT
                        : TD_T_TOGGLE | TD_CC | TD_DP_IN;
                /* TDs _could_ transfer up to 8K each */
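                /* ... though only when the buffer starts page-aligned,
                 * since a TD may span at most one 4 KB page boundary;
                 * arbitrarily aligned 4 KB chunks always fit.
                 */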
                while (data_len > 4096) {
                        td_fill (ohci, info, data, 4096, urb, cnt);
                        data += 4096;
                        data_len -= 4096;
                        cnt++;
                }
                /* maybe avoid ED halt on final TD short read */
                if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
                        info |= TD_R;
                td_fill (ohci, info, data, data_len, urb, cnt);
                cnt++;
                if ((urb->transfer_flags & URB_ZERO_PACKET)
                                && cnt < urb_priv->length) {
                        td_fill (ohci, info, 0, 0, urb, cnt);
                        cnt++;
                }
                /* maybe kickstart bulk list */
                if (urb_priv->ed->type == PIPE_BULK) {
                        wmb ();
                        writel (OHCI_BLF, &ohci->regs->cmdstatus);
                }
                break;

        /* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
         * any DATA phase works normally, and the STATUS ack is special.
         */
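        /* (the STATUS stage always uses DATA1 and runs opposite to the
         * data stage direction -- or IN, when there's no data stage.)
         */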
        case PIPE_CONTROL:
                info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
                td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
                if (data_len > 0) {
                        info = TD_CC | TD_R | TD_T_DATA1;
                        info |= is_out ? TD_DP_OUT : TD_DP_IN;
                        /* NOTE:  mishandles transfers >8K, some >4K */
                        td_fill (ohci, info, data, data_len, urb, cnt++);
                }
                info = is_out
                        ? TD_CC | TD_DP_IN | TD_T_DATA1
                        : TD_CC | TD_DP_OUT | TD_T_DATA1;
                td_fill (ohci, info, data, 0, urb, cnt++);
                /* maybe kickstart control list */
                wmb ();
                writel (OHCI_CLF, &ohci->regs->cmdstatus);
                break;

        /* ISO has no retransmit, so no toggle; and it uses special TDs.
         * Each TD could handle multiple consecutive frames (interval 1);
         * we could often reduce the number of TDs here.
         */
        case PIPE_ISOCHRONOUS:
                for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
                        int     frame = urb->start_frame;

                        // FIXME scheduling should handle frame counter
                        // roll-around ... exotic case (and OHCI has
                        // a 2^16 iso range, vs other HCs max of 2^10)
                        frame += cnt * urb->interval;
                        frame &= 0xffff;
                        td_fill (ohci, TD_CC | TD_ISO | frame,
                                data + urb->iso_frame_desc [cnt].offset,
                                urb->iso_frame_desc [cnt].length, urb, cnt);
                }
                periodic = hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs++ == 0
                        && hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs == 0;
                break;
        }

        /* start periodic dma if needed */
        if (periodic) {
                wmb ();
                ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
                writel (ohci->hc_control, &ohci->regs->control);
        }

        // ASSERT (urb_priv->length == cnt);
}

/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/

/* calculate transfer length/status and update the urb
 * PRECONDITION:  irqsafe (only for urb->status locking)
 */
static void td_done (struct ohci_hcd *ohci, struct urb *urb, struct td *td)
{
        u32     tdINFO = le32_to_cpup (&td->hwINFO);
        int     cc = 0;

        list_del (&td->td_list);

        /* ISO ... drivers see per-TD length/status */
        if (tdINFO & TD_ISO) {
                u16     tdPSW = le16_to_cpu (td->hwPSW [0]);
                int     dlen = 0;

                /* NOTE:  assumes FC in tdINFO == 0 (and MAXPSW == 1) */
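                /* PSW layout:  condition code in bits 15:12; for IN
                 * transfers, the low bits return the byte count the HC
                 * actually received.
                 */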

                cc = (tdPSW >> 12) & 0xF;
                if (tdINFO & TD_CC)     /* hc didn't touch? */
                        return;

                if (usb_pipeout (urb->pipe))
                        dlen = urb->iso_frame_desc [td->index].length;
                else {
                        /* short reads are always OK for ISO */
                        if (cc == TD_DATAUNDERRUN)
                                cc = TD_CC_NOERROR;
                        dlen = tdPSW & 0x3ff;
                }
                urb->actual_length += dlen;
                urb->iso_frame_desc [td->index].actual_length = dlen;
                urb->iso_frame_desc [td->index].status = cc_to_error [cc];

                if (cc != TD_CC_NOERROR)
                        ohci_vdbg (ohci,
                                "urb %p iso td %p (%d) len %d cc %d\n",
                                urb, td, 1 + td->index, dlen, cc);

        /* BULK, INT, CONTROL ... drivers see aggregate length/status,
         * except that "setup" bytes aren't counted and "short" transfers
         * might not be reported as errors.
         */
        } else {
                int     type = usb_pipetype (urb->pipe);
                u32     tdBE = le32_to_cpup (&td->hwBE);

                cc = TD_CC_GET (tdINFO);

                /* update packet status if needed (short is normally ok) */
                if (cc == TD_DATAUNDERRUN
                                && !(urb->transfer_flags & URB_SHORT_NOT_OK))
                        cc = TD_CC_NOERROR;
                if (cc != TD_CC_NOERROR && cc < 0x0E) {
                        spin_lock (&urb->lock);
                        if (urb->status == -EINPROGRESS)
                                urb->status = cc_to_error [cc];
                        spin_unlock (&urb->lock);
                }

                /* count all non-empty packets except control SETUP packet */
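                /* the HC zeroes hwCBP once it has used the whole buffer;
                 * otherwise hwCBP points at the next untransferred byte.
                 */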
                if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
                        if (td->hwCBP == 0)
                                urb->actual_length += tdBE - td->data_dma + 1;
                        else
                                urb->actual_length +=
                                          le32_to_cpup (&td->hwCBP)
                                        - td->data_dma;
                }

                if (cc != TD_CC_NOERROR && cc < 0x0E)
                        ohci_vdbg (ohci,
                                "urb %p td %p (%d) cc %d, len=%d/%d\n",
                                urb, td, 1 + td->index, cc,
                                urb->actual_length,
                                urb->transfer_buffer_length);
        }
}

/*-------------------------------------------------------------------------*/

static inline struct td *
ed_halted (struct ohci_hcd *ohci, struct td *td, int cc, struct td *rev)
{
        struct urb              *urb = td->urb;
        struct ed               *ed = td->ed;
        struct list_head        *tmp = td->td_list.next;
        __le32                  toggle = ed->hwHeadP & ED_C;

        /* clear ed halt; this is the td that caused it, but keep it inactive
         * until its urb->complete() has a chance to clean up.
         */
        ed->hwINFO |= ED_SKIP;
        wmb ();
        ed->hwHeadP &= ~ED_H;

        /* put any later tds from this urb onto the donelist, after 'td',
         * order won't matter here: no errors, and nothing was transferred.
         * also patch the ed so it looks as if those tds completed normally.
         */
        while (tmp != &ed->td_list) {
                struct td       *next;
                __le32          info;

                next = list_entry (tmp, struct td, td_list);
                tmp = next->td_list.next;

                if (next->urb != urb)
                        break;

                /* NOTE: if multi-td control DATA segments get supported,
                 * this urb had one of them, this td wasn't the last td
                 * in that segment (TD_R clear), this ed halted because
                 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
                 * then we need to leave the control STATUS packet queued
                 * and clear ED_SKIP.
                 */
                info = next->hwINFO;
                info |= cpu_to_le32 (TD_DONE);
                info &= ~cpu_to_le32 (TD_CC);
                next->hwINFO = info;

                next->next_dl_td = rev;
                rev = next;

                ed->hwHeadP = next->hwNextTD | toggle;
        }

        /* help for troubleshooting:  report anything that
         * looks odd ... that doesn't include protocol stalls
         * (or maybe some other things)
         */
        switch (cc) {
        case TD_DATAUNDERRUN:
                if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
                        break;
                /* fallthrough */
        case TD_CC_STALL:
                if (usb_pipecontrol (urb->pipe))
                        break;
                /* fallthrough */
        default:
                ohci_dbg (ohci,
                        "urb %p path %s ep%d%s %08x cc %d --> status %d\n",
                        urb, urb->dev->devpath,
                        usb_pipeendpoint (urb->pipe),
                        usb_pipein (urb->pipe) ? "in" : "out",
                        le32_to_cpu (td->hwINFO),
                        cc, cc_to_error [cc]);
        }

        return rev;
}

/* URBs must complete in FIFO order, so we un-reverse the done list that
 * the HC built in LIFO order.
 */
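/* (the HC prepends each retired TD to hcca->done_head, newest first;
 * pushing each one onto td_rev below restores oldest-first order.)
 */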
static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
{
        u32             td_dma;
        struct td       *td_rev = NULL;
        struct td       *td = NULL;

        td_dma = le32_to_cpup (&ohci->hcca->done_head);
        ohci->hcca->done_head = 0;
        wmb();

        /* get TD from hc's singly linked list, and
         * prepend to ours.  ed->td_list changes later.
         */
        while (td_dma) {
                int             cc;

                td = dma_to_td (ohci, td_dma);
                if (!td) {
                        ohci_err (ohci, "bad entry %8x\n", td_dma);
                        break;
                }

                td->hwINFO |= cpu_to_le32 (TD_DONE);
                cc = TD_CC_GET (le32_to_cpup (&td->hwINFO));

                /* Non-iso endpoints can halt on error; un-halt,
                 * and dequeue any other TDs from this urb.
                 * No other TD could have caused the halt.
                 */
                if (cc != TD_CC_NOERROR && (td->ed->hwHeadP & ED_H))
                        td_rev = ed_halted (ohci, td, cc, td_rev);

                td->next_dl_td = td_rev;
                td_rev = td;
                td_dma = le32_to_cpup (&td->hwNextTD);
        }
        return td_rev;
}

/*-------------------------------------------------------------------------*/

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks (struct ohci_hcd *ohci, u16 tick, struct pt_regs *regs)
{
        struct ed       *ed, **last;

rescan_all:
        for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
                struct list_head        *entry, *tmp;
                int                     completed, modified;
                __le32                  *prev;

                /* only take off EDs that the HC isn't using, accounting for
                 * frame counter wraps and EDs with partially retired TDs
                 */
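                /* (tick_before() presumably compares the 16-bit frame
                 * numbers modulo 2^16, so this test stays correct across
                 * frame counter wrap.)
                 */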
                if (likely (regs && HCD_IS_RUNNING(ohci->hcd.state))) {
                        if (tick_before (tick, ed->tick)) {
skip_ed:
                                last = &ed->ed_next;
                                continue;
                        }

                        if (!list_empty (&ed->td_list)) {
                                struct td       *td;
                                u32             head;

                                td = list_entry (ed->td_list.next, struct td,
                                                        td_list);
                                head = le32_to_cpu (ed->hwHeadP) & TD_MASK;

                                /* INTR_WDH may need to clean up first */
                                if (td->td_dma != head)
                                        goto skip_ed;
                        }
                }

                /* reentrancy:  if we drop the schedule lock, someone might
                 * have modified this list.  normally it's just prepending
                 * entries (which we'd ignore), but paranoia won't hurt.
                 */
                *last = ed->ed_next;
                ed->ed_next = NULL;
                modified = 0;

                /* unlink urbs as requested, but rescan the list after
                 * we call a completion since it might have unlinked
                 * another (earlier) urb
                 *
                 * When we get here, the HC doesn't see this ed.  But it
                 * must not be rescheduled until all completed URBs have
                 * been given back to the driver.
                 */
rescan_this:
                completed = 0;
                prev = &ed->hwHeadP;
                list_for_each_safe (entry, tmp, &ed->td_list) {
                        struct td       *td;
                        struct urb      *urb;
                        urb_priv_t      *urb_priv;
                        __le32          savebits;

                        td = list_entry (entry, struct td, td_list);
                        urb = td->urb;
                        urb_priv = td->urb->hcpriv;

                        if (urb->status == -EINPROGRESS) {
                                prev = &td->hwNextTD;
                                continue;
                        }

                        /* patch pointer hc uses */
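                        /* (~TD_MASK keeps the low flag bits -- halt and
                         * toggle carry, when prev is hwHeadP -- while
                         * swapping in the next TD's address.)
                         */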
                        savebits = *prev & ~cpu_to_le32 (TD_MASK);
                        *prev = td->hwNextTD | savebits;

                        /* HC may have partly processed this TD */
                        td_done (ohci, urb, td);
                        urb_priv->td_cnt++;

                        /* if URB is done, clean up */
                        if (urb_priv->td_cnt == urb_priv->length) {
                                modified = completed = 1;
                                finish_urb (ohci, urb, regs);
                        }
                }
                if (completed && !list_empty (&ed->td_list))
                        goto rescan_this;

                /* ED's now officially unlinked, hc doesn't see */
                ed->state = ED_IDLE;
                ed->hwHeadP &= ~ED_H;
                ed->hwNextED = 0;
                wmb ();
                ed->hwINFO &= ~(ED_SKIP | ED_DEQUEUE);

                /* but if there's work queued, reschedule */
                if (!list_empty (&ed->td_list)) {
                        if (HCD_IS_RUNNING(ohci->hcd.state))
                                ed_schedule (ohci, ed);
                }

                if (modified)
                        goto rescan_all;
        }

        /* maybe reenable control and bulk lists */
        if (HCD_IS_RUNNING(ohci->hcd.state)
                        && ohci->hcd.state != USB_STATE_QUIESCING
                        && !ohci->ed_rm_list) {
                u32     command = 0, control = 0;

                if (ohci->ed_controltail) {
                        command |= OHCI_CLF;
                        if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
                                control |= OHCI_CTRL_CLE;
                                writel (0, &ohci->regs->ed_controlcurrent);
                        }
                }
                if (ohci->ed_bulktail) {
                        command |= OHCI_BLF;
                        if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
                                control |= OHCI_CTRL_BLE;
                                writel (0, &ohci->regs->ed_bulkcurrent);
                        }
                }

                /* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
                if (control) {
                        ohci->hc_control |= control;
                        writel (ohci->hc_control, &ohci->regs->control);
                }
                if (command)
                        writel (command, &ohci->regs->cmdstatus);
        }
}

/*-------------------------------------------------------------------------*/

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of
 * scanning the (re-reversed) donelist as this does.
 */
static void
dl_done_list (struct ohci_hcd *ohci, struct pt_regs *regs)
{
        struct td       *td = dl_reverse_done_list (ohci);

        while (td) {
                struct td       *td_next = td->next_dl_td;
                struct urb      *urb = td->urb;
                urb_priv_t      *urb_priv = urb->hcpriv;
                struct ed       *ed = td->ed;

                /* update URB's length and status from TD */
                td_done (ohci, urb, td);
                urb_priv->td_cnt++;

                /* If all this urb's TDs are done, call complete() */
                if (urb_priv->td_cnt == urb_priv->length)
                        finish_urb (ohci, urb, regs);

                /* clean schedule:  unlink EDs that are no longer busy */
                if (list_empty (&ed->td_list)) {
                        if (ed->state == ED_OPER)
                                start_ed_unlink (ohci, ed);

                /* ... reenabling halted EDs only after fault cleanup */
                } else if ((ed->hwINFO & (ED_SKIP | ED_DEQUEUE)) == ED_SKIP) {
                        td = list_entry (ed->td_list.next, struct td, td_list);
                        if (!(td->hwINFO & TD_DONE)) {
                                ed->hwINFO &= ~ED_SKIP;
                                /* ... hc may need waking-up */
                                switch (ed->type) {
                                case PIPE_CONTROL:
                                        writel (OHCI_CLF,
                                                &ohci->regs->cmdstatus);
                                        break;
                                case PIPE_BULK:
                                        writel (OHCI_BLF,
                                                &ohci->regs->cmdstatus);
                                        break;
                                }
                        }
                }

                td = td_next;
        }
}