[linux-2.6.git] drivers/usb/host/ohci-q.c
/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licensed under the GPL.
 */

static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
{
        int             last = urb_priv->length - 1;

        if (last >= 0) {
                int             i;
                struct td       *td;

                for (i = 0; i <= last; i++) {
                        td = urb_priv->td [i];
                        if (td)
                                td_free (hc, td);
                }
        }

        list_del (&urb_priv->pending);
        kfree (urb_priv);
}

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION:  ohci lock held, irqs blocked.
 */
static void
finish_urb (struct ohci_hcd *ohci, struct urb *urb, struct pt_regs *regs)
{
        // ASSERT (urb->hcpriv != 0);

        urb_free_priv (ohci, urb->hcpriv);
        urb->hcpriv = NULL;

        spin_lock (&urb->lock);
        if (likely (urb->status == -EINPROGRESS))
                urb->status = 0;
        /* report short control reads right even though the data TD always
         * has TD_R set.  (much simpler, but creates the 1-td limit.)
         */
        if (unlikely (urb->transfer_flags & URB_SHORT_NOT_OK)
                        && unlikely (usb_pipecontrol (urb->pipe))
                        && urb->actual_length < urb->transfer_buffer_length
                        && usb_pipein (urb->pipe)
                        && urb->status == 0) {
                urb->status = -EREMOTEIO;
        }
        spin_unlock (&urb->lock);

        switch (usb_pipetype (urb->pipe)) {
        case PIPE_ISOCHRONOUS:
                hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs--;
                break;
        case PIPE_INTERRUPT:
                hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs--;
                break;
        }

#ifdef OHCI_VERBOSE_DEBUG
        urb_print (urb, "RET", usb_pipeout (urb->pipe));
#endif

        /* urb->complete() can reenter this HCD */
        spin_unlock (&ohci->lock);
        usb_hcd_giveback_urb (&ohci->hcd, urb, regs);
        spin_lock (&ohci->lock);

        /* stop periodic dma if it's not needed */
        if (hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs == 0
                        && hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs == 0) {
                ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
                writel (ohci->hc_control, &ohci->regs->control);
        }
}
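
/* Editorial sketch (not part of the original driver): the lock drop
 * around usb_hcd_giveback_urb() above matters because a completion
 * handler may resubmit immediately, reentering this HCD's enqueue path,
 * which takes ohci->lock again.  A hypothetical driver-side completion
 * showing that reentrancy (names are illustrative):
 *
 *      static void sample_complete (struct urb *urb, struct pt_regs *regs)
 *      {
 *              if (urb->status == 0)
 *                      usb_submit_urb (urb, GFP_ATOMIC);  // reenters the HCD
 *      }
 */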


/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
static int balance (struct ohci_hcd *ohci, int interval, int load)
{
        int     i, branch = -ENOSPC;

        /* iso periods can be huge; iso tds specify frame numbers */
        if (interval > NUM_INTS)
                interval = NUM_INTS;

        /* search for the least loaded schedule branch of that period
         * that has enough bandwidth left unreserved.
         */
        for (i = 0; i < interval; i++) {
                if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
#if 1   /* CONFIG_USB_BANDWIDTH */
                        int     j;

                        /* usb 1.1 says 90% of one frame */
                        for (j = i; j < NUM_INTS; j += interval) {
                                if ((ohci->load [j] + load) > 900)
                                        break;
                        }
                        if (j < NUM_INTS)
                                continue;
#endif
                        branch = i;
                }
        }
        return branch;
}
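
/* Worked example (editorial, assuming NUM_INTS == 32 as in ohci.h):
 * for an interrupt ED with interval 8 and load 100 us, balance() looks
 * at branches 0..7.  If ohci->load[] held 850 us in frames 0, 8, 16
 * and 24, candidate branch 0 fails the inner check (850 + 100 > 900 us,
 * USB 1.1's limit of 90% of a 1 msec frame) and is passed over; branch
 * 1 then passes and is returned, and periodic_link() will charge the
 * 100 us to frames 1, 9, 17 and 25.
 */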

/*-------------------------------------------------------------------------*/

/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place.  most iso devices use
 * 1msec periods, but that's not required.
 */
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
        unsigned        i;

        ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
                (ed->hwINFO & ED_ISO) ? "iso " : "",
                ed, ed->branch, ed->load, ed->interval);

        for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
                struct ed       **prev = &ohci->periodic [i];
                u32             *prev_p = &ohci->hcca->int_table [i];
                struct ed       *here = *prev;

                /* sorting each branch by period (slow before fast)
                 * lets us share the faster parts of the tree.
                 * (plus maybe: put interrupt eds before iso)
                 */
                while (here && ed != here) {
                        if (ed->interval > here->interval)
                                break;
                        prev = &here->ed_next;
                        prev_p = &here->hwNextED;
                        here = *prev;
                }
                if (ed != here) {
                        ed->ed_next = here;
                        if (here)
                                ed->hwNextED = *prev_p;
                        wmb ();
                        *prev = ed;
                        *prev_p = cpu_to_le32p (&ed->dma);
                        wmb ();
                }
                ohci->load [i] += ed->load;
        }
        hcd_to_bus (&ohci->hcd)->bandwidth_allocated += ed->load / ed->interval;
}
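
/* Editorial sketch of the resulting tree (cf. OHCI spec figure 3-5):
 * an interval-32 ED lives in exactly one int_table branch, an
 * interval-2 ED in 16 branches, an interval-1 ED in all 32.  Because
 * each branch is kept sorted slow-before-fast, the fastest EDs form a
 * shared tail instead of being duplicated:
 *
 *      int_table[0] -> ed(32ms) -> ed(2ms) -+-> ed(1ms) -> ...
 *      int_table[2] -------------> ed(2ms) -+      (same ed, shared tail)
 *      int_table[1] ------------------------+
 */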

/* link an ed into one of the HC chains */

static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
        int     branch;

        if (ohci->hcd.state == USB_STATE_QUIESCING)
                return -EAGAIN;

        ed->state = ED_OPER;
        ed->ed_prev = NULL;
        ed->ed_next = NULL;
        ed->hwNextED = 0;
        wmb ();

        /* we care about rm_list when setting CLE/BLE in case the HC was at
         * work on some TD when CLE/BLE was turned off, and isn't quiesced
         * yet.  finish_unlinks() restarts as needed, on some upcoming INTR_SF.
         *
         * control and bulk EDs are doubly linked (ed_next, ed_prev), but
         * periodic ones are singly linked (ed_next). that's because the
         * periodic schedule encodes a tree like figure 3-5 in the ohci
         * spec:  each ED can have several "previous" nodes, and the tree
         * doesn't have unused/idle descriptors.
         */
        switch (ed->type) {
        case PIPE_CONTROL:
                if (ohci->ed_controltail == NULL) {
                        WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
                        writel (ed->dma, &ohci->regs->ed_controlhead);
                } else {
                        ohci->ed_controltail->ed_next = ed;
                        ohci->ed_controltail->hwNextED = cpu_to_le32 (ed->dma);
                }
                ed->ed_prev = ohci->ed_controltail;
                if (!ohci->ed_controltail && !ohci->ed_rm_list) {
                        wmb ();
                        ohci->hc_control |= OHCI_CTRL_CLE;
                        writel (0, &ohci->regs->ed_controlcurrent);
                        writel (ohci->hc_control, &ohci->regs->control);
                }
                ohci->ed_controltail = ed;
                break;

        case PIPE_BULK:
                if (ohci->ed_bulktail == NULL) {
                        WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
                        writel (ed->dma, &ohci->regs->ed_bulkhead);
                } else {
                        ohci->ed_bulktail->ed_next = ed;
                        ohci->ed_bulktail->hwNextED = cpu_to_le32 (ed->dma);
                }
                ed->ed_prev = ohci->ed_bulktail;
                if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
                        wmb ();
                        ohci->hc_control |= OHCI_CTRL_BLE;
                        writel (0, &ohci->regs->ed_bulkcurrent);
                        writel (ohci->hc_control, &ohci->regs->control);
                }
                ohci->ed_bulktail = ed;
                break;

        // case PIPE_INTERRUPT:
        // case PIPE_ISOCHRONOUS:
        default:
                branch = balance (ohci, ed->interval, ed->load);
                if (branch < 0) {
                        ohci_dbg (ohci,
                                "ERR %d, interval %d msecs, load %d\n",
                                branch, ed->interval, ed->load);
                        // FIXME if there are TDs queued, fail them!
                        return branch;
                }
                ed->branch = branch;
                periodic_link (ohci, ed);
        }

        /* the HC may not see the schedule updates yet, but if it does
         * then they'll be properly ordered.
         */
        return 0;
}

/*-------------------------------------------------------------------------*/

/* scan the periodic table to find and unlink this ED */
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
        int     i;

        for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
                struct ed       *temp;
                struct ed       **prev = &ohci->periodic [i];
                u32             *prev_p = &ohci->hcca->int_table [i];

                while (*prev && (temp = *prev) != ed) {
                        prev_p = &temp->hwNextED;
                        prev = &temp->ed_next;
                }
                if (*prev) {
                        *prev_p = ed->hwNextED;
                        *prev = ed->ed_next;
                }
                ohci->load [i] -= ed->load;
        }
        hcd_to_bus (&ohci->hcd)->bandwidth_allocated -= ed->load / ed->interval;

        ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
                (ed->hwINFO & ED_ISO) ? "iso " : "",
                ed, ed->branch, ed->load, ed->interval);
}

/* unlink an ed from one of the HC chains.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't.  ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new.  The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 *  - ED_OPER: when there's any request queued, the ED gets rescheduled
 *    immediately.  HC should be working on them.
 *
 *  - ED_IDLE:  when there's no TD queue. there's no reason for the HC
 *    to care about this ED; safe to disable the endpoint.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
        ed->hwINFO |= ED_SKIP;
        wmb ();
        ed->state = ED_UNLINK;

        /* To deschedule something from the control or bulk list, just
         * clear CLE/BLE and wait.  There's no safe way to scrub out list
         * head/current registers until later, and "later" isn't very
         * tightly specified.  Figure 6-5 and Section 6.4.2.2 show how
         * the HC is reading the ED queues (while we modify them).
         *
         * For now, ed_schedule() is "later".  It might be good paranoia
         * to scrub those registers in finish_unlinks(), in case of bugs
         * that make the HC try to use them.
         */
        switch (ed->type) {
        case PIPE_CONTROL:
                /* remove ED from the HC's list: */
                if (ed->ed_prev == NULL) {
                        if (!ed->hwNextED) {
                                ohci->hc_control &= ~OHCI_CTRL_CLE;
                                writel (ohci->hc_control, &ohci->regs->control);
                                // a readl() later syncs CLE with the HC
                        } else
                                writel (le32_to_cpup (&ed->hwNextED),
                                        &ohci->regs->ed_controlhead);
                } else {
                        ed->ed_prev->ed_next = ed->ed_next;
                        ed->ed_prev->hwNextED = ed->hwNextED;
                }
                /* remove ED from the HCD's list: */
                if (ohci->ed_controltail == ed) {
                        ohci->ed_controltail = ed->ed_prev;
                        if (ohci->ed_controltail)
                                ohci->ed_controltail->ed_next = NULL;
                } else if (ed->ed_next) {
                        ed->ed_next->ed_prev = ed->ed_prev;
                }
                break;

        case PIPE_BULK:
                /* remove ED from the HC's list: */
                if (ed->ed_prev == NULL) {
                        if (!ed->hwNextED) {
                                ohci->hc_control &= ~OHCI_CTRL_BLE;
                                writel (ohci->hc_control, &ohci->regs->control);
                                // a readl() later syncs BLE with the HC
                        } else
                                writel (le32_to_cpup (&ed->hwNextED),
                                        &ohci->regs->ed_bulkhead);
                } else {
                        ed->ed_prev->ed_next = ed->ed_next;
                        ed->ed_prev->hwNextED = ed->hwNextED;
                }
                /* remove ED from the HCD's list: */
                if (ohci->ed_bulktail == ed) {
                        ohci->ed_bulktail = ed->ed_prev;
                        if (ohci->ed_bulktail)
                                ohci->ed_bulktail->ed_next = NULL;
                } else if (ed->ed_next) {
                        ed->ed_next->ed_prev = ed->ed_prev;
                }
                break;

        // case PIPE_INTERRUPT:
        // case PIPE_ISOCHRONOUS:
        default:
                periodic_unlink (ohci, ed);
                break;
        }
}


/*-------------------------------------------------------------------------*/

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of usb_set_configuration() or usb_set_interface() ... but the USB stack
 * isn't very stateful, so we re-init whenever the HC isn't looking.
 */
static struct ed *ed_get (
        struct ohci_hcd         *ohci,
        struct usb_device       *udev,
        unsigned int            pipe,
        int                     interval
) {
        int                     is_out = !usb_pipein (pipe);
        int                     type = usb_pipetype (pipe);
        struct hcd_dev          *dev = (struct hcd_dev *) udev->hcpriv;
        struct ed               *ed;
        unsigned                ep;
        unsigned long           flags;

        ep = usb_pipeendpoint (pipe) << 1;
        if (type != PIPE_CONTROL && is_out)
                ep |= 1;

        spin_lock_irqsave (&ohci->lock, flags);

        if (!(ed = dev->ep [ep])) {
                struct td       *td;

                ed = ed_alloc (ohci, GFP_ATOMIC);
                if (!ed) {
                        /* out of memory */
                        goto done;
                }
                dev->ep [ep] = ed;

                /* dummy td; end of td list for ed */
                td = td_alloc (ohci, GFP_ATOMIC);
                if (!td) {
                        /* out of memory */
                        ed_free (ohci, ed);
                        ed = NULL;
                        goto done;
                }
                ed->dummy = td;
                ed->hwTailP = cpu_to_le32 (td->td_dma);
                ed->hwHeadP = ed->hwTailP;      /* ED_C, ED_H zeroed */
                ed->state = ED_IDLE;
                ed->type = type;
        }

        /* NOTE: only ep0 currently needs this "re"init logic, during
         * enumeration (after set_address).
         */
        if (ed->state == ED_IDLE) {
                u32     info;

                info = usb_pipedevice (pipe);
                info |= (ep >> 1) << 7;
                info |= usb_maxpacket (udev, pipe, is_out) << 16;
                info = cpu_to_le32 (info);
                if (udev->speed == USB_SPEED_LOW)
                        info |= ED_LOWSPEED;
                /* only control transfers store pids in tds */
                if (type != PIPE_CONTROL) {
                        info |= is_out ? ED_OUT : ED_IN;
                        if (type != PIPE_BULK) {
                                /* periodic transfers... */
                                if (type == PIPE_ISOCHRONOUS)
                                        info |= ED_ISO;
                                else if (interval > 32) /* iso can be bigger */
                                        interval = 32;
                                ed->interval = interval;
                                ed->load = usb_calc_bus_time (
                                        udev->speed, !is_out,
                                        type == PIPE_ISOCHRONOUS,
                                        usb_maxpacket (udev, pipe, is_out))
                                                / 1000;
                        }
                }
                ed->hwINFO = info;
        }

done:
        spin_unlock_irqrestore (&ohci->lock, flags);
        return ed;
}
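
/* Index arithmetic above, worked out (editorial note): dev->ep[] packs
 * the endpoint number and direction as ep = (epnum << 1) | dir, where
 * dir is 1 only for non-control OUT endpoints.  For example:
 *
 *      usb_sndbulkpipe (udev, 2)  ->  ep = (2 << 1) | 1 = 5
 *      usb_rcvbulkpipe (udev, 2)  ->  ep = (2 << 1) | 0 = 4
 *      ep0 control (either dir)   ->  ep = 0  (one ED, both directions)
 */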

/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SF) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
        ed->hwINFO |= ED_DEQUEUE;
        ed_deschedule (ohci, ed);

        /* rm_list is just singly linked, for simplicity */
        ed->ed_next = ohci->ed_rm_list;
        ed->ed_prev = NULL;
        ohci->ed_rm_list = ed;

        /* enable SOF interrupt */
        writel (OHCI_INTR_SF, &ohci->regs->intrstatus);
        writel (OHCI_INTR_SF, &ohci->regs->intrenable);
        // flush those writes, and get latest HCCA contents
        (void) readl (&ohci->regs->control);

        /* SF interrupt might get delayed; record the frame counter value that
         * indicates when the HC isn't looking at it, so concurrent unlinks
         * behave.  frame_no wraps every 2^16 msec, and changes right before
         * SF is triggered.
         */
        ed->tick = OHCI_FRAME_NO(ohci->hcca) + 1;
}
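
/* Timing sketch (editorial): if OHCI_FRAME_NO(hcca) reads 0xffff when
 * the unlink is requested, ed->tick becomes 0x0000 after the 16-bit
 * wrap.  finish_unlinks() then leaves the ED alone while
 * tick_before (tick, ed->tick) holds, i.e. until the frame counter has
 * itself wrapped past 0x0000, by which point the HC has moved on and
 * can no longer be working from a cached copy of this ED.
 */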

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */

static void
td_fill (struct ohci_hcd *ohci, u32 info,
        dma_addr_t data, int len,
        struct urb *urb, int index)
{
        struct td               *td, *td_pt;
        struct urb_priv         *urb_priv = urb->hcpriv;
        int                     is_iso = info & TD_ISO;
        int                     hash;

        // ASSERT (index < urb_priv->length);

        /* aim for only one interrupt per urb.  mostly applies to control
         * and iso; other urbs rarely need more than one TD per urb.
         * this way, only final tds (or ones with an error) cause IRQs.
         * at least immediately; use DI=6 in case any control request is
         * tempted to die part way through.  (and to force the hc to flush
         * its donelist soonish, even on unlink paths.)
         *
         * NOTE: could delay interrupts even for the last TD, and get fewer
         * interrupts ... increasing per-urb latency by sharing interrupts.
         * Drivers that queue bulk urbs may request that behavior.
         */
        if (index != (urb_priv->length - 1)
                        || (urb->transfer_flags & URB_NO_INTERRUPT))
                info |= TD_DI_SET (6);

        /* use this td as the next dummy */
        td_pt = urb_priv->td [index];

        /* fill the old dummy TD */
        td = urb_priv->td [index] = urb_priv->ed->dummy;
        urb_priv->ed->dummy = td_pt;

        td->ed = urb_priv->ed;
        td->next_dl_td = NULL;
        td->index = index;
        td->urb = urb;
        td->data_dma = data;
        if (!len)
                data = 0;

        td->hwINFO = cpu_to_le32 (info);
        if (is_iso) {
                td->hwCBP = cpu_to_le32 (data & 0xFFFFF000);
                td->hwPSW [0] = cpu_to_le16 ((data & 0x0FFF) | 0xE000);
                td->ed->last_iso = info & 0xffff;
        } else {
                td->hwCBP = cpu_to_le32 (data);
        }
        if (data)
                td->hwBE = cpu_to_le32 (data + len - 1);
        else
                td->hwBE = 0;
        td->hwNextTD = cpu_to_le32 (td_pt->td_dma);

        /* append to queue */
        list_add_tail (&td->td_list, &td->ed->td_list);

        /* hash it for later reverse mapping */
        hash = TD_HASH_FUNC (td->td_dma);
        td->td_hash = ohci->td_hash [hash];
        ohci->td_hash [hash] = td;

        /* HC might read the TD (or cachelines) right away ... */
        wmb ();
        td->ed->hwTailP = td->hwNextTD;
}
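
/* The dummy-TD trick above (OHCI spec 5.2.8.2), in slow motion
 * (editorial sketch): hwTailP always names a TD the HC must not
 * process.  Queueing work fills in the *old* dummy and donates the
 * caller's preallocated TD as the new dummy:
 *
 *      before:  hwHeadP -> ... -> dummy            (hwTailP == dummy)
 *      fill:    dummy gets info/CBP/BE; dummy->hwNextTD = new dummy
 *      after:   hwHeadP -> ... -> old dummy -> new dummy (hwTailP moved)
 *
 * Only the final hwTailP store (after the wmb) exposes the new TD, so
 * the HC never sees a half-written descriptor.
 */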

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb (
        struct ohci_hcd *ohci,
        struct urb      *urb
) {
        struct urb_priv *urb_priv = urb->hcpriv;
        dma_addr_t      data;
        int             data_len = urb->transfer_buffer_length;
        int             cnt = 0;
        u32             info = 0;
        int             is_out = usb_pipeout (urb->pipe);
        int             periodic = 0;

        /* OHCI handles the bulk/interrupt data toggles itself.  We just
         * use the device toggle bits for resetting, and rely on the fact
         * that resetting toggle is meaningless if the endpoint is active.
         */
        if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
                usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
                        is_out, 1);
                urb_priv->ed->hwHeadP &= ~ED_C;
        }

        urb_priv->td_cnt = 0;
        list_add (&urb_priv->pending, &ohci->pending);

        if (data_len)
                data = urb->transfer_dma;
        else
                data = 0;

        /* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
         * using TD_CC_GET, as well as by seeing them on the done list.
         * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
         */
        switch (urb_priv->ed->type) {

        /* Bulk and interrupt are identical except for where in the schedule
         * their EDs live.
         */
        case PIPE_INTERRUPT:
                /* ... and periodic urbs have extra accounting */
                periodic = hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs++ == 0
                        && hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs == 0;
                /* FALLTHROUGH */
        case PIPE_BULK:
                info = is_out
                        ? TD_T_TOGGLE | TD_CC | TD_DP_OUT
                        : TD_T_TOGGLE | TD_CC | TD_DP_IN;
                /* TDs _could_ transfer up to 8K each */
                while (data_len > 4096) {
                        td_fill (ohci, info, data, 4096, urb, cnt);
                        data += 4096;
                        data_len -= 4096;
                        cnt++;
                }
                /* maybe avoid ED halt on final TD short read */
                if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
                        info |= TD_R;
                td_fill (ohci, info, data, data_len, urb, cnt);
                cnt++;
                if ((urb->transfer_flags & URB_ZERO_PACKET)
                                && cnt < urb_priv->length) {
                        td_fill (ohci, info, 0, 0, urb, cnt);
                        cnt++;
                }
                /* maybe kickstart bulk list */
                if (urb_priv->ed->type == PIPE_BULK) {
                        wmb ();
                        writel (OHCI_BLF, &ohci->regs->cmdstatus);
                }
                break;

        /* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
         * any DATA phase works normally, and the STATUS ack is special.
         */
        case PIPE_CONTROL:
                info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
                td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
                if (data_len > 0) {
                        info = TD_CC | TD_R | TD_T_DATA1;
                        info |= is_out ? TD_DP_OUT : TD_DP_IN;
                        /* NOTE:  mishandles transfers >8K, some >4K */
                        td_fill (ohci, info, data, data_len, urb, cnt++);
                }
                info = is_out
                        ? TD_CC | TD_DP_IN | TD_T_DATA1
                        : TD_CC | TD_DP_OUT | TD_T_DATA1;
                td_fill (ohci, info, data, 0, urb, cnt++);
                /* maybe kickstart control list */
                wmb ();
                writel (OHCI_CLF, &ohci->regs->cmdstatus);
                break;

        /* ISO has no retransmit, so no toggle; and it uses special TDs.
         * Each TD could handle multiple consecutive frames (interval 1);
         * we could often reduce the number of TDs here.
         */
        case PIPE_ISOCHRONOUS:
                for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
                        int     frame = urb->start_frame;

                        // FIXME scheduling should handle frame counter
                        // roll-around ... exotic case (and OHCI has
                        // a 2^16 iso range, vs other HCs max of 2^10)
                        frame += cnt * urb->interval;
                        frame &= 0xffff;
                        td_fill (ohci, TD_CC | TD_ISO | frame,
                                data + urb->iso_frame_desc [cnt].offset,
                                urb->iso_frame_desc [cnt].length, urb, cnt);
                }
                periodic = hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs++ == 0
                        && hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs == 0;
                break;
        }

        /* start periodic dma if needed */
        if (periodic) {
                wmb ();
                ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
                writel (ohci->hc_control, &ohci->regs->control);
        }

        // ASSERT (urb_priv->length == cnt);
}
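
/* TD-count arithmetic for the bulk/interrupt path above (editorial
 * example): an 8192 byte bulk OUT urb becomes two 4096 byte data TDs;
 * with URB_ZERO_PACKET set and maxpacket 64 (8192 % 64 == 0), a third,
 * zero length TD follows.  The enqueue path (outside this file) must
 * have sized urb_priv->td[] to match, since td_fill() trusts
 * urb_priv->length.
 */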

/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/

/* calculate transfer length/status and update the urb
 * PRECONDITION:  irqsafe (only for urb->status locking)
 */
static void td_done (struct ohci_hcd *ohci, struct urb *urb, struct td *td)
{
        u32     tdINFO = le32_to_cpup (&td->hwINFO);
        int     cc = 0;

        list_del (&td->td_list);

        /* ISO ... drivers see per-TD length/status */
        if (tdINFO & TD_ISO) {
                u16     tdPSW = le16_to_cpu (td->hwPSW [0]);
                int     dlen = 0;

                /* NOTE:  assumes FC in tdINFO == 0 (and MAXPSW == 1) */

                cc = (tdPSW >> 12) & 0xF;
                if (tdINFO & TD_CC)     /* hc didn't touch? */
                        return;

                if (usb_pipeout (urb->pipe))
                        dlen = urb->iso_frame_desc [td->index].length;
                else {
                        /* short reads are always OK for ISO */
                        if (cc == TD_DATAUNDERRUN)
                                cc = TD_CC_NOERROR;
                        dlen = tdPSW & 0x3ff;
                }
                urb->actual_length += dlen;
                urb->iso_frame_desc [td->index].actual_length = dlen;
                urb->iso_frame_desc [td->index].status = cc_to_error [cc];

                if (cc != TD_CC_NOERROR)
                        ohci_vdbg (ohci,
                                "urb %p iso td %p (%d) len %d cc %d\n",
                                urb, td, 1 + td->index, dlen, cc);

        /* BULK, INT, CONTROL ... drivers see aggregate length/status,
         * except that "setup" bytes aren't counted and "short" transfers
         * might not be reported as errors.
         */
        } else {
                int     type = usb_pipetype (urb->pipe);
                u32     tdBE = le32_to_cpup (&td->hwBE);

                cc = TD_CC_GET (tdINFO);

                /* control endpoints only have soft stalls */
                if (type != PIPE_CONTROL && cc == TD_CC_STALL)
                        usb_endpoint_halt (urb->dev,
                                usb_pipeendpoint (urb->pipe),
                                usb_pipeout (urb->pipe));

                /* update packet status if needed (short is normally ok) */
                if (cc == TD_DATAUNDERRUN
                                && !(urb->transfer_flags & URB_SHORT_NOT_OK))
                        cc = TD_CC_NOERROR;
                if (cc != TD_CC_NOERROR && cc < 0x0E) {
                        spin_lock (&urb->lock);
                        if (urb->status == -EINPROGRESS)
                                urb->status = cc_to_error [cc];
                        spin_unlock (&urb->lock);
                }

                /* count all non-empty packets except control SETUP packet */
                if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
                        if (td->hwCBP == 0)
                                urb->actual_length += tdBE - td->data_dma + 1;
                        else
                                urb->actual_length +=
                                          le32_to_cpup (&td->hwCBP)
                                        - td->data_dma;
                }

                if (cc != TD_CC_NOERROR && cc < 0x0E)
                        ohci_vdbg (ohci,
                                "urb %p td %p (%d) cc %d, len=%d/%d\n",
                                urb, td, 1 + td->index, cc,
                                urb->actual_length,
                                urb->transfer_buffer_length);
        }
}
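
/* Length arithmetic in the non-ISO branch, worked out (editorial
 * example): for a TD covering data_dma = 0x1000 with hwBE = 0x1fff
 * (4096 bytes):
 *
 *      hwCBP == 0       ->  fully done:  0x1fff - 0x1000 + 1 = 4096
 *      hwCBP == 0x1800  ->  short:       0x1800 - 0x1000     = 2048
 *
 * CBP points at the first untransferred byte and BE at the last byte
 * of the buffer, hence the "+ 1" only in the completed case.
 */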

/*-------------------------------------------------------------------------*/

static inline struct td *
ed_halted (struct ohci_hcd *ohci, struct td *td, int cc, struct td *rev)
{
        struct urb              *urb = td->urb;
        struct ed               *ed = td->ed;
        struct list_head        *tmp = td->td_list.next;
        u32                     toggle = ed->hwHeadP & ED_C;

        /* clear ed halt; this is the td that caused it, but keep it inactive
         * until its urb->complete() has a chance to clean up.
         */
        ed->hwINFO |= ED_SKIP;
        wmb ();
        ed->hwHeadP &= ~ED_H;

        /* put any later tds from this urb onto the donelist, after 'td',
         * order won't matter here: no errors, and nothing was transferred.
         * also patch the ed so it looks as if those tds completed normally.
         */
        while (tmp != &ed->td_list) {
                struct td       *next;
                u32             info;

                next = list_entry (tmp, struct td, td_list);
                tmp = next->td_list.next;

                if (next->urb != urb)
                        break;

                /* NOTE: if multi-td control DATA segments get supported,
                 * this urb had one of them, this td wasn't the last td
                 * in that segment (TD_R clear), this ed halted because
                 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
                 * then we need to leave the control STATUS packet queued
                 * and clear ED_SKIP.
                 */
                info = next->hwINFO;
                info |= cpu_to_le32 (TD_DONE);
                info &= ~cpu_to_le32 (TD_CC);
                next->hwINFO = info;

                next->next_dl_td = rev;
                rev = next;

                ed->hwHeadP = next->hwNextTD | toggle;
        }

        /* help for troubleshooting:  report anything that
         * looks odd ... that doesn't include protocol stalls
         * (or maybe some other things)
         */
        switch (cc) {
        case TD_DATAUNDERRUN:
                if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
                        break;
                /* fallthrough */
        case TD_CC_STALL:
                if (usb_pipecontrol (urb->pipe))
                        break;
                /* fallthrough */
        default:
                ohci_dbg (ohci,
                        "urb %p path %s ep%d%s %08x cc %d --> status %d\n",
                        urb, urb->dev->devpath,
                        usb_pipeendpoint (urb->pipe),
                        usb_pipein (urb->pipe) ? "in" : "out",
                        le32_to_cpu (td->hwINFO),
                        cc, cc_to_error [cc]);
        }

        return rev;
}

/* replies to the request have to be on a FIFO basis so
 * we unreverse the hc-reversed done-list
 */
static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
{
        u32             td_dma;
        struct td       *td_rev = NULL;
        struct td       *td = NULL;

        td_dma = le32_to_cpup (&ohci->hcca->done_head);
        ohci->hcca->done_head = 0;
        wmb ();

        /* get TD from hc's singly linked list, and
         * prepend to ours.  ed->td_list changes later.
         */
        while (td_dma) {
                int             cc;

                td = dma_to_td (ohci, td_dma);
                if (!td) {
                        ohci_err (ohci, "bad entry %8x\n", td_dma);
                        break;
                }

                td->hwINFO |= cpu_to_le32 (TD_DONE);
                cc = TD_CC_GET (le32_to_cpup (&td->hwINFO));

                /* Non-iso endpoints can halt on error; un-halt,
                 * and dequeue any other TDs from this urb.
                 * No other TD could have caused the halt.
                 */
                if (cc != TD_CC_NOERROR && (td->ed->hwHeadP & ED_H))
                        td_rev = ed_halted (ohci, td, cc, td_rev);

                td->next_dl_td = td_rev;
                td_rev = td;
                td_dma = le32_to_cpup (&td->hwNextTD);
        }
        return td_rev;
}
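
/* Reversal trace (editorial): the HC pushes each retired TD onto
 * hcca->done_head, so if TDs A, B and C complete in that order the
 * hardware list reads C -> B -> A.  The loop above prepends each
 * element onto td_rev, yielding A -> B -> C via next_dl_td, so
 * dl_done_list() can give URBs back in FIFO order.
 */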

/*-------------------------------------------------------------------------*/

/* wrap-aware logic stolen from <linux/jiffies.h> */
#define tick_before(t1,t2) ((((s16)(t1))-((s16)(t2))) < 0)
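
/* Wrap arithmetic example (editorial): the subtraction happens in
 * 16-bit two's complement, so tick_before (0xfffe, 0x0001) computes
 * (s16) 0xfffe - (s16) 0x0001 = -2 - 1 = -3 < 0, correctly treating
 * 0xfffe as "before" 0x0001 across the frame counter wrap; a plain
 * unsigned compare would get this backwards.
 */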

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks (struct ohci_hcd *ohci, u16 tick, struct pt_regs *regs)
{
        struct ed       *ed, **last;

rescan_all:
        for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
                struct list_head        *entry, *tmp;
                int                     completed, modified;
                u32                     *prev;

                /* only take off EDs that the HC isn't using, accounting for
                 * frame counter wraps and EDs with partially retired TDs
                 */
                if (likely (HCD_IS_RUNNING(ohci->hcd.state))) {
                        if (tick_before (tick, ed->tick)) {
skip_ed:
                                last = &ed->ed_next;
                                continue;
                        }

                        if (!list_empty (&ed->td_list)) {
                                struct td       *td;
                                u32             head;

                                td = list_entry (ed->td_list.next, struct td,
                                                        td_list);
                                head = le32_to_cpu (ed->hwHeadP) & TD_MASK;

                                /* INTR_WDH may need to clean up first */
                                if (td->td_dma != head)
                                        goto skip_ed;
                        }
                }

                /* reentrancy:  if we drop the schedule lock, someone might
                 * have modified this list.  normally it's just prepending
                 * entries (which we'd ignore), but paranoia won't hurt.
                 */
                *last = ed->ed_next;
                ed->ed_next = NULL;
                modified = 0;

                /* unlink urbs as requested, but rescan the list after
                 * we call a completion since it might have unlinked
                 * another (earlier) urb
                 *
                 * When we get here, the HC doesn't see this ed.  But it
                 * must not be rescheduled until all completed URBs have
                 * been given back to the driver.
                 */
rescan_this:
                completed = 0;
                prev = &ed->hwHeadP;
                list_for_each_safe (entry, tmp, &ed->td_list) {
                        struct td       *td;
                        struct urb      *urb;
                        urb_priv_t      *urb_priv;
                        u32             savebits;

                        td = list_entry (entry, struct td, td_list);
                        urb = td->urb;
                        urb_priv = td->urb->hcpriv;

                        if (urb->status == -EINPROGRESS) {
                                prev = &td->hwNextTD;
                                continue;
                        }

                        /* patch pointer hc uses */
                        savebits = *prev & ~cpu_to_le32 (TD_MASK);
                        *prev = td->hwNextTD | savebits;

                        /* HC may have partly processed this TD */
                        td_done (ohci, urb, td);
                        urb_priv->td_cnt++;

                        /* if URB is done, clean up */
                        if (urb_priv->td_cnt == urb_priv->length) {
                                modified = completed = 1;
                                finish_urb (ohci, urb, regs);
                        }
                }
                if (completed && !list_empty (&ed->td_list))
                        goto rescan_this;

                /* ED's now officially unlinked, hc doesn't see */
                ed->state = ED_IDLE;
                ed->hwHeadP &= ~ED_H;
                ed->hwNextED = 0;
                wmb ();
                ed->hwINFO &= ~(ED_SKIP | ED_DEQUEUE);

                /* but if there's work queued, reschedule */
                if (!list_empty (&ed->td_list)) {
                        if (HCD_IS_RUNNING(ohci->hcd.state))
                                ed_schedule (ohci, ed);
                }

                if (modified)
                        goto rescan_all;
        }

        /* maybe reenable control and bulk lists */
        if (HCD_IS_RUNNING(ohci->hcd.state)
                        && ohci->hcd.state != USB_STATE_QUIESCING
                        && !ohci->ed_rm_list) {
                u32     command = 0, control = 0;

                if (ohci->ed_controltail) {
                        command |= OHCI_CLF;
                        if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
                                control |= OHCI_CTRL_CLE;
                                writel (0, &ohci->regs->ed_controlcurrent);
                        }
                }
                if (ohci->ed_bulktail) {
                        command |= OHCI_BLF;
                        if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
                                control |= OHCI_CTRL_BLE;
                                writel (0, &ohci->regs->ed_bulkcurrent);
                        }
                }

                /* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
                if (control) {
                        ohci->hc_control |= control;
                        writel (ohci->hc_control, &ohci->regs->control);
                }
                if (command)
                        writel (command, &ohci->regs->cmdstatus);
        }
}


/*-------------------------------------------------------------------------*/

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of
 * scanning the (re-reversed) donelist as this does.
 */
static void
dl_done_list (struct ohci_hcd *ohci, struct pt_regs *regs)
{
        struct td       *td = dl_reverse_done_list (ohci);

        while (td) {
                struct td       *td_next = td->next_dl_td;
                struct urb      *urb = td->urb;
                urb_priv_t      *urb_priv = urb->hcpriv;
                struct ed       *ed = td->ed;

                /* update URB's length and status from TD */
                td_done (ohci, urb, td);
                urb_priv->td_cnt++;

                /* If all this urb's TDs are done, call complete() */
                if (urb_priv->td_cnt == urb_priv->length)
                        finish_urb (ohci, urb, regs);

                /* clean schedule:  unlink EDs that are no longer busy */
                if (list_empty (&ed->td_list)) {
                        if (ed->state == ED_OPER)
                                start_ed_unlink (ohci, ed);

                /* ... reenabling halted EDs only after fault cleanup */
                } else if ((ed->hwINFO & (ED_SKIP | ED_DEQUEUE)) == ED_SKIP) {
                        td = list_entry (ed->td_list.next, struct td, td_list);
                        if (!(td->hwINFO & TD_DONE)) {
                                ed->hwINFO &= ~ED_SKIP;
                                /* ... hc may need waking-up */
                                switch (ed->type) {
                                case PIPE_CONTROL:
                                        writel (OHCI_CLF,
                                                &ohci->regs->cmdstatus);
                                        break;
                                case PIPE_BULK:
                                        writel (OHCI_BLF,
                                                &ohci->regs->cmdstatus);
                                        break;
                                }
                        }
                }

                td = td_next;
        }
}