/*
 * OHCI HCD (Host Controller Driver) for USB.
 *
 * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
 * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
 *
 * This file is licensed under the GPL.
 */

static void urb_free_priv (struct ohci_hcd *hc, urb_priv_t *urb_priv)
{
	int		last = urb_priv->length - 1;

	if (last >= 0) {
		int		i;
		struct td	*td;

		for (i = 0; i <= last; i++) {
			td = urb_priv->td [i];
			if (td)
				td_free (hc, td);
		}
	}

	kfree (urb_priv);
}

/*-------------------------------------------------------------------------*/

/*
 * URB goes back to driver, and isn't reissued.
 * It's completely gone from HC data structures.
 * PRECONDITION:  ohci lock held, irqs blocked.
 */
static void
finish_urb (struct ohci_hcd *ohci, struct urb *urb, struct pt_regs *regs)
{
	// ASSERT (urb->hcpriv != 0);

	urb_free_priv (ohci, urb->hcpriv);
	urb->hcpriv = NULL;

	spin_lock (&urb->lock);
	if (likely (urb->status == -EINPROGRESS))
		urb->status = 0;
	/* report short control reads right even though the data TD always
	 * has TD_R set.  (much simpler, but creates the 1-td limit.)
	 */
	if (unlikely (urb->transfer_flags & URB_SHORT_NOT_OK)
			&& unlikely (usb_pipecontrol (urb->pipe))
			&& urb->actual_length < urb->transfer_buffer_length
			&& usb_pipein (urb->pipe)
			&& urb->status == 0) {
		urb->status = -EREMOTEIO;
	}
	spin_unlock (&urb->lock);

	switch (usb_pipetype (urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs--;
		break;
	case PIPE_INTERRUPT:
		hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs--;
		break;
	}

#ifdef OHCI_VERBOSE_DEBUG
	urb_print (urb, "RET", usb_pipeout (urb->pipe));
#endif

	/* urb->complete() can reenter this HCD */
	spin_unlock (&ohci->lock);
	usb_hcd_giveback_urb (&ohci->hcd, urb, regs);
	spin_lock (&ohci->lock);

	/* stop periodic dma if it's not needed */
	if (hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs == 0
			&& hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs == 0) {
		ohci->hc_control &= ~(OHCI_CTRL_PLE|OHCI_CTRL_IE);
		writel (ohci->hc_control, &ohci->regs->control);
	}
}
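
/*
 * Editor's note, illustration only (the handler below is hypothetical,
 * not part of this driver): the lock dance above exists because
 * urb->complete() may re-enter this HCD right away.  A typical driver
 * completion handler resubmits its URB, which calls back into
 * ohci_urb_enqueue() and retakes ohci->lock:
 *
 *	static void my_int_complete (struct urb *urb, struct pt_regs *regs)
 *	{
 *		// ... consume urb->transfer_buffer ...
 *		usb_submit_urb (urb, GFP_ATOMIC);	// re-enters the HCD
 *	}
 *
 * Holding ohci->lock across the giveback would deadlock on that path.
 */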


/*-------------------------------------------------------------------------*
 * ED handling functions
 *-------------------------------------------------------------------------*/

/* search for the right schedule branch to use for a periodic ed.
 * does some load balancing; returns the branch, or negative errno.
 */
static int balance (struct ohci_hcd *ohci, int interval, int load)
{
	int	i, branch = -ENOSPC;

	/* iso periods can be huge; iso tds specify frame numbers */
	if (interval > NUM_INTS)
		interval = NUM_INTS;

	/* search for the least loaded schedule branch of that period
	 * that has enough bandwidth left unreserved.
	 */
	for (i = 0; i < interval ; i++) {
		if (branch < 0 || ohci->load [branch] > ohci->load [i]) {
#if 1	/* CONFIG_USB_BANDWIDTH */
			int	j;

			/* usb 1.1 says 90% of one frame */
			for (j = i; j < NUM_INTS; j += interval) {
				if ((ohci->load [j] + load) > 900)
					break;
			}
			if (j < NUM_INTS)
				continue;
#endif
			branch = i;
		}
	}
	return branch;
}
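
/*
 * Worked example (editor's illustration, values assumed): with
 * NUM_INTS == 32, an interrupt ED with interval 8 and load 100 (usec)
 * considers branches 0..7.  For a candidate branch i, the inner loop
 * checks slots i, i+8, i+16, i+24; if adding 100 usec would push any
 * of them past the 900 usec budget (90% of a 1 msec frame, per the
 * USB 1.1 comment above), that branch is skipped.  Among the branches
 * that fit, the least loaded one wins.
 */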

/*-------------------------------------------------------------------------*/

/* both iso and interrupt requests have periods; this routine puts them
 * into the schedule tree in the appropriate place.  most iso devices use
 * 1msec periods, but that's not required.
 */
static void periodic_link (struct ohci_hcd *ohci, struct ed *ed)
{
	unsigned	i;

	ohci_vdbg (ohci, "link %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & ED_ISO) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed	**prev = &ohci->periodic [i];
		u32		*prev_p = &ohci->hcca->int_table [i];
		struct ed	*here = *prev;

		/* sorting each branch by period (slow before fast)
		 * lets us share the faster parts of the tree.
		 * (plus maybe: put interrupt eds before iso)
		 */
		while (here && ed != here) {
			if (ed->interval > here->interval)
				break;
			prev = &here->ed_next;
			prev_p = &here->hwNextED;
			here = *prev;
		}
		if (ed != here) {
			ed->ed_next = here;
			if (here)
				ed->hwNextED = *prev_p;
			wmb ();
			*prev = ed;
			*prev_p = cpu_to_le32p (&ed->dma);
			wmb();
		}
		ohci->load [i] += ed->load;
	}
	hcd_to_bus (&ohci->hcd)->bandwidth_allocated += ed->load / ed->interval;
}
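
/*
 * Editor's illustration (assumed values): an ED with interval 4 and
 * branch 1 gets linked into slots 1, 5, 9, ... 29 of the 32-entry
 * int_table.  Since every branch stays sorted slow-before-fast, a
 * later interval-1 ED lands *after* it in each slot's list, and all
 * 32 lists converge on that interval-1 tail -- the sharing shown in
 * figure 3-5 of the OHCI spec, with no dummy EDs needed.
 */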

/* link an ed into one of the HC chains */

static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
	int	branch;

	ed->state = ED_OPER;
	ed->ed_prev = 0;
	ed->ed_next = 0;
	ed->hwNextED = 0;
	wmb ();

	/* we care about rm_list when setting CLE/BLE in case the HC was at
	 * work on some TD when CLE/BLE was turned off, and isn't quiesced
	 * yet.  finish_unlinks() restarts as needed, at some upcoming
	 * INTR_SF.
	 *
	 * control and bulk EDs are doubly linked (ed_next, ed_prev), but
	 * periodic ones are singly linked (ed_next).  that's because the
	 * periodic schedule encodes a tree like figure 3-5 in the ohci
	 * spec:  each qh can have several "previous" nodes, and the tree
	 * doesn't have unused/idle descriptors.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		if (ohci->ed_controltail == NULL) {
			WARN_ON (ohci->hc_control & OHCI_CTRL_CLE);
			writel (ed->dma, &ohci->regs->ed_controlhead);
		} else {
			ohci->ed_controltail->ed_next = ed;
			ohci->ed_controltail->hwNextED = cpu_to_le32 (ed->dma);
		}
		ed->ed_prev = ohci->ed_controltail;
		if (!ohci->ed_controltail && !ohci->ed_rm_list) {
			wmb();
			ohci->hc_control |= OHCI_CTRL_CLE;
			writel (0, &ohci->regs->ed_controlcurrent);
			writel (ohci->hc_control, &ohci->regs->control);
		}
		ohci->ed_controltail = ed;
		break;

	case PIPE_BULK:
		if (ohci->ed_bulktail == NULL) {
			WARN_ON (ohci->hc_control & OHCI_CTRL_BLE);
			writel (ed->dma, &ohci->regs->ed_bulkhead);
		} else {
			ohci->ed_bulktail->ed_next = ed;
			ohci->ed_bulktail->hwNextED = cpu_to_le32 (ed->dma);
		}
		ed->ed_prev = ohci->ed_bulktail;
		if (!ohci->ed_bulktail && !ohci->ed_rm_list) {
			wmb();
			ohci->hc_control |= OHCI_CTRL_BLE;
			writel (0, &ohci->regs->ed_bulkcurrent);
			writel (ohci->hc_control, &ohci->regs->control);
		}
		ohci->ed_bulktail = ed;
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		branch = balance (ohci, ed->interval, ed->load);
		if (branch < 0) {
			ohci_dbg (ohci,
				"ERR %d, interval %d msecs, load %d\n",
				branch, ed->interval, ed->load);
			// FIXME if there are TDs queued, fail them!
			return branch;
		}
		ed->branch = branch;
		periodic_link (ohci, ed);
	}

	/* the HC may not see the schedule updates yet, but if it does
	 * then they'll be properly ordered.
	 */
	return 0;
}

/*-------------------------------------------------------------------------*/

/* scan the periodic table to find and unlink this ED */
static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	int	i;

	for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
		struct ed	*temp;
		struct ed	**prev = &ohci->periodic [i];
		u32		*prev_p = &ohci->hcca->int_table [i];

		while (*prev && (temp = *prev) != ed) {
			prev_p = &temp->hwNextED;
			prev = &temp->ed_next;
		}
		if (*prev) {
			*prev_p = ed->hwNextED;
			*prev = ed->ed_next;
		}
		ohci->load [i] -= ed->load;
	}
	hcd_to_bus (&ohci->hcd)->bandwidth_allocated -= ed->load / ed->interval;

	ohci_vdbg (ohci, "unlink %sed %p branch %d [%dus.], interval %d\n",
		(ed->hwINFO & ED_ISO) ? "iso " : "",
		ed, ed->branch, ed->load, ed->interval);
}

/* unlink an ed from one of the HC chains.
 * just the link to the ed is unlinked.
 * the link from the ed still points to another operational ed or 0
 * so the HC can eventually finish the processing of the unlinked ed
 * (assuming it already started that, which needn't be true).
 *
 * ED_UNLINK is a transient state: the HC may still see this ED, but soon
 * it won't.  ED_SKIP means the HC will finish its current transaction,
 * but won't start anything new.  The TD queue may still grow; device
 * drivers don't know about this HCD-internal state.
 *
 * When the HC can't see the ED, something changes ED_UNLINK to one of:
 *
 *  - ED_OPER: when there's any request queued, the ED gets rescheduled
 *    immediately.  HC should be working on them.
 *
 *  - ED_IDLE:  when there's no TD queue. there's no reason for the HC
 *    to care about this ED; safe to disable the endpoint.
 *
 * When finish_unlinks() runs later, after SOF interrupt, it will often
 * complete one or more URB unlinks before making that state change.
 */
static void ed_deschedule (struct ohci_hcd *ohci, struct ed *ed)
{
	ed->hwINFO |= ED_SKIP;
	wmb ();
	ed->state = ED_UNLINK;

	/* To deschedule something from the control or bulk list, just
	 * clear CLE/BLE and wait.  There's no safe way to scrub out list
	 * head/current registers until later, and "later" isn't very
	 * tightly specified.  Figure 6-5 and Section 6.4.2.2 show how
	 * the HC is reading the ED queues (while we modify them).
	 *
	 * For now, ed_schedule() is "later".  It might be good paranoia
	 * to scrub those registers in finish_unlinks(), in case of bugs
	 * that make the HC try to use them.
	 */
	switch (ed->type) {
	case PIPE_CONTROL:
		/* remove ED from the HC's list: */
		if (ed->ed_prev == NULL) {
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_CLE;
				writel (ohci->hc_control, &ohci->regs->control);
				// a readl() later syncs CLE with the HC
			} else
				writel (le32_to_cpup (&ed->hwNextED),
					&ohci->regs->ed_controlhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* remove ED from the HCD's list: */
		if (ohci->ed_controltail == ed) {
			ohci->ed_controltail = ed->ed_prev;
			if (ohci->ed_controltail)
				ohci->ed_controltail->ed_next = 0;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	case PIPE_BULK:
		/* remove ED from the HC's list: */
		if (ed->ed_prev == NULL) {
			if (!ed->hwNextED) {
				ohci->hc_control &= ~OHCI_CTRL_BLE;
				writel (ohci->hc_control, &ohci->regs->control);
				// a readl() later syncs BLE with the HC
			} else
				writel (le32_to_cpup (&ed->hwNextED),
					&ohci->regs->ed_bulkhead);
		} else {
			ed->ed_prev->ed_next = ed->ed_next;
			ed->ed_prev->hwNextED = ed->hwNextED;
		}
		/* remove ED from the HCD's list: */
		if (ohci->ed_bulktail == ed) {
			ohci->ed_bulktail = ed->ed_prev;
			if (ohci->ed_bulktail)
				ohci->ed_bulktail->ed_next = 0;
		} else if (ed->ed_next) {
			ed->ed_next->ed_prev = ed->ed_prev;
		}
		break;

	// case PIPE_INTERRUPT:
	// case PIPE_ISOCHRONOUS:
	default:
		periodic_unlink (ohci, ed);
		break;
	}
}


/*-------------------------------------------------------------------------*/

/* get and maybe (re)init an endpoint. init _should_ be done only as part
 * of usb_set_configuration() or usb_set_interface() ... but the USB stack
 * isn't very stateful, so we re-init whenever the HC isn't looking.
 */
static struct ed *ed_get (
	struct ohci_hcd		*ohci,
	struct usb_device	*udev,
	unsigned int		pipe,
	int			interval
) {
	int			is_out = !usb_pipein (pipe);
	int			type = usb_pipetype (pipe);
	struct hcd_dev		*dev = (struct hcd_dev *) udev->hcpriv;
	struct ed		*ed;
	unsigned		ep;
	unsigned long		flags;

	ep = usb_pipeendpoint (pipe) << 1;
	if (type != PIPE_CONTROL && is_out)
		ep |= 1;

	spin_lock_irqsave (&ohci->lock, flags);

	if (!(ed = dev->ep [ep])) {
		struct td	*td;

		ed = ed_alloc (ohci, GFP_ATOMIC);
		if (!ed) {
			/* out of memory */
			goto done;
		}
		dev->ep [ep] = ed;

		/* dummy td; end of td list for ed */
		td = td_alloc (ohci, GFP_ATOMIC);
		if (!td) {
			/* out of memory */
			ed_free (ohci, ed);
			ed = 0;
			goto done;
		}
		ed->dummy = td;
		ed->hwTailP = cpu_to_le32 (td->td_dma);
		ed->hwHeadP = ed->hwTailP;	/* ED_C, ED_H zeroed */
		ed->state = ED_IDLE;
		ed->type = type;
	}

	/* NOTE: only ep0 currently needs this "re"init logic, during
	 * enumeration (after set_address, or if ep0 maxpacket >8).
	 */
	if (ed->state == ED_IDLE) {
		u32	info;

		info = usb_pipedevice (pipe);
		info |= (ep >> 1) << 7;
		info |= usb_maxpacket (udev, pipe, is_out) << 16;
		info = cpu_to_le32 (info);
		if (udev->speed == USB_SPEED_LOW)
			info |= ED_LOWSPEED;
		/* only control transfers store pids in tds */
		if (type != PIPE_CONTROL) {
			info |= is_out ? ED_OUT : ED_IN;
			if (type != PIPE_BULK) {
				/* periodic transfers... */
				if (type == PIPE_ISOCHRONOUS)
					info |= ED_ISO;
				else if (interval > 32)	/* iso can be bigger */
					interval = 32;
				ed->interval = interval;
				ed->load = usb_calc_bus_time (
					udev->speed, !is_out,
					type == PIPE_ISOCHRONOUS,
					usb_maxpacket (udev, pipe, is_out))
						/ 1000;
			}
		}
		ed->hwINFO = info;
	}

done:
	spin_unlock_irqrestore (&ohci->lock, flags);
	return ed;
}
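
/*
 * Editor's illustration of the dev->ep[] indexing above (assumed
 * values): endpoint number 1 gives ep = 2; for bulk/interrupt/iso the
 * OUT direction sets the low bit, so ep1-IN uses index 2 and ep1-OUT
 * index 3.  Control endpoints carry both directions, so they keep the
 * low bit clear and share a single slot per endpoint number.
 */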

/*-------------------------------------------------------------------------*/

/* request unlinking of an endpoint from an operational HC.
 * put the ep on the rm_list
 * real work is done at the next start frame (SF) hardware interrupt
 * caller guarantees HCD is running, so hardware access is safe,
 * and that ed->state is ED_OPER
 */
static void start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
	ed->hwINFO |= ED_DEQUEUE;
	ed_deschedule (ohci, ed);

	/* rm_list is just singly linked, for simplicity */
	ed->ed_next = ohci->ed_rm_list;
	ed->ed_prev = 0;
	ohci->ed_rm_list = ed;

	/* enable SOF interrupt */
	writel (OHCI_INTR_SF, &ohci->regs->intrstatus);
	writel (OHCI_INTR_SF, &ohci->regs->intrenable);
	// flush those writes, and get latest HCCA contents
	(void) readl (&ohci->regs->control);

	/* SF interrupt might get delayed; record the frame counter value that
	 * indicates when the HC isn't looking at it, so concurrent unlinks
	 * behave.  frame_no wraps every 2^16 msec, and changes right before
	 * SF is triggered.
	 */
	ed->tick = OHCI_FRAME_NO(ohci->hcca) + 1;
}

/*-------------------------------------------------------------------------*
 * TD handling functions
 *-------------------------------------------------------------------------*/

/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */

static void
td_fill (struct ohci_hcd *ohci, u32 info,
	dma_addr_t data, int len,
	struct urb *urb, int index)
{
	struct td		*td, *td_pt;
	struct urb_priv		*urb_priv = urb->hcpriv;
	int			is_iso = info & TD_ISO;
	int			hash;

	// ASSERT (index < urb_priv->length);

	/* aim for only one interrupt per urb.  mostly applies to control
	 * and iso; other urbs rarely need more than one TD per urb.
	 * this way, only final tds (or ones with an error) cause IRQs.
	 * at least immediately; use DI=6 in case any control request is
	 * tempted to die part way through.  (and to force the hc to flush
	 * its donelist soonish, even on unlink paths.)
	 *
	 * NOTE: could delay interrupts even for the last TD, and get fewer
	 * interrupts ... increasing per-urb latency by sharing interrupts.
	 * Drivers that queue bulk urbs may request that behavior.
	 */
	if (index != (urb_priv->length - 1)
			|| (urb->transfer_flags & URB_NO_INTERRUPT))
		info |= TD_DI_SET (6);

	/* use this td as the next dummy */
	td_pt = urb_priv->td [index];

	/* fill the old dummy TD */
	td = urb_priv->td [index] = urb_priv->ed->dummy;
	urb_priv->ed->dummy = td_pt;

	td->ed = urb_priv->ed;
	td->next_dl_td = NULL;
	td->index = index;
	td->urb = urb;
	td->data_dma = data;
	if (!len)
		data = 0;

	td->hwINFO = cpu_to_le32 (info);
	if (is_iso) {
		td->hwCBP = cpu_to_le32 (data & 0xFFFFF000);
		td->hwPSW [0] = cpu_to_le16 ((data & 0x0FFF) | 0xE000);
		td->ed->last_iso = info & 0xffff;
	} else {
		td->hwCBP = cpu_to_le32 (data);
	}
	if (data)
		td->hwBE = cpu_to_le32 (data + len - 1);
	else
		td->hwBE = 0;
	td->hwNextTD = cpu_to_le32 (td_pt->td_dma);

	/* append to queue */
	list_add_tail (&td->td_list, &td->ed->td_list);

	/* hash it for later reverse mapping */
	hash = TD_HASH_FUNC (td->td_dma);
	td->td_hash = ohci->td_hash [hash];
	ohci->td_hash [hash] = td;

	/* HC might read the TD (or cachelines) right away ... */
	wmb ();
	td->ed->hwTailP = td->hwNextTD;
}
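
/*
 * Editor's illustration of the dummy-TD swap above: the ED always ends
 * in one dummy TD that the HC never runs (hwTailP points at it).
 * td_fill() writes the request into the *old* dummy, turns the freshly
 * allocated TD into the new dummy, and only then advances hwTailP.
 * The HC can race with all of this safely, since a TD only becomes
 * runnable once hwTailP moves past it.
 */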

/*-------------------------------------------------------------------------*/

/* Prepare all TDs of a transfer, and queue them onto the ED.
 * Caller guarantees HC is active.
 * Usually the ED is already on the schedule, so TDs might be
 * processed as soon as they're queued.
 */
static void td_submit_urb (
	struct ohci_hcd	*ohci,
	struct urb	*urb
) {
	struct urb_priv	*urb_priv = urb->hcpriv;
	dma_addr_t	data;
	int		data_len = urb->transfer_buffer_length;
	int		cnt = 0;
	u32		info = 0;
	int		is_out = usb_pipeout (urb->pipe);
	int		periodic = 0;

	/* OHCI handles the bulk/interrupt data toggles itself.  We just
	 * use the device toggle bits for resetting, and rely on the fact
	 * that resetting toggle is meaningless if the endpoint is active.
	 */
	if (!usb_gettoggle (urb->dev, usb_pipeendpoint (urb->pipe), is_out)) {
		usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe),
			is_out, 1);
		urb_priv->ed->hwHeadP &= ~ED_C;
	}

	urb_priv->td_cnt = 0;

	if (data_len)
		data = urb->transfer_dma;
	else
		data = 0;

	/* NOTE:  TD_CC is set so we can tell which TDs the HC processed by
	 * using TD_CC_GET, as well as by seeing them on the done list.
	 * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
	 */
	switch (urb_priv->ed->type) {

	/* Bulk and interrupt are identical except for where in the schedule
	 * their EDs live.
	 */
	case PIPE_INTERRUPT:
		/* ... and periodic urbs have extra accounting */
		periodic = hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs++ == 0
			&& hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs == 0;
		/* FALLTHROUGH */
	case PIPE_BULK:
		info = is_out
			? TD_T_TOGGLE | TD_CC | TD_DP_OUT
			: TD_T_TOGGLE | TD_CC | TD_DP_IN;
		/* TDs _could_ transfer up to 8K each */
		while (data_len > 4096) {
			td_fill (ohci, info, data, 4096, urb, cnt);
			data += 4096;
			data_len -= 4096;
			cnt++;
		}
		/* maybe avoid ED halt on final TD short read */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
			info |= TD_R;
		td_fill (ohci, info, data, data_len, urb, cnt);
		cnt++;
		if ((urb->transfer_flags & URB_ZERO_PACKET)
				&& cnt < urb_priv->length) {
			td_fill (ohci, info, 0, 0, urb, cnt);
			cnt++;
		}
		/* maybe kickstart bulk list */
		if (urb_priv->ed->type == PIPE_BULK) {
			wmb ();
			writel (OHCI_BLF, &ohci->regs->cmdstatus);
		}
		break;

	/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
	 * any DATA phase works normally, and the STATUS ack is special.
	 */
	case PIPE_CONTROL:
		info = TD_CC | TD_DP_SETUP | TD_T_DATA0;
		td_fill (ohci, info, urb->setup_dma, 8, urb, cnt++);
		if (data_len > 0) {
			info = TD_CC | TD_R | TD_T_DATA1;
			info |= is_out ? TD_DP_OUT : TD_DP_IN;
			/* NOTE:  mishandles transfers >8K, some >4K */
			td_fill (ohci, info, data, data_len, urb, cnt++);
		}
		info = is_out
			? TD_CC | TD_DP_IN | TD_T_DATA1
			: TD_CC | TD_DP_OUT | TD_T_DATA1;
		td_fill (ohci, info, data, 0, urb, cnt++);
		/* maybe kickstart control list */
		wmb ();
		writel (OHCI_CLF, &ohci->regs->cmdstatus);
		break;

	/* ISO has no retransmit, so no toggle; and it uses special TDs.
	 * Each TD could handle multiple consecutive frames (interval 1);
	 * we could often reduce the number of TDs here.
	 */
	case PIPE_ISOCHRONOUS:
		for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
			int	frame = urb->start_frame;

			// FIXME scheduling should handle frame counter
			// roll-around ... exotic case (and OHCI has
			// a 2^16 iso range, vs other HCs max of 2^10)
			frame += cnt * urb->interval;
			frame &= 0xffff;
			td_fill (ohci, TD_CC | TD_ISO | frame,
				data + urb->iso_frame_desc [cnt].offset,
				urb->iso_frame_desc [cnt].length, urb, cnt);
		}
		periodic = hcd_to_bus (&ohci->hcd)->bandwidth_isoc_reqs++ == 0
			&& hcd_to_bus (&ohci->hcd)->bandwidth_int_reqs == 0;
		break;
	}

	/* start periodic dma if needed */
	if (periodic) {
		wmb ();
		ohci->hc_control |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
		writel (ohci->hc_control, &ohci->regs->control);
	}

	// ASSERT (urb_priv->length == cnt);
}
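
/*
 * Worked example (editor's illustration, sizes assumed): a 10000 byte
 * bulk OUT becomes TDs of 4096 + 4096 + 1808 bytes (cnt == 3), plus a
 * fourth zero-length TD if URB_ZERO_PACKET was requested.  As the
 * comment above notes, a TD could span up to 8K (two physical pages);
 * this code conservatively cuts at 4096.
 */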

/*-------------------------------------------------------------------------*
 * Done List handling functions
 *-------------------------------------------------------------------------*/

/* calculate transfer length/status and update the urb
 * PRECONDITION:  irqsafe (only for urb->status locking)
 */
static void td_done (struct ohci_hcd *ohci, struct urb *urb, struct td *td)
{
	u32	tdINFO = le32_to_cpup (&td->hwINFO);
	int	cc = 0;

	list_del (&td->td_list);

	/* ISO ... drivers see per-TD length/status */
	if (tdINFO & TD_ISO) {
		u16	tdPSW = le16_to_cpu (td->hwPSW [0]);
		int	dlen = 0;

		/* NOTE:  assumes FC in tdINFO == 0 (and MAXPSW == 1) */

		cc = (tdPSW >> 12) & 0xF;
		if (tdINFO & TD_CC)	/* hc didn't touch? */
			return;

		if (usb_pipeout (urb->pipe))
			dlen = urb->iso_frame_desc [td->index].length;
		else {
			/* short reads are always OK for ISO */
			if (cc == TD_DATAUNDERRUN)
				cc = TD_CC_NOERROR;
			dlen = tdPSW & 0x3ff;
		}
		urb->actual_length += dlen;
		urb->iso_frame_desc [td->index].actual_length = dlen;
		urb->iso_frame_desc [td->index].status = cc_to_error [cc];

		if (cc != TD_CC_NOERROR)
			ohci_vdbg (ohci,
				"urb %p iso td %p (%d) len %d cc %d\n",
				urb, td, 1 + td->index, dlen, cc);

	/* BULK, INT, CONTROL ... drivers see aggregate length/status,
	 * except that "setup" bytes aren't counted and "short" transfers
	 * might not be reported as errors.
	 */
	} else {
		int	type = usb_pipetype (urb->pipe);
		u32	tdBE = le32_to_cpup (&td->hwBE);

		cc = TD_CC_GET (tdINFO);

		/* control endpoints only have soft stalls */
		if (type != PIPE_CONTROL && cc == TD_CC_STALL)
			usb_endpoint_halt (urb->dev,
				usb_pipeendpoint (urb->pipe),
				usb_pipeout (urb->pipe));

		/* update packet status if needed (short is normally ok) */
		if (cc == TD_DATAUNDERRUN
				&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
			cc = TD_CC_NOERROR;
		if (cc != TD_CC_NOERROR && cc < 0x0E) {
			spin_lock (&urb->lock);
			if (urb->status == -EINPROGRESS)
				urb->status = cc_to_error [cc];
			spin_unlock (&urb->lock);
		}

		/* count all non-empty packets except control SETUP packet */
		if ((type != PIPE_CONTROL || td->index != 0) && tdBE != 0) {
			if (td->hwCBP == 0)
				urb->actual_length += tdBE - td->data_dma + 1;
			else
				urb->actual_length +=
					  le32_to_cpup (&td->hwCBP)
					- td->data_dma;
		}

		if (cc != TD_CC_NOERROR && cc < 0x0E)
			ohci_vdbg (ohci,
				"urb %p td %p (%d) cc %d, len=%d/%d\n",
				urb, td, 1 + td->index, cc,
				urb->actual_length,
				urb->transfer_buffer_length);
	}
}
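
/*
 * Editor's illustration of the length math above (addresses assumed):
 * for a TD with data_dma 0x1000 and hwBE 0x10ff (256 bytes), hwCBP == 0
 * on completion means everything moved: 0x10ff - 0x1000 + 1 == 256.
 * If the HC stopped short with hwCBP == 0x1080, only 0x1080 - 0x1000
 * == 128 bytes were transferred.
 */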

/*-------------------------------------------------------------------------*/

static inline struct td *
ed_halted (struct ohci_hcd *ohci, struct td *td, int cc, struct td *rev)
{
	struct urb		*urb = td->urb;
	struct ed		*ed = td->ed;
	struct list_head	*tmp = td->td_list.next;
	u32			toggle = ed->hwHeadP & ED_C;

	/* clear ed halt; this is the td that caused it, but keep it inactive
	 * until its urb->complete() has a chance to clean up.
	 */
	ed->hwINFO |= ED_SKIP;
	wmb ();
	ed->hwHeadP &= ~ED_H;

	/* put any later tds from this urb onto the donelist, after 'td',
	 * order won't matter here: no errors, and nothing was transferred.
	 * also patch the ed so it looks as if those tds completed normally.
	 */
	while (tmp != &ed->td_list) {
		struct td	*next;
		u32		info;

		next = list_entry (tmp, struct td, td_list);
		tmp = next->td_list.next;

		if (next->urb != urb)
			break;

		/* NOTE: if multi-td control DATA segments get supported,
		 * this urb had one of them, this td wasn't the last td
		 * in that segment (TD_R clear), this ed halted because
		 * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
		 * then we need to leave the control STATUS packet queued
		 * and clear ED_SKIP.
		 */
		info = next->hwINFO;
		info |= cpu_to_le32 (TD_DONE);
		info &= ~cpu_to_le32 (TD_CC);
		next->hwINFO = info;

		next->next_dl_td = rev;
		rev = next;

		ed->hwHeadP = next->hwNextTD | toggle;
	}

	/* help for troubleshooting:  report anything that
	 * looks odd ... that doesn't include protocol stalls
	 * (or maybe some other things)
	 */
	switch (cc) {
	case TD_DATAUNDERRUN:
		if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
			break;
		/* fallthrough */
	case TD_CC_STALL:
		if (usb_pipecontrol (urb->pipe))
			break;
		/* fallthrough */
	default:
		ohci_dbg (ohci,
			"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
			urb, urb->dev->devpath,
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			le32_to_cpu (td->hwINFO),
			cc, cc_to_error [cc]);
	}

	return rev;
}

/* replies to the request have to be on a FIFO basis so
 * we unreverse the hc-reversed done-list
 */
static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
{
	u32		td_dma;
	struct td	*td_rev = NULL;
	struct td	*td = NULL;
	unsigned long	flags;

	spin_lock_irqsave (&ohci->lock, flags);

	td_dma = le32_to_cpup (&ohci->hcca->done_head);
	ohci->hcca->done_head = 0;
	wmb();

	/* get TD from hc's singly linked list, and
	 * prepend to ours.  ed->td_list changes later.
	 */
	while (td_dma) {
		int		cc;

		td = dma_to_td (ohci, td_dma);
		if (!td) {
			ohci_err (ohci, "bad entry %8x\n", td_dma);
			break;
		}

		td->hwINFO |= cpu_to_le32 (TD_DONE);
		cc = TD_CC_GET (le32_to_cpup (&td->hwINFO));

		/* Non-iso endpoints can halt on error; un-halt,
		 * and dequeue any other TDs from this urb.
		 * No other TD could have caused the halt.
		 */
		if (cc != TD_CC_NOERROR && (td->ed->hwHeadP & ED_H))
			td_rev = ed_halted (ohci, td, cc, td_rev);

		td->next_dl_td = td_rev;
		td_rev = td;
		td_dma = le32_to_cpup (&td->hwNextTD);
	}
	spin_unlock_irqrestore (&ohci->lock, flags);
	return td_rev;
}
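
/*
 * Editor's illustration: if the HC retires TDs A, then B, then C, the
 * done_head chain reads C -> B -> A, since the HC pushes each retired
 * TD on the front.  The loop above pushes them onto td_rev in the
 * order encountered, so the returned list is A -> B -> C, i.e. FIFO
 * completion order again.
 */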

/*-------------------------------------------------------------------------*/

/* wrap-aware logic stolen from <linux/jiffies.h> */
#define tick_before(t1,t2) ((((s16)(t1))-((s16)(t2))) < 0)
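
/*
 * Editor's illustration (values assumed): the s16 cast makes the
 * comparison survive 16-bit frame counter wrap.  tick_before (0xfffe,
 * 0x0003) is true, since (s16)0xfffe - (s16)0x0003 == -2 - 3 == -5,
 * so a tick just before the wrap still counts as "before" one just
 * after it.
 */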

/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks (struct ohci_hcd *ohci, u16 tick, struct pt_regs *regs)
{
	struct ed	*ed, **last;

rescan_all:
	for (last = &ohci->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
		struct list_head	*entry, *tmp;
		int			completed, modified;
		u32			*prev;

		/* only take off EDs that the HC isn't using, accounting for
		 * frame counter wraps and EDs with partially retired TDs
		 */
		if (likely (HCD_IS_RUNNING(ohci->hcd.state))) {
			if (tick_before (tick, ed->tick)) {
skip_ed:
				last = &ed->ed_next;
				continue;
			}

			if (!list_empty (&ed->td_list)) {
				struct td	*td;
				u32		head;

				td = list_entry (ed->td_list.next, struct td,
							td_list);
				head = le32_to_cpu (ed->hwHeadP) & TD_MASK;

				/* INTR_WDH may need to clean up first */
				if (td->td_dma != head)
					goto skip_ed;
			}
		}

		/* reentrancy:  if we drop the schedule lock, someone might
		 * have modified this list.  normally it's just prepending
		 * entries (which we'd ignore), but paranoia won't hurt.
		 */
		*last = ed->ed_next;
		ed->ed_next = 0;
		modified = 0;

		/* unlink urbs as requested, but rescan the list after
		 * we call a completion since it might have unlinked
		 * another (earlier) urb
		 *
		 * When we get here, the HC doesn't see this ed.  But it
		 * must not be rescheduled until all completed URBs have
		 * been given back to the driver.
		 */
rescan_this:
		completed = 0;
		prev = &ed->hwHeadP;
		list_for_each_safe (entry, tmp, &ed->td_list) {
			struct td	*td;
			struct urb	*urb;
			urb_priv_t	*urb_priv;
			u32		savebits;

			td = list_entry (entry, struct td, td_list);
			urb = td->urb;
			urb_priv = td->urb->hcpriv;

			if (urb->status == -EINPROGRESS) {
				prev = &td->hwNextTD;
				continue;
			}

			/* patch pointer hc uses */
			savebits = *prev & ~cpu_to_le32 (TD_MASK);
			*prev = td->hwNextTD | savebits;

			/* HC may have partly processed this TD */
			td_done (ohci, urb, td);
			urb_priv->td_cnt++;

			/* if URB is done, clean up */
			if (urb_priv->td_cnt == urb_priv->length) {
				modified = completed = 1;
				finish_urb (ohci, urb, regs);
			}
		}
		if (completed && !list_empty (&ed->td_list))
			goto rescan_this;

		/* ED's now officially unlinked, hc doesn't see */
		ed->state = ED_IDLE;
		ed->hwHeadP &= ~ED_H;
		ed->hwNextED = 0;
		wmb ();
		ed->hwINFO &= ~(ED_SKIP | ED_DEQUEUE);

		/* but if there's work queued, reschedule */
		if (!list_empty (&ed->td_list)) {
			if (HCD_IS_RUNNING(ohci->hcd.state))
				ed_schedule (ohci, ed);
		}

		if (modified)
			goto rescan_all;
	}

	/* maybe reenable control and bulk lists */
	if (HCD_IS_RUNNING(ohci->hcd.state) && !ohci->ed_rm_list) {
		u32	command = 0, control = 0;

		if (ohci->ed_controltail) {
			command |= OHCI_CLF;
			if (!(ohci->hc_control & OHCI_CTRL_CLE)) {
				control |= OHCI_CTRL_CLE;
				writel (0, &ohci->regs->ed_controlcurrent);
			}
		}
		if (ohci->ed_bulktail) {
			command |= OHCI_BLF;
			if (!(ohci->hc_control & OHCI_CTRL_BLE)) {
				control |= OHCI_CTRL_BLE;
				writel (0, &ohci->regs->ed_bulkcurrent);
			}
		}

		/* CLE/BLE to enable, CLF/BLF to (maybe) kickstart */
		if (control) {
			ohci->hc_control |= control;
			writel (ohci->hc_control, &ohci->regs->control);
		}
		if (command)
			writel (command, &ohci->regs->cmdstatus);
	}
}


/*-------------------------------------------------------------------------*/

/*
 * Process normal completions (error or success) and clean the schedules.
 *
 * This is the main path for handing urbs back to drivers.  The only other
 * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of
 * scanning the (re-reversed) donelist as this does.
 */
static void
dl_done_list (struct ohci_hcd *ohci, struct td *td, struct pt_regs *regs)
{
	unsigned long	flags;

	spin_lock_irqsave (&ohci->lock, flags);
	while (td) {
		struct td	*td_next = td->next_dl_td;
		struct urb	*urb = td->urb;
		urb_priv_t	*urb_priv = urb->hcpriv;
		struct ed	*ed = td->ed;

		/* update URB's length and status from TD */
		td_done (ohci, urb, td);
		urb_priv->td_cnt++;

		/* If all this urb's TDs are done, call complete() */
		if (urb_priv->td_cnt == urb_priv->length)
			finish_urb (ohci, urb, regs);

		/* clean schedule:  unlink EDs that are no longer busy */
		if (list_empty (&ed->td_list)) {
			if (ed->state == ED_OPER)
				start_ed_unlink (ohci, ed);

		/* ... reenabling halted EDs only after fault cleanup */
		} else if ((ed->hwINFO & (ED_SKIP | ED_DEQUEUE)) == ED_SKIP) {
			td = list_entry (ed->td_list.next, struct td, td_list);
			if (!(td->hwINFO & TD_DONE)) {
				ed->hwINFO &= ~ED_SKIP;
				/* ... hc may need waking-up */
				switch (ed->type) {
				case PIPE_CONTROL:
					writel (OHCI_CLF,
						&ohci->regs->cmdstatus);
					break;
				case PIPE_BULK:
					writel (OHCI_BLF,
						&ohci->regs->cmdstatus);
					break;
				}
			}
		}

		td = td_next;
	}
	spin_unlock_irqrestore (&ohci->lock, flags);
}