/*
 * Copyright (c) 2001-2003 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame (struct usb_hcd *hcd);
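/*
 * Editor's sketch (not driver code): every frame entry pairs a hardware
 * pointer in ehci->periodic[] with a cpu-side pointer in ehci->pshadow[],
 * and both lists are kept in sync.  A hypothetical lockstep walk, relying
 * on hw_next being the first field of every record type:
 */
#if 0	/* illustrative only */
static void example_walk_frame (struct ehci_hcd *ehci, unsigned frame)
{
	u32			*hw = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];

	while (q->ptr) {
		int	tag = Q_NEXT_TYPE (*hw);	/* type of q's entry */

		/* every record type starts with hw_next, so the qh
		 * overlay locates it no matter what tag says
		 */
		hw = &q->qh->hw_next;
		q = periodic_next_shadow (q, tag);
	}
}
#endif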
/*-------------------------------------------------------------------------*/

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow (union ehci_shadow *periodic, int tag)
{
	switch (tag) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	// case Q_TYPE_SITD:
	default:
		return &periodic->sitd->sitd_next;
	}
}
/* returns true after successful unlink */
/* caller must hold ehci->lock */
static int periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow [frame];
	u32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	here = *prev_p;
	union ehci_shadow	*next_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
		hw_p = &here.qh->hw_next;
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr) {
		dbg ("entry %p no longer on frame [%d]", ptr, frame);
		return 0;
	}
	// vdbg ("periodic unlink %p from frame %d", ptr, frame);

	/* update hardware list ... HC may still know the old structure, so
	 * don't change hw_next until it'll have purged its cache
	 */
	next_p = periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
	*hw_p = here.qh->hw_next;

	/* unlink from shadow list; HCD won't see old structure again */
	*prev_p = *next_p;

	return 1;
}
/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
	u32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];
	unsigned		usecs = 0;

	while (q->ptr) {
		switch (Q_NEXT_TYPE (*hw_p)) {
		case Q_TYPE_QH:
			/* is it in the S-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &q->qh->hw_next;
			q = &q->qh->qh_next;
			break;
		case Q_TYPE_FSTN:
			/* for "save place" FSTNs, count the relevant INTR
			 * bandwidth from the previous frame
			 */
			if (q->fstn->hw_prev != EHCI_LIST_END) {
				ehci_dbg (ehci, "ignoring FSTN cost ...\n");
			}
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			usecs += q->itd->usecs [uframe];
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			break;
		case Q_TYPE_SITD:
			/* is it in the S-mask?  (count SPLIT, DATA) */
			if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) {
				if (q->sitd->hw_fullspeed_ep &
						__constant_cpu_to_le32 (1<<31))
					usecs += q->sitd->stream->usecs;
				else	/* worst case for OUT start-split */
					usecs += HS_USECS_ISO (188);
			}

			/* ... C-mask?  (count CSPLIT, DATA) */
			if (q->sitd->hw_uframe &
					cpu_to_le32 (1 << (8 + uframe))) {
				/* worst case for IN complete-split */
				usecs += q->sitd->stream->c_usecs;
			}

			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			break;
		}
	}
#ifdef DEBUG
	if (usecs > 100)
		err ("overallocated uframe %d, periodic is %d usecs",
			frame * 8 + uframe, usecs);
#endif
	return usecs;
}
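/*
 * Editor's note: high speed frames hold 8 microframes of 125 usec, and
 * the EHCI spec reserves at most 80% of any microframe for periodic
 * transfers -- hence the 100 usec budget used throughout this file.
 * A hypothetical check built on periodic_usecs():
 */
#if 0	/* illustrative only */
static int example_uframe_fits (struct ehci_hcd *ehci,
		unsigned frame, unsigned uframe, unsigned usecs_wanted)
{
	/* 125 usec * 80% == 100 usec of periodic budget per uframe */
	return periodic_usecs (ehci, frame, uframe) + usecs_wanted <= 100;
}
#endif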
/*-------------------------------------------------------------------------*/

static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	if (dev1->tt->multi)
		return dev1->ttport == dev2->ttport;
	else
		return 1;
}
/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision (
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	u32			uf_mask
)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage: split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow	here;
		u32			type;

		here = ehci->pshadow [frame];
		type = Q_NEXT_TYPE (ehci->periodic [frame]);
		while (here.ptr) {
			switch (type) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE (here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				if (same_tt (dev, here.qh->dev)) {
					u32	mask;

					mask = le32_to_cpu (here.qh->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE (here.qh->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt (dev, here.sitd->urb->dev)) {
					u16	mask;

					mask = le32_to_cpu (here.sitd
								->hw_uframe);
					/* FIXME assumes no gap for IN! */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE (here.sitd->hw_next);
				here = here.sitd->sitd_next;
				continue;
			// case Q_TYPE_FSTN:
			default:
				ehci_dbg (ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}

/*-------------------------------------------------------------------------*/
static int enable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did clearing PSE take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125);
	if (status != 0) {
		ehci->hcd.state = USB_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) | CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... PSS happens later */
	ehci->hcd.state = USB_STATE_RUNNING;

	/* make sure ehci_work scans these */
	ehci->next_uframe = readl (&ehci->regs->frame_index)
				% (ehci->periodic_size << 3);
	return 0;
}

static int disable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	/* did setting PSE take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
	if (status != 0) {
		ehci->hcd.state = USB_STATE_HALT;
		return status;
	}

	cmd = readl (&ehci->regs->command) & ~CMD_PSE;
	writel (cmd, &ehci->regs->command);
	/* posted write ... */

	ehci->next_uframe = -1;
	return 0;
}

/*-------------------------------------------------------------------------*/
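/*
 * Editor's note: the controller samples CMD_PSE only at frame boundaries
 * and mirrors the state it actually reached in STS_PSS.  So both routines
 * above first wait (via handshake(), at most 9 * 125 usec here) until
 * status agrees with the previous request before flipping the bit again.
 * Schematically, with hypothetical naming:
 */
#if 0	/* illustrative only */
static int example_wait_periodic_state (struct ehci_hcd *ehci, u32 expect_pss)
{
	/* poll STS_PSS until it equals expect_pss (0 or STS_PSS) */
	return handshake (&ehci->regs->status, STS_PSS, expect_pss, 9 * 125);
}
#endif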
// FIXME microframe periods not yet handled

static void intr_deschedule (
	struct ehci_hcd	*ehci,
	struct ehci_qh	*qh,
	int		wait
) {
	int		status;
	unsigned	frame = qh->start;

	do {
		periodic_unlink (ehci, frame, qh);
		qh_put (ehci, qh);
		frame += qh->period;
	} while (frame < ehci->periodic_size);

	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = 0;
	ehci->periodic_sched--;

	/* maybe turn off periodic schedule */
	if (!ehci->periodic_sched)
		status = disable_periodic (ehci);
	else {
		status = 0;
		vdbg ("periodic schedule still enabled");
	}

	/*
	 * If the hc may be looking at this qh, then delay a uframe
	 * (yeech!) to be sure it's done.
	 * No other threads may be mucking with this qh.
	 */
	if (((ehci_get_frame (&ehci->hcd) - frame) % qh->period) == 0) {
		if (wait) {
			udelay (125);
			qh->hw_next = EHCI_LIST_END;
		} else {
			/* we may not be IDLE yet, but if the qh is empty
			 * the race is very short.  then if qh also isn't
			 * rescheduled soon, it won't matter.  otherwise...
			 */
			vdbg ("intr_deschedule...");
		}
	} else
		qh->hw_next = EHCI_LIST_END;

	qh->qh_state = QH_STATE_IDLE;

	/* update per-qh bandwidth utilization (for usbfs) */
	hcd_to_bus (&ehci->hcd)->bandwidth_allocated -=
		(qh->usecs + qh->c_usecs) / qh->period;

	dbg ("descheduled qh %p, period = %d frame = %d count = %d, urbs = %d",
		qh, qh->period, frame,
		atomic_read (&qh->refcount), ehci->periodic_sched);
}
static int check_period (
	struct ehci_hcd	*ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	period,
	unsigned	usecs
) {
	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/*
	 * 80% periodic == 100 usec/uframe available
	 * convert "usecs we need" to "max already claimed"
	 */
	usecs = 100 - usecs;

	do {
		int	claimed;

		// FIXME delete when intr_submit handles non-empty queues
		// this gives us a one intr/frame limit (vs N/uframe)
		// ... and also lets us avoid tracking split transactions
		// that might collide at a given TT/hub.
		if (ehci->pshadow [frame].ptr)
			return 0;

		claimed = periodic_usecs (ehci, frame, uframe);
		if (claimed > usecs)
			return 0;

		// FIXME update to handle sub-frame periods
	} while ((frame += period) < ehci->periodic_size);

	/* success! */
	return 1;
}
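/*
 * Editor's note: check_period() inverts the budget once up front, turning
 * "usecs we need" into "max usecs others may already claim", so the loop
 * compares each uframe against a constant.  Both forms agree (assuming the
 * request never exceeds the 100 usec budget, so nothing wraps):
 */
#if 0	/* illustrative only */
static int example_budget_forms_agree (unsigned claimed, unsigned needed)
{
	return (claimed > 100 - needed) == (claimed + needed > 100);
}
#endif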
static int check_intr_schedule (
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	const struct ehci_qh	*qh,
	u32			*c_maskp
)
{
	int		retval = -ENOSPC;

	if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
		goto done;
	if (!qh->c_usecs) {
		retval = 0;
		*c_maskp = cpu_to_le32 (0);
		goto done;
	}

	/* This is a split transaction; check the bandwidth available for
	 * the completion too.  Check both worst and best case gaps: worst
	 * case is SPLIT near uframe end, and CSPLIT near start ... best is
	 * vice versa.  Difference can be almost two uframe times, but we
	 * reserve unnecessary bandwidth (waste it) this way.  (Actually
	 * even better cases exist, like immediate device NAK.)
	 *
	 * FIXME don't even bother unless we know this TT is idle in that
	 * range of uframes ... for now, check_period() allows only one
	 * interrupt transfer per frame, so needn't check "TT busy" status
	 * when scheduling a split (QH, SITD, or FSTN).
	 *
	 * FIXME ehci 0.96 and above can use FSTNs
	 */
	if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
				qh->period, qh->c_usecs))
		goto done;
	if (!check_period (ehci, frame, uframe + qh->gap_uf,
				qh->period, qh->c_usecs))
		goto done;

	*c_maskp = cpu_to_le32 (0x03 << (8 + uframe + qh->gap_uf));
	retval = 0;
done:
	return retval;
}
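/*
 * Editor's note: in hw_info2, bits 0-7 hold the interrupt schedule mask
 * (S-mask) and bits 8-15 the split completion mask (C-mask).  With the
 * hypothetical values uframe = 1 and qh->gap_uf = 2, the masks chosen
 * above would come out as:
 */
#if 0	/* illustrative only */
	u32 s_mask = 1 << 1;			/* SSPLIT issued in uframe 1 */
	u32 c_mask = 0x03 << (8 + 1 + 2);	/* CSPLITs in uframes 3 and 4 */
#endif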
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status;
	unsigned	uframe;
	u32		c_mask;
	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */

	qh->hw_next = EHCI_LIST_END;
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
		status = check_intr_schedule (ehci, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		frame = qh->period - 1;
		do {
			for (uframe = 0; uframe < 8; uframe++) {
				status = check_intr_schedule (ehci,
						frame, uframe, qh,
						&c_mask);
				if (status == 0)
					break;
			}
		} while (status && frame--);
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		qh->hw_info2 &= ~0xffff;
		qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
	} else
		dbg ("reused previous qh %p schedule", qh);

	/* stuff into the periodic schedule */
	qh->qh_state = QH_STATE_LINKED;
	dbg ("scheduled qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
		qh, qh->usecs, qh->c_usecs,
		qh->period, frame, uframe, qh->gap_uf);
	do {
		if (unlikely (ehci->pshadow [frame].ptr != 0)) {

// FIXME -- just link toward the end, before any qh with a shorter period,
// AND accommodate it already having been linked here (after some other qh)
// AS WELL AS updating the schedule checking logic

			BUG ();
		} else {
			ehci->pshadow [frame].qh = qh_get (qh);
			ehci->periodic [frame] =
				QH_NEXT (qh->qh_dma);
		}
		wmb ();
		frame += qh->period;
	} while (frame < ehci->periodic_size);

	/* update per-qh bandwidth for usbfs */
	hcd_to_bus (&ehci->hcd)->bandwidth_allocated +=
		(qh->usecs + qh->c_usecs) / qh->period;

	/* maybe enable periodic schedule processing */
	if (!ehci->periodic_sched++)
		status = enable_periodic (ehci);
done:
	return status;
}
static int intr_submit (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	int			mem_flags
) {
	unsigned		epnum;
	unsigned long		flags;
	struct ehci_qh		*qh;
	struct hcd_dev		*dev;
	int			is_input;
	int			status = 0;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = usb_pipeendpoint (urb->pipe);
	is_input = usb_pipein (urb->pipe);
	if (is_input)
		epnum |= 0x10;

	spin_lock_irqsave (&ehci->lock, flags);
	dev = (struct hcd_dev *)urb->dev->hcpriv;

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD (&empty);
	qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
	if (qh == 0) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
	BUG_ON (qh == 0);

	/* ... update usbfs periodic stats */
	hcd_to_bus (&ehci->hcd)->bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}
/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc (int mem_flags)
{
	struct ehci_iso_stream *stream;

	stream = kmalloc(sizeof *stream, mem_flags);
	if (likely (stream != 0)) {
		memset (stream, 0, sizeof(*stream));
		INIT_LIST_HEAD(&stream->td_list);
		INIT_LIST_HEAD(&stream->free_list);
		stream->next_uframe = -1;
		stream->refcount = 1;
	}
	return stream;
}
static void
iso_stream_init (
	struct ehci_iso_stream	*stream,
	struct usb_device	*dev,
	unsigned		pipe,
	unsigned		interval
)
{
	static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

	u32			buf1;
	unsigned		epnum, maxp;
	int			is_input;
	long			bandwidth;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacket field
	 */
	epnum = usb_pipeendpoint (pipe);
	is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
	if (is_input) {
		maxp = dev->epmaxpacketin [epnum];
		buf1 = (1 << 11);
	} else {
		maxp = dev->epmaxpacketout [epnum];
		buf1 = 0;
	}

	/* knows about ITD vs SITD */
	if (dev->speed == USB_SPEED_HIGH) {
		unsigned multi = hb_mult(maxp);

		stream->highspeed = 1;

		maxp = max_packet(maxp);
		buf1 |= maxp;
		maxp *= multi;

		stream->buf0 = cpu_to_le32 ((epnum << 8) | dev->devnum);
		stream->buf1 = cpu_to_le32 (buf1);
		stream->buf2 = cpu_to_le32 (multi);

		/* usbfs wants to report the average usecs per frame tied up
		 * when transfers on this endpoint are scheduled ...
		 */
		stream->usecs = HS_USECS_ISO (maxp);
		bandwidth = stream->usecs * 8;
		bandwidth /= 1 << (interval - 1);

	} else {
		u32	addr;

		addr = dev->ttport << 24;
		addr |= dev->tt->hub->devnum << 16;
		addr |= epnum << 8;
		addr |= dev->devnum;
		stream->usecs = HS_USECS_ISO (maxp);
		if (is_input) {
			u32	tmp;

			addr |= 1 << 31;
			stream->c_usecs = stream->usecs;
			stream->usecs = HS_USECS_ISO (1);
			stream->raw_mask = 1;

			/* pessimistic c-mask */
			tmp = usb_calc_bus_time (USB_SPEED_FULL, 1, 0, maxp)
					/ (125 * 1000);
			stream->raw_mask |= 3 << (tmp + 9);
		} else
			stream->raw_mask = smask_out [maxp / 188];
		bandwidth = stream->usecs + stream->c_usecs;
		bandwidth /= 1 << (interval + 2);

		/* stream->splits gets created from raw_mask later */
		stream->address = cpu_to_le32 (addr);
	}
	stream->bandwidth = bandwidth;

	stream->udev = dev;

	stream->bEndpointAddress = is_input | epnum;
	stream->interval = interval;
	stream->maxp = maxp;
}
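/*
 * Editor's note: for high speed endpoints, wMaxPacketSize packs an extra
 * per-microframe transaction count into bits 12:11, above the 11-bit
 * packet size, which hb_mult() and max_packet() (used above) unpack:
 */
#if 0	/* illustrative only; hypothetical descriptor value */
	u16 wMaxPacketSize = (2 << 11) | 1024;	/* field stores count - 1 */

	hb_mult (wMaxPacketSize);	/* == 3 transactions per uframe */
	max_packet (wMaxPacketSize);	/* == 1024 bytes each */
#endif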
static void
iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
{
	stream->refcount--;

	/* free whenever just a dev->ep reference remains.
	 * not like a QH -- no persistent state (toggle, halt)
	 */
	if (stream->refcount == 1) {
		int		is_in;
		struct hcd_dev	*dev = stream->udev->hcpriv;

		// BUG_ON (!list_empty(&stream->td_list));

		while (!list_empty (&stream->free_list)) {
			struct list_head	*entry;

			entry = stream->free_list.next;
			list_del (entry);

			/* knows about ITD vs SITD */
			if (stream->highspeed) {
				struct ehci_itd		*itd;

				itd = list_entry (entry, struct ehci_itd,
						itd_list);
				dma_pool_free (ehci->itd_pool, itd,
						itd->itd_dma);
			} else {
				struct ehci_sitd	*sitd;

				sitd = list_entry (entry, struct ehci_sitd,
						sitd_list);
				dma_pool_free (ehci->sitd_pool, sitd,
						sitd->sitd_dma);
			}
		}

		is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
		stream->bEndpointAddress &= 0x0f;
		dev->ep [is_in + stream->bEndpointAddress] = 0;

		if (stream->rescheduled) {
			ehci_info (ehci, "ep%d%s-iso rescheduled "
				"%lu times in %lu seconds\n",
				stream->bEndpointAddress, is_in ? "in" : "out",
				stream->rescheduled,
				((jiffies - stream->start)/HZ)
				);
		}

		kfree(stream);
	}
}
static inline struct ehci_iso_stream *
iso_stream_get (struct ehci_iso_stream *stream)
{
	if (likely (stream != 0))
		stream->refcount++;
	return stream;
}
static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned		epnum;
	struct hcd_dev		*dev;
	struct ehci_iso_stream	*stream;
	unsigned long		flags;

	epnum = usb_pipeendpoint (urb->pipe);
	if (usb_pipein(urb->pipe))
		epnum += 0x10;

	spin_lock_irqsave (&ehci->lock, flags);

	dev = (struct hcd_dev *)urb->dev->hcpriv;
	stream = dev->ep [epnum];

	if (unlikely (stream == 0)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely (stream != 0)) {
			/* dev->ep owns the initial refcount */
			dev->ep[epnum] = stream;
			iso_stream_init(stream, urb->dev, urb->pipe,
					urb->interval);
		}

	/* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
	} else if (unlikely (stream->hw_info1 != 0)) {
		ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum & 0x0f,
			(epnum & 0x10) ? "in" : "out");
		stream = 0;
	}

	/* caller guarantees an eventual matching iso_stream_put */
	stream = iso_stream_get (stream);

	spin_unlock_irqrestore (&ehci->lock, flags);
	return stream;
}
/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be shared, ITD-only, or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc (unsigned packets, int mem_flags)
{
	struct ehci_iso_sched	*iso_sched;
	int			size = sizeof *iso_sched;

	size += packets * sizeof (struct ehci_iso_packet);
	iso_sched = kmalloc (size, mem_flags);
	if (likely (iso_sched != 0)) {
		memset(iso_sched, 0, size);
		INIT_LIST_HEAD (&iso_sched->td_list);
	}
	return iso_sched;
}
static inline void
itd_sched_init (
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*uframe = &iso_sched->packet [i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc [i].length;
		buf = dma + urb->iso_frame_desc [i].offset;

		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;
		if (unlikely (((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_le32 (trans);

		/* might need to cross a buffer page within a td */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
}
static void
iso_sched_free (
	struct ehci_iso_stream	*stream,
	struct ehci_iso_sched	*iso_sched
)
{
	if (!iso_sched)
		return;
	// caller must hold ehci->lock!
	list_splice (&iso_sched->td_list, &stream->free_list);
	kfree (iso_sched);
}
static int
itd_urb_transaction (
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	int			mem_flags
)
{
	struct ehci_itd		*itd;
	dma_addr_t		itd_dma;
	int			i;
	unsigned		num_itds;
	struct ehci_iso_sched	*sched;
	unsigned long		flags;

	sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (unlikely (sched == 0))
		return -ENOMEM;

	itd_sched_init (sched, stream, urb);

	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/* free_list.next might be cache-hot ... but maybe
		 * the HC caches it too. avoid that issue for now.
		 */

		/* prefer previously-allocated itds */
		if (likely (!list_empty(&stream->free_list))) {
			itd = list_entry (stream->free_list.prev,
					struct ehci_itd, itd_list);
			list_del (&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else
			itd = 0;

		if (!itd) {
			spin_unlock_irqrestore (&ehci->lock, flags);
			itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
		}

		if (unlikely (0 == itd)) {
			iso_sched_free (stream, sched);
			spin_unlock_irqrestore (&ehci->lock, flags);
			return -ENOMEM;
		}
		memset (itd, 0, sizeof *itd);
		itd->itd_dma = itd_dma;
		list_add (&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore (&ehci->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}
/*-------------------------------------------------------------------------*/

static inline int
itd_slot_ok (
	struct ehci_hcd		*ehci,
	u32			mod,
	u32			uframe,
	u8			usecs,
	u32			period
)
{
	uframe %= period;
	do {
		/* can't commit more than 80% periodic == 100 usec */
		if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
				> (100 - usecs))
			return 0;

		/* we know urb->interval is 2^N uframes */
		uframe += period;
	} while (uframe < mod);
	return 1;
}
static inline int
sitd_slot_ok (
	struct ehci_hcd		*ehci,
	u32			mod,
	struct ehci_iso_stream	*stream,
	u32			uframe,
	struct ehci_iso_sched	*sched,
	u32			period_uframes
)
{
	u32		mask, tmp;
	u32		frame, uf;

	mask = stream->raw_mask << (uframe & 7);

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* this multi-pass logic is simple, but performance may
	 * suffer when the schedule data isn't cached.
	 */

	/* check bandwidth */
	uframe %= period_uframes;
	do {
		u32	max_used;

		frame = uframe >> 3;
		uf = uframe & 7;

		/* tt must be idle for start(s), any gap, and csplit.
		 * assume scheduling slop leaves 10+% for control/bulk.
		 */
		if (!tt_no_collision (ehci, period_uframes << 3,
				stream->udev, frame, mask))
			return 0;

		/* check starts (OUT uses more than one) */
		max_used = 100 - stream->usecs;
		for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (periodic_usecs (ehci, frame, uf) > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->c_usecs) {
			max_used = 100 - stream->c_usecs;
			do {
				tmp = 1 << uf;
				tmp <<= 8;
				if ((stream->raw_mask & tmp) == 0)
					continue;
				if (periodic_usecs (ehci, frame, uf)
						> max_used)
					return 0;
			} while (++uf < 8);
		}

		/* we know urb->interval is 2^N uframes */
		uframe += period_uframes;
	} while (uframe < mod);

	stream->splits = stream->raw_mask << (uframe & 7);
	cpu_to_le32s (&stream->splits);
	return 1;
}
/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

#define SCHEDULE_SLOP	10	/* frames */
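/*
 * Editor's note, worked numbers: with the default 1024-frame periodic
 * schedule, mod = 1024 << 3 = 8192 uframes, i.e. 1.024 seconds; the slop
 * reserves 10 frames = 80 uframes ahead of "now", matching the
 * "sched->span > (mod - 8 * SCHEDULE_SLOP)" length check below.
 */
#if 0	/* illustrative only */
	unsigned mod = 1024 << 3;			/* 8192 uframes */
	unsigned longest_span = mod - 8 * SCHEDULE_SLOP;	/* 8112 uframes */
#endif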
static int
iso_stream_schedule (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct ehci_iso_stream	*stream
)
{
	u32			now, start, max, period;
	int			status;
	unsigned		mod = ehci->periodic_size << 3;
	struct ehci_iso_sched	*sched = urb->hcpriv;

	if (sched->span > (mod - 8 * SCHEDULE_SLOP)) {
		ehci_dbg (ehci, "iso request %p too long\n", urb);
		status = -EFBIG;
		goto fail;
	}

	if ((stream->depth + sched->span) > mod) {
		ehci_dbg (ehci, "request %p would overflow (%d+%d>%d)\n",
			urb, stream->depth, sched->span, mod);
		status = -EFBIG;
		goto fail;
	}

	now = readl (&ehci->regs->frame_index) % mod;

	/* when's the last uframe this urb could start? */
	max = now + mod;

	/* typical case: reuse current schedule. stream is still active,
	 * and no gaps from host falling behind (irq delays etc)
	 */
	if (likely (!list_empty (&stream->td_list))) {
		start = stream->next_uframe;
		if (start < now)
			start += mod;
		if (likely ((start + sched->span) < max))
			goto ready;
		/* else fell behind; someday, try to reschedule */
		status = -EL2NSYNC;
		goto fail;
	}

	/* need to schedule; when's the next (u)frame we could start?
	 * this is bigger than ehci->i_thresh allows; scheduling itself
	 * isn't free, the slop should handle reasonably slow cpus.  it
	 * can also help high bandwidth if the dma and irq loads don't
	 * jump until after the queue is primed.
	 */
	start = SCHEDULE_SLOP * 8 + (now & ~0x07);
	start %= mod;
	stream->next_uframe = start;

	/* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */

	period = urb->interval;
	if (!stream->highspeed)
		period <<= 3;

	/* find a uframe slot with enough bandwidth */
	for (; start < (stream->next_uframe + period); start++) {
		int	enough_space;

		/* check schedule: enough space? */
		if (stream->highspeed)
			enough_space = itd_slot_ok (ehci, mod, start,
					stream->usecs, period);
		else {
			if ((start % 8) >= 6)
				continue;
			enough_space = sitd_slot_ok (ehci, mod, stream,
					start, sched, period);
		}

		/* schedule it here if there's enough bandwidth */
		if (enough_space) {
			stream->next_uframe = start % mod;
			goto ready;
		}
	}

	/* no room in the schedule */
	ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n",
		list_empty (&stream->td_list) ? "" : "re",
		urb, now, max);
	status = -ENOSPC;

fail:
	iso_sched_free (stream, sched);
	urb->hcpriv = 0;
	return status;

ready:
	urb->start_frame = stream->next_uframe;
	return 0;
}
/*-------------------------------------------------------------------------*/

static inline void
itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd)
{
	int i;

	itd->hw_next = EHCI_LIST_END;
	itd->hw_bufp [0] = stream->buf0;
	itd->hw_bufp [1] = stream->buf1;
	itd->hw_bufp [2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index [i] = -1;

	/* All other fields are filled when scheduling */
}
static inline void
itd_patch (
	struct ehci_itd		*itd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index,
	u16			uframe,
	int			first
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet [index];
	unsigned		pg = itd->pg;

	// BUG_ON (pg == 6 && uf->cross);

	uframe &= 0x07;
	itd->index [uframe] = index;

	itd->hw_transaction [uframe] = uf->transaction;
	itd->hw_transaction [uframe] |= cpu_to_le32 (pg << 12);
	itd->hw_bufp [pg] |= cpu_to_le32 (uf->bufp & ~(u32)0);
	itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely (!first && uf->cross)) {
		u64	bufp = uf->bufp + 4096;

		itd->pg = ++pg;
		itd->hw_bufp [pg] |= cpu_to_le32 (bufp & ~(u32)0);
		itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(bufp >> 32));
	}
}
static inline void
itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{
	/* always prepend ITD/SITD ... only QH tree is order-sensitive */
	itd->itd_next = ehci->pshadow [frame];
	itd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].itd = itd;
	itd->frame = frame;
	wmb ();
	ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
}
/* fit urb's itds into the selected schedule slot; activate as needed */
static int
itd_link_urb (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet, first = 1;
	unsigned		next_uframe, uframe, frame;
	struct ehci_iso_sched	*iso_sched = urb->hcpriv;
	struct ehci_itd		*itd;

	next_uframe = stream->next_uframe % mod;

	if (unlikely (list_empty(&stream->td_list))) {
		hcd_to_bus (&ehci->hcd)->bandwidth_allocated
				+= stream->bandwidth;
		ehci_vdbg (ehci,
			"schedule devp %s ep%d%s-iso period %d start %d.%d\n",
			urb->dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
			urb->interval,
			next_uframe >> 3, next_uframe & 0x7);
		stream->start = jiffies;
	}
	hcd_to_bus (&ehci->hcd)->bandwidth_isoc_reqs++;

	/* fill iTDs uframe by uframe */
	for (packet = 0, itd = 0; packet < urb->number_of_packets; ) {
		if (itd == 0) {
			/* ASSERT:  we have all necessary itds */
			// BUG_ON (list_empty (&iso_sched->td_list));

			/* ASSERT:  no itds for this endpoint in this uframe */

			itd = list_entry (iso_sched->td_list.next,
					struct ehci_itd, itd_list);
			list_move_tail (&itd->itd_list, &stream->td_list);
			itd->stream = iso_stream_get (stream);
			itd->urb = usb_get_urb (urb);
			first = 1;
			itd_init (stream, itd);
		}

		uframe = next_uframe & 0x07;
		frame = next_uframe >> 3;

		itd->usecs [uframe] = stream->usecs;
		itd_patch (itd, iso_sched, packet, uframe, first);
		first = 0;

		next_uframe += stream->interval;
		stream->depth += stream->interval;
		next_uframe %= mod;
		packet++;

		/* link completed itds into the schedule */
		if (((next_uframe >> 3) != frame)
				|| packet == urb->number_of_packets) {
			itd_link (ehci, frame % ehci->periodic_size, itd);
			itd = 0;
		}
	}
	stream->next_uframe = next_uframe;

	/* don't need that schedule data any more */
	iso_sched_free (stream, iso_sched);
	urb->hcpriv = 0;

	timer_action (ehci, TIMER_IO_WATCHDOG);
	if (unlikely (!ehci->periodic_sched++))
		return enable_periodic (ehci);
	return 0;
}
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

static unsigned
itd_complete (
	struct ehci_hcd	*ehci,
	struct ehci_itd	*itd,
	struct pt_regs	*regs
) {
	struct urb				*urb = itd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	unsigned				uframe;
	int					urb_index = -1;
	struct ehci_iso_stream			*stream = itd->stream;
	struct usb_device			*dev;

	/* for each uframe with a packet */
	for (uframe = 0; uframe < 8; uframe++) {
		if (likely (itd->index[uframe] == -1))
			continue;
		urb_index = itd->index[uframe];
		desc = &urb->iso_frame_desc [urb_index];

		t = le32_to_cpup (&itd->hw_transaction [uframe]);
		itd->hw_transaction [uframe] = 0;
		stream->depth -= stream->interval;

		/* report transfer status */
		if (unlikely (t & ISO_ERRS)) {
			urb->error_count++;
			if (t & EHCI_ISOC_BUF_ERR)
				desc->status = usb_pipein (urb->pipe)
					? -ENOSR  /* hc couldn't read */
					: -ECOMM; /* hc couldn't write */
			else if (t & EHCI_ISOC_BABBLE)
				desc->status = -EOVERFLOW;
			else /* (t & EHCI_ISOC_XACTERR) */
				desc->status = -EPROTO;

			/* HC need not update length with this error */
			if (!(t & EHCI_ISOC_BABBLE))
				desc->actual_length = EHCI_ITD_LENGTH (t);
		} else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
			desc->status = 0;
			desc->actual_length = EHCI_ITD_LENGTH (t);
		}
	}

	usb_put_urb (urb);
	itd->urb = 0;
	itd->stream = 0;
	list_move (&itd->itd_list, &stream->free_list);
	iso_stream_put (ehci, stream);

	/* handle completion now? */
	if (likely ((urb_index + 1) != urb->number_of_packets))
		return 0;

	/* ASSERT: it's really the last itd for this urb
	list_for_each_entry (itd, &stream->td_list, itd_list)
		BUG_ON (itd->urb == urb);
	 */

	/* give urb back to the driver ... can be out-of-order */
	dev = usb_get_dev (urb->dev);
	ehci_urb_done (ehci, urb, regs);
	urb = 0;

	/* defer stopping schedule; completion can submit */
	ehci->periodic_sched--;
	if (unlikely (!ehci->periodic_sched))
		(void) disable_periodic (ehci);
	hcd_to_bus (&ehci->hcd)->bandwidth_isoc_reqs--;

	if (unlikely (list_empty (&stream->td_list))) {
		hcd_to_bus (&ehci->hcd)->bandwidth_allocated
				-= stream->bandwidth;
		ehci_vdbg (ehci,
			"deschedule devp %s ep%d%s-iso\n",
			dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
	}
	iso_stream_put (ehci, stream);
	usb_put_dev (dev);

	return 1;
}
/*-------------------------------------------------------------------------*/

static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (unlikely (stream == 0)) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (unlikely (urb->interval != stream->interval)) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->interval, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
		__FUNCTION__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length,
		urb->number_of_packets, urb->interval,
		stream);
#endif

	/* allocate ITDs w/o locking anything */
	status = itd_urb_transaction (stream, ehci, urb, mem_flags);
	if (unlikely (status < 0)) {
		ehci_dbg (ehci, "can't init itds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	status = iso_stream_schedule (ehci, urb, stream);
	if (likely (status == 0))
		itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	spin_unlock_irqrestore (&ehci->lock, flags);

done:
	if (unlikely (status < 0))
		iso_stream_put (ehci, stream);
	return status;
}
#ifdef CONFIG_USB_EHCI_SPLIT_ISO

/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs.  These need microframe scheduling.
 */

static inline void
sitd_sched_init (
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many frames are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-frame sitd fields that we'll need later
	 * when we fit new sitds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*packet = &iso_sched->packet [i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc [i].length & 0x03ff;
		buf = dma + urb->iso_frame_desc [i].offset;

		trans = SITD_STS_ACTIVE;
		if (((i + 1) == urb->number_of_packets)
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= SITD_IOC;
		trans |= length << 16;
		packet->transaction = cpu_to_le32 (trans);

		/* might need to cross a buffer page within a td */
		packet->bufp = buf;
		packet->buf1 = (buf + length) & ~0x0fff;
		if (packet->buf1 != (buf & ~(u64)0x0fff))
			packet->cross = 1;

		/* OUT uses multiple start-splits */
		if (stream->bEndpointAddress & USB_DIR_IN)
			continue;
		length = 1 + (length / 188);
		packet->buf1 |= length;
		if (length > 1) /* BEGIN vs ALL */
			packet->buf1 |= 1 << 3;
	}
}
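/*
 * Editor's note: each start-split for an OUT carries at most 188 bytes,
 * so the "1 + length/188" above budgets the SSPLIT count; when it comes
 * out above one, TP is coded BEGIN (bit 3) instead of ALL.  E.g. for a
 * hypothetical 300 byte OUT packet:
 */
#if 0	/* illustrative only */
	unsigned length = 300;
	unsigned tcount = 1 + (length / 188);	/* == 2 start-splits */
	/* tcount > 1, so packet->buf1 gets the BEGIN encoding above */
#endif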
static int
sitd_urb_transaction (
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	int			mem_flags
)
{
	struct ehci_sitd	*sitd;
	dma_addr_t		sitd_dma;
	int			i;
	struct ehci_iso_sched	*iso_sched;
	unsigned long		flags;

	iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (iso_sched == 0)
		return -ENOMEM;

	sitd_sched_init (iso_sched, stream, urb);

	/* allocate/init sITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < urb->number_of_packets; i++) {

		/* NOTE:  for now, we don't try to handle wraparound cases
		 * for IN (using sitd->hw_backpointer, like a FSTN), which
		 * means we never need two sitds for full speed packets.
		 */

		/* free_list.next might be cache-hot ... but maybe
		 * the HC caches it too. avoid that issue for now.
		 */

		/* prefer previously-allocated sitds */
		if (!list_empty(&stream->free_list)) {
			sitd = list_entry (stream->free_list.prev,
					struct ehci_sitd, sitd_list);
			list_del (&sitd->sitd_list);
			sitd_dma = sitd->sitd_dma;
		} else
			sitd = 0;

		if (!sitd) {
			spin_unlock_irqrestore (&ehci->lock, flags);
			sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
					&sitd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
		}

		if (!sitd) {
			iso_sched_free (stream, iso_sched);
			spin_unlock_irqrestore (&ehci->lock, flags);
			return -ENOMEM;
		}
		memset (sitd, 0, sizeof *sitd);
		sitd->sitd_dma = sitd_dma;
		list_add (&sitd->sitd_list, &iso_sched->td_list);
	}

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = iso_sched;
	urb->error_count = 0;

	spin_unlock_irqrestore (&ehci->lock, flags);
	return 0;
}
/*-------------------------------------------------------------------------*/

static inline void
sitd_patch (
	struct ehci_iso_stream	*stream,
	struct ehci_sitd	*sitd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet [index];
	u64			bufp = uf->bufp;

	sitd->hw_next = EHCI_LIST_END;
	sitd->hw_fullspeed_ep = stream->address;
	sitd->hw_uframe = stream->splits;
	sitd->hw_results = uf->transaction;
	sitd->hw_backpointer = EHCI_LIST_END;

	sitd->hw_buf [0] = cpu_to_le32 (bufp);
	sitd->hw_buf_hi [0] = cpu_to_le32 (bufp >> 32);

	sitd->hw_buf [1] = cpu_to_le32 (uf->buf1);
	if (uf->cross)
		bufp += 4096;
	sitd->hw_buf_hi [1] = cpu_to_le32 (bufp >> 32);
	sitd->index = index;
}
static inline void
sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{
	/* note: sitd ordering could matter (CSPLIT then SSPLIT) */
	sitd->sitd_next = ehci->pshadow [frame];
	sitd->hw_next = ehci->periodic [frame];
	ehci->pshadow [frame].sitd = sitd;
	sitd->frame = frame;
	wmb ();
	ehci->periodic [frame] = cpu_to_le32 (sitd->sitd_dma) | Q_TYPE_SITD;
}
/* fit urb's sitds into the selected schedule slot; activate as needed */
static int
sitd_link_urb (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{
	int			packet;
	unsigned		next_uframe;
	struct ehci_iso_sched	*sched = urb->hcpriv;
	struct ehci_sitd	*sitd;

	next_uframe = stream->next_uframe;

	if (list_empty(&stream->td_list)) {
		/* usbfs ignores TT bandwidth */
		hcd_to_bus (&ehci->hcd)->bandwidth_allocated
				+= stream->bandwidth;
		ehci_vdbg (ehci,
			"sched dev%s ep%d%s-iso [%d] %dms/%04x\n",
			urb->dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
			(next_uframe >> 3) % ehci->periodic_size,
			stream->interval, le32_to_cpu (stream->splits));
		stream->start = jiffies;
	}
	hcd_to_bus (&ehci->hcd)->bandwidth_isoc_reqs++;

	/* fill sITDs frame by frame */
	for (packet = 0, sitd = 0;
			packet < urb->number_of_packets;
			packet++) {

		/* ASSERT:  we have all necessary sitds */
		BUG_ON (list_empty (&sched->td_list));

		/* ASSERT:  no itds for this endpoint in this frame */

		sitd = list_entry (sched->td_list.next,
				struct ehci_sitd, sitd_list);
		list_move_tail (&sitd->sitd_list, &stream->td_list);
		sitd->stream = iso_stream_get (stream);
		sitd->urb = usb_get_urb (urb);

		sitd_patch (stream, sitd, sched, packet);
		sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size,
				sitd);

		next_uframe += stream->interval << 3;
		stream->depth += stream->interval << 3;
	}
	stream->next_uframe = next_uframe % mod;

	/* don't need that schedule data any more */
	iso_sched_free (stream, sched);
	urb->hcpriv = 0;

	timer_action (ehci, TIMER_IO_WATCHDOG);
	if (!ehci->periodic_sched++)
		return enable_periodic (ehci);
	return 0;
}
/*-------------------------------------------------------------------------*/

#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
			| SITD_STS_XACT | SITD_STS_MMF | SITD_STS_STS)

static unsigned
sitd_complete (
	struct ehci_hcd		*ehci,
	struct ehci_sitd	*sitd,
	struct pt_regs		*regs
) {
	struct urb				*urb = sitd->urb;
	struct usb_iso_packet_descriptor	*desc;
	u32					t;
	int					urb_index = -1;
	struct ehci_iso_stream			*stream = sitd->stream;
	struct usb_device			*dev;

	urb_index = sitd->index;
	desc = &urb->iso_frame_desc [urb_index];
	t = le32_to_cpup (&sitd->hw_results);

	/* report transfer status */
	if (t & SITD_ERRS) {
		urb->error_count++;
		if (t & SITD_STS_DBE)
			desc->status = usb_pipein (urb->pipe)
				? -ENOSR  /* hc couldn't read */
				: -ECOMM; /* hc couldn't write */
		else if (t & SITD_STS_BABBLE)
			desc->status = -EOVERFLOW;
		else /* XACT, MMF, etc */
			desc->status = -EPROTO;
	} else {
		desc->status = 0;
		desc->actual_length = desc->length - SITD_LENGTH (t);
	}

	usb_put_urb (urb);
	sitd->urb = 0;
	sitd->stream = 0;
	list_move (&sitd->sitd_list, &stream->free_list);
	stream->depth -= stream->interval << 3;
	iso_stream_put (ehci, stream);

	/* handle completion now? */
	if ((urb_index + 1) != urb->number_of_packets)
		return 0;

	/* ASSERT: it's really the last sitd for this urb
	list_for_each_entry (sitd, &stream->td_list, sitd_list)
		BUG_ON (sitd->urb == urb);
	 */

	/* give urb back to the driver */
	dev = usb_get_dev (urb->dev);
	ehci_urb_done (ehci, urb, regs);
	urb = 0;

	/* defer stopping schedule; completion can submit */
	ehci->periodic_sched--;
	if (!ehci->periodic_sched)
		(void) disable_periodic (ehci);
	hcd_to_bus (&ehci->hcd)->bandwidth_isoc_reqs--;

	if (list_empty (&stream->td_list)) {
		hcd_to_bus (&ehci->hcd)->bandwidth_allocated
				-= stream->bandwidth;
		ehci_vdbg (ehci,
			"deschedule devp %s ep%d%s-iso\n",
			dev->devpath, stream->bEndpointAddress & 0x0f,
			(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
	}
	iso_stream_put (ehci, stream);
	usb_put_dev (dev);

	return 1;
}
static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
{
	int			status = -EINVAL;
	unsigned long		flags;
	struct ehci_iso_stream	*stream;

	// FIXME remove when csplits behave
	if (usb_pipein(urb->pipe)) {
		ehci_dbg (ehci, "no iso-IN split transactions yet\n");
		return -ENOMEM;
	}

	/* Get iso_stream head */
	stream = iso_stream_find (ehci, urb);
	if (stream == 0) {
		ehci_dbg (ehci, "can't get iso stream\n");
		return -ENOMEM;
	}
	if (urb->interval != stream->interval) {
		ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
			stream->interval, urb->interval);
		goto done;
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"submit %p dev%s ep%d%s-iso len %d\n",
		urb, urb->dev->devpath,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		urb->transfer_buffer_length);
#endif

	/* allocate SITDs */
	status = sitd_urb_transaction (stream, ehci, urb, mem_flags);
	if (status < 0) {
		ehci_dbg (ehci, "can't init sitds\n");
		goto done;
	}

	/* schedule ... need to lock */
	spin_lock_irqsave (&ehci->lock, flags);
	status = iso_stream_schedule (ehci, urb, stream);
	if (status == 0)
		sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
	spin_unlock_irqrestore (&ehci->lock, flags);

done:
	if (status < 0)
		iso_stream_put (ehci, stream);
	return status;
}
#else

static inline int
sitd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
{
	ehci_dbg (ehci, "split iso support is disabled\n");
	return -ENOSYS;
}

static inline unsigned
sitd_complete (
	struct ehci_hcd		*ehci,
	struct ehci_sitd	*sitd,
	struct pt_regs		*regs
) {
	ehci_err (ehci, "sitd_complete %p?\n", sitd);
	return 0;
}

#endif /* USB_EHCI_SPLIT_ISO */
/*-------------------------------------------------------------------------*/

static void
scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs)
{
	unsigned	frame, clock, now_uframe, mod;
	unsigned	modified;

	mod = ehci->periodic_size << 3;

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 */
	now_uframe = ehci->next_uframe;
	if (HCD_IS_RUNNING (ehci->hcd.state))
		clock = readl (&ehci->regs->frame_index);
	else
		clock = now_uframe + mod - 1;
	clock %= mod;

	for (;;) {
		union ehci_shadow	q, *q_p;
		u32			type, *hw_p;
		unsigned		uframes;

		/* don't scan past the live uframe */
		frame = now_uframe >> 3;
		if (frame == (clock >> 3))
			uframes = now_uframe & 0x07;
		else {
			/* safe to scan the whole frame at once */
			now_uframe |= 0x07;
			uframes = 8;
		}

restart:
		/* scan each element in frame's queue for completions */
		q_p = &ehci->pshadow [frame];
		hw_p = &ehci->periodic [frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE (*hw_p);
		modified = 0;

		while (q.ptr != 0) {
			unsigned		uf;
			union ehci_shadow	temp;

			switch (type) {
			case Q_TYPE_QH:
				/* handle any completions */
				temp.qh = qh_get (q.qh);
				type = Q_NEXT_TYPE (q.qh->hw_next);
				q = q.qh->qh_next;
				modified = qh_completions (ehci, temp.qh, regs);
				if (unlikely (list_empty (&temp.qh->qtd_list)))
					intr_deschedule (ehci, temp.qh, 0);
				qh_put (ehci, temp.qh);
				break;
			case Q_TYPE_FSTN:
				/* for "save place" FSTNs, look at QH entries
				 * in the previous frame for completions.
				 */
				if (q.fstn->hw_prev != EHCI_LIST_END) {
					dbg ("ignoring completions from FSTNs");
				}
				type = Q_NEXT_TYPE (q.fstn->hw_next);
				q = q.fstn->fstn_next;
				break;
			case Q_TYPE_ITD:
				/* skip itds for later in the frame */
				rmb ();
				for (uf = uframes; uf < 8; uf++) {
					if (0 == (q.itd->hw_transaction [uf]
						& cpu_to_le32 (EHCI_ISOC_ACTIVE)))
						continue;
					q_p = &q.itd->itd_next;
					hw_p = &q.itd->hw_next;
					type = Q_NEXT_TYPE (q.itd->hw_next);
					q = *q_p;
					break;
				}
				if (uf != 8)
					break;

				/* this one's ready ... HC won't cache the
				 * pointer for much longer, if at all.
				 */
				*q_p = q.itd->itd_next;
				*hw_p = q.itd->hw_next;
				type = Q_NEXT_TYPE (q.itd->hw_next);
				wmb ();
				modified = itd_complete (ehci, q.itd, regs);
				q = *q_p;
				break;
			case Q_TYPE_SITD:
				if (q.sitd->hw_results & SITD_ACTIVE) {
					q_p = &q.sitd->sitd_next;
					hw_p = &q.sitd->hw_next;
					type = Q_NEXT_TYPE (q.sitd->hw_next);
					q = *q_p;
					break;
				}
				*q_p = q.sitd->sitd_next;
				*hw_p = q.sitd->hw_next;
				type = Q_NEXT_TYPE (q.sitd->hw_next);
				wmb ();
				modified = sitd_complete (ehci, q.sitd, regs);
				q = *q_p;
				break;
			default:
				dbg ("corrupt type %d frame %d shadow %p",
					type, frame, q.ptr);
				// BUG ();
				q.ptr = 0;
			}

			/* assume completion callbacks modify the queue */
			if (unlikely (modified))
				goto restart;
		}

		/* stop when we catch up to the HC */

		// FIXME:  this assumes we won't get lapped when
		// latencies climb; that should be rare, but...
		// detect it, and just go all the way around.
		// FLR might help detect this case, so long as latencies
		// don't exceed periodic_size msec (default 1.024 sec).

		// FIXME:  likewise assumes HC doesn't halt mid-scan

		if (now_uframe == clock) {
			unsigned	now;

			if (!HCD_IS_RUNNING (ehci->hcd.state))
				break;
			ehci->next_uframe = now_uframe;
			now = readl (&ehci->regs->frame_index) % mod;
			if (now_uframe == now)
				break;

			/* rescan the rest of this frame, then ... */
			clock = now;
		} else {
			now_uframe++;
			now_uframe %= mod;
		}
	}
}