/*
- * Copyright (c) 2001-2003 by David Brownell
+ * Copyright (c) 2001-2004 by David Brownell
* Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
- *
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* @tag: hardware tag for type of this record
*/
static union ehci_shadow *
-periodic_next_shadow (union ehci_shadow *periodic, int tag)
+periodic_next_shadow (union ehci_shadow *periodic, __le32 tag)
{
switch (tag) {
case Q_TYPE_QH:
}
}
-/* returns true after successful unlink */
/* caller must hold ehci->lock */
-static int periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
+static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
union ehci_shadow *prev_p = &ehci->pshadow [frame];
- u32 *hw_p = &ehci->periodic [frame];
+ __le32 *hw_p = &ehci->periodic [frame];
union ehci_shadow here = *prev_p;
- union ehci_shadow *next_p;
/* find predecessor of "ptr"; hw and shadow lists are in sync */
while (here.ptr && here.ptr != ptr) {
prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
- hw_p = &here.qh->hw_next;
+ hw_p = here.hw_next;
here = *prev_p;
}
/* an interrupt entry (at list end) could have been shared */
- if (!here.ptr) {
- dbg ("entry %p no longer on frame [%d]", ptr, frame);
- return 0;
- }
- // vdbg ("periodic unlink %p from frame %d", ptr, frame);
+ if (!here.ptr)
+ return;
- /* update hardware list ... HC may still know the old structure, so
- * don't change hw_next until it'll have purged its cache
+ /* update shadow and hardware lists ... the old "next" pointers
+ * from ptr may still be in use, the caller updates them.
*/
- next_p = periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
- *hw_p = here.qh->hw_next;
-
- /* unlink from shadow list; HCD won't see old structure again */
- *prev_p = *next_p;
- next_p->ptr = 0;
-
- return 1;
+ *prev_p = *periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
+ *hw_p = *here.hw_next;
}
/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
- u32 *hw_p = &ehci->periodic [frame];
+ __le32 *hw_p = &ehci->periodic [frame];
union ehci_shadow *q = &ehci->pshadow [frame];
unsigned usecs = 0;
hw_p = &q->qh->hw_next;
q = &q->qh->qh_next;
break;
- case Q_TYPE_FSTN:
+ // case Q_TYPE_FSTN:
+ default:
/* for "save place" FSTNs, count the relevant INTR
* bandwidth from the previous frame
*/
hw_p = &q->sitd->hw_next;
q = &q->sitd->sitd_next;
break;
- default:
- BUG ();
}
}
#ifdef DEBUG
if (usecs > 100)
- err ("overallocated uframe %d, periodic is %d usecs",
+ ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
frame * 8 + uframe, usecs);
#endif
return usecs;
return 1;
}
+#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
+
+/* Which uframe does the low/fullspeed transfer start in?
+ *
+ * The parameter is the mask of ssplits in "H-frame" terms
+ * and this returns the transfer start uframe in "B-frame" terms,
+ * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
+ * will cause a transfer in "B-frame" uframe 0.  "B-frames" lag
+ * "H-frames" by 1 uframe.  See the EHCI spec sec 4.5 and figure 4.7.
+ */
+static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __le32 mask)
+{
+	unsigned char smask = QH_SMASK & le32_to_cpu(mask);
+	if (!smask) {
+		ehci_err(ehci, "invalid empty smask!\n");
+		/* uframe 7 can't have bw so this will indicate failure */
+		return 7;
+	}
+	/* ffs() is 1-based: lowest set S-mask bit --> 0-based uframe index */
+	return ffs(smask) - 1;
+}
+
+/* usecs of downstream low/fullspeed bandwidth the TT can move in each
+ * uframe; uframes 0..5 are fully usable, 6 only partly, 7 not at all
+ */
+static const unsigned char
+max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
+
+/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
+static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
+{
+	int i;
+	for (i=0; i<7; i++) {
+		if (max_tt_usecs[i] < tt_usecs[i]) {
+			/* push the excess into the next uframe's budget;
+			 * any overrun of uframe 7 is the caller's to detect
+			 */
+			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
+			tt_usecs[i] = max_tt_usecs[i];
+		}
+	}
+}
+
+/* How many of the tt's periodic downstream 1000 usecs are allocated?
+ *
+ * While this measures the bandwidth in terms of usecs/uframe,
+ * the low/fullspeed bus has no notion of uframes, so any particular
+ * low/fullspeed transfer can "carry over" from one uframe to the next,
+ * since the TT just performs downstream transfers in sequence.
+ *
+ * For example, two separate 100 usec transfers can start in the same uframe,
+ * and the second one would "carry over" 75 usecs into the next uframe.
+ */
+static void
+periodic_tt_usecs (
+	struct ehci_hcd *ehci,
+	struct usb_device *dev,
+	unsigned frame,
+	unsigned short tt_usecs[8]
+)
+{
+	__le32 *hw_p = &ehci->periodic [frame];
+	union ehci_shadow *q = &ehci->pshadow [frame];
+	unsigned char uf;
+
+	/* 16 == 8 * sizeof(tt_usecs[0]): zero all eight uframe counters */
+	memset(tt_usecs, 0, 16);
+
+	/* walk this frame's schedule, summing usecs of entries on "dev"'s TT */
+	while (q->ptr) {
+		switch (Q_NEXT_TYPE(*hw_p)) {
+		case Q_TYPE_ITD:
+			/* highspeed iso; never goes through a TT, just skip */
+			hw_p = &q->itd->hw_next;
+			q = &q->itd->itd_next;
+			continue;
+		case Q_TYPE_QH:
+			if (same_tt(dev, q->qh->dev)) {
+				uf = tt_start_uframe(ehci, q->qh->hw_info2);
+				tt_usecs[uf] += q->qh->tt_usecs;
+			}
+			hw_p = &q->qh->hw_next;
+			q = &q->qh->qh_next;
+			continue;
+		case Q_TYPE_SITD:
+			if (same_tt(dev, q->sitd->urb->dev)) {
+				uf = tt_start_uframe(ehci, q->sitd->hw_uframe);
+				tt_usecs[uf] += q->sitd->stream->tt_usecs;
+			}
+			hw_p = &q->sitd->hw_next;
+			q = &q->sitd->sitd_next;
+			continue;
+		// case Q_TYPE_FSTN:
+		default:
+			/* FSTNs aren't scheduled by this driver; don't count them */
+			ehci_dbg(ehci,
+				"ignoring periodic frame %d FSTN\n", frame);
+			hw_p = &q->fstn->hw_next;
+			q = &q->fstn->fstn_next;
+		}
+	}
+
+	carryover_tt_bandwidth(tt_usecs);
+
+	if (max_tt_usecs[7] < tt_usecs[7])
+		ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n",
+			frame, tt_usecs[7] - max_tt_usecs[7]);
+}
+
+/*
+ * Return true if the device's tt's downstream bus is available for a
+ * periodic transfer of the specified length (usecs), starting at the
+ * specified frame/uframe.  Note that (as summarized in section 11.19
+ * of the usb 2.0 spec) TTs can buffer multiple transactions for each
+ * uframe.
+ *
+ * The uframe parameter is when the fullspeed/lowspeed transfer
+ * should be executed in "B-frame" terms, which is the same as the
+ * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
+ * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
+ * See the EHCI spec sec 4.5 and fig 4.7.
+ *
+ * This checks if the full/lowspeed bus, at the specified starting uframe,
+ * has the specified bandwidth available, according to rules listed
+ * in USB 2.0 spec section 11.18.1 fig 11-60.
+ *
+ * This does not check if the transfer would exceed the max ssplit
+ * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
+ * since proper scheduling limits ssplits to less than 16 per uframe.
+ */
+static int tt_available (
+	struct ehci_hcd *ehci,
+	unsigned period,
+	struct usb_device *dev,
+	unsigned frame,
+	unsigned uframe,
+	u16 usecs
+)
+{
+	if ((period == 0) || (uframe >= 7))	/* error */
+		return 0;
+
+	/* the transfer must fit in EVERY frame its period touches */
+	for (; frame < ehci->periodic_size; frame += period) {
+		unsigned short tt_usecs[8];
+
+		periodic_tt_usecs (ehci, dev, frame, tt_usecs);
+
+		ehci_vdbg(ehci, "tt frame %d check %d usecs start uframe %d in"
+			" schedule %d/%d/%d/%d/%d/%d/%d/%d\n",
+			frame, usecs, uframe,
+			tt_usecs[0], tt_usecs[1], tt_usecs[2], tt_usecs[3],
+			tt_usecs[4], tt_usecs[5], tt_usecs[6], tt_usecs[7]);
+
+		if (max_tt_usecs[uframe] <= tt_usecs[uframe]) {
+			ehci_vdbg(ehci, "frame %d uframe %d fully scheduled\n",
+				frame, uframe);
+			return 0;
+		}
+
+		/* special case for isoc transfers larger than 125us:
+		 * the first and each subsequent fully used uframe
+		 * must be empty, so as to not illegally delay
+		 * already scheduled transactions
+		 */
+		if (125 < usecs) {
+			int ufs = (usecs / 125) - 1;
+			int i;
+			for (i = uframe; i < (uframe + ufs) && i < 8; i++)
+				if (0 < tt_usecs[i]) {
+					ehci_vdbg(ehci,
+						"multi-uframe xfer can't fit "
+						"in frame %d uframe %d\n",
+						frame, i);
+					return 0;
+				}
+		}
+
+		/* tentatively claim the slot, then re-check the carryover */
+		tt_usecs[uframe] += usecs;
+
+		carryover_tt_bandwidth(tt_usecs);
+
+		/* fail if the carryover pushed bw past the last uframe's limit */
+		if (max_tt_usecs[7] < tt_usecs[7]) {
+			ehci_vdbg(ehci,
+				"tt unavailable usecs %d frame %d uframe %d\n",
+				usecs, frame, uframe);
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+#else
+
/* return true iff the device's transaction translator is available
* for a periodic transfer starting at the specified frame, using
* all the uframes in the mask.
*/
for (; frame < ehci->periodic_size; frame += period) {
union ehci_shadow here;
- u32 type;
+ __le32 type;
here = ehci->pshadow [frame];
type = Q_NEXT_TYPE (ehci->periodic [frame]);
here = here.qh->qh_next;
continue;
case Q_TYPE_SITD:
- if (same_tt (dev, here.itd->urb->dev)) {
+ if (same_tt (dev, here.sitd->urb->dev)) {
u16 mask;
mask = le32_to_cpu (here.sitd
if (mask & uf_mask)
break;
}
- type = Q_NEXT_TYPE (here.qh->hw_next);
+ type = Q_NEXT_TYPE (here.sitd->hw_next);
here = here.sitd->sitd_next;
continue;
// case Q_TYPE_FSTN:
return 1;
}
+#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */
+
/*-------------------------------------------------------------------------*/
static int enable_periodic (struct ehci_hcd *ehci)
*/
status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125);
if (status != 0) {
- ehci->hcd.state = USB_STATE_HALT;
+ ehci_to_hcd(ehci)->state = HC_STATE_HALT;
return status;
}
cmd = readl (&ehci->regs->command) | CMD_PSE;
writel (cmd, &ehci->regs->command);
/* posted write ... PSS happens later */
- ehci->hcd.state = USB_STATE_RUNNING;
+ ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
/* make sure ehci_work scans these */
ehci->next_uframe = readl (&ehci->regs->frame_index)
*/
status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
if (status != 0) {
- ehci->hcd.state = USB_STATE_HALT;
+ ehci_to_hcd(ehci)->state = HC_STATE_HALT;
return status;
}
/*-------------------------------------------------------------------------*/
-// FIXME microframe periods not yet handled
+/* periodic schedule slots have iso tds (normal or split) first, then a
+ * sparse tree for active interrupt transfers.
+ *
+ * this just links in a qh; caller guarantees uframe masks are set right.
+ * no FSTN support (yet; ehci 0.96+)
+ */
+static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ unsigned i;
+ unsigned period = qh->period;
-static void intr_deschedule (
- struct ehci_hcd *ehci,
- struct ehci_qh *qh,
- int wait
-) {
- int status;
- unsigned frame = qh->start;
+ dev_dbg (&qh->dev->dev,
+ "link qh%d-%04x/%p start %d [%d/%d us]\n",
+ period, le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
+ qh, qh->start, qh->usecs, qh->c_usecs);
- do {
- periodic_unlink (ehci, frame, qh);
- qh_put (qh);
- frame += qh->period;
- } while (frame < ehci->periodic_size);
+ /* high bandwidth, or otherwise every microframe */
+ if (period == 0)
+ period = 1;
+
+ for (i = qh->start; i < ehci->periodic_size; i += period) {
+ union ehci_shadow *prev = &ehci->pshadow [i];
+ __le32 *hw_p = &ehci->periodic [i];
+ union ehci_shadow here = *prev;
+ __le32 type = 0;
+
+ /* skip the iso nodes at list head */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE (*hw_p);
+ if (type == Q_TYPE_QH)
+ break;
+ prev = periodic_next_shadow (prev, type);
+ hw_p = &here.qh->hw_next;
+ here = *prev;
+ }
+
+ /* sorting each branch by period (slow-->fast)
+ * enables sharing interior tree nodes
+ */
+ while (here.ptr && qh != here.qh) {
+ if (qh->period > here.qh->period)
+ break;
+ prev = &here.qh->qh_next;
+ hw_p = &here.qh->hw_next;
+ here = *prev;
+ }
+ /* link in this qh, unless some earlier pass did that */
+ if (qh != here.qh) {
+ qh->qh_next = here;
+ if (here.qh)
+ qh->hw_next = *hw_p;
+ wmb ();
+ prev->qh = qh;
+ *hw_p = QH_NEXT (qh->qh_dma);
+ }
+ }
+ qh->qh_state = QH_STATE_LINKED;
+ qh_get (qh);
+
+ /* update per-qh bandwidth for usbfs */
+ ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+ /* maybe enable periodic schedule processing */
+ if (!ehci->periodic_sched++)
+ return enable_periodic (ehci);
+
+ return 0;
+}
+
+static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ unsigned i;
+ unsigned period;
+
+ // FIXME:
+ // IF this isn't high speed
+ // and this qh is active in the current uframe
+ // (and overlay token SplitXstate is false?)
+ // THEN
+ // qh->hw_info1 |= __constant_cpu_to_le32 (1 << 7 /* "ignore" */);
+
+ /* high bandwidth, or otherwise part of every microframe */
+ if ((period = qh->period) == 0)
+ period = 1;
+
+ for (i = qh->start; i < ehci->periodic_size; i += period)
+ periodic_unlink (ehci, i, qh);
+
+ /* update per-qh bandwidth for usbfs */
+ ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ dev_dbg (&qh->dev->dev,
+ "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+ qh->period,
+ le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
+ qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
- qh->qh_next.ptr = 0;
- ehci->periodic_sched--;
+ qh->qh_next.ptr = NULL;
+ qh_put (qh);
/* maybe turn off periodic schedule */
+ ehci->periodic_sched--;
if (!ehci->periodic_sched)
- status = disable_periodic (ehci);
- else {
- status = 0;
- vdbg ("periodic schedule still enabled");
- }
+ (void) disable_periodic (ehci);
+}
- /*
- * If the hc may be looking at this qh, then delay a uframe
- * (yeech!) to be sure it's done.
- * No other threads may be mucking with this qh.
- */
- if (((ehci_get_frame (&ehci->hcd) - frame) % qh->period) == 0) {
- if (wait) {
- udelay (125);
- qh->hw_next = EHCI_LIST_END;
- } else {
- /* we may not be IDLE yet, but if the qh is empty
- * the race is very short. then if qh also isn't
- * rescheduled soon, it won't matter. otherwise...
- */
- vdbg ("intr_deschedule...");
- }
- } else
- qh->hw_next = EHCI_LIST_END;
+static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ unsigned wait;
- qh->qh_state = QH_STATE_IDLE;
+ qh_unlink_periodic (ehci, qh);
- /* update per-qh bandwidth utilization (for usbfs) */
- hcd_to_bus (&ehci->hcd)->bandwidth_allocated -=
- (qh->usecs + qh->c_usecs) / qh->period;
+ /* simple/paranoid: always delay, expecting the HC needs to read
+ * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
+ * expect khubd to clean up after any CSPLITs we won't issue.
+ * active high speed queues may need bigger delays...
+ */
+ if (list_empty (&qh->qtd_list)
+ || (__constant_cpu_to_le32 (QH_CMASK)
+ & qh->hw_info2) != 0)
+ wait = 2;
+ else
+ wait = 55; /* worst case: 3 * 1024 */
- dbg ("descheduled qh %p, period = %d frame = %d count = %d, urbs = %d",
- qh, qh->period, frame,
- atomic_read (&qh->kref.refcount), ehci->periodic_sched);
+ udelay (wait);
+ qh->qh_state = QH_STATE_IDLE;
+ qh->hw_next = EHCI_LIST_END;
+ wmb ();
}
+/*-------------------------------------------------------------------------*/
+
static int check_period (
- struct ehci_hcd *ehci,
+ struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
unsigned period,
unsigned usecs
) {
+ int claimed;
+
/* complete split running into next frame?
* given FSTN support, we could sometimes check...
*/
/*
* 80% periodic == 100 usec/uframe available
- * convert "usecs we need" to "max already claimed"
+ * convert "usecs we need" to "max already claimed"
*/
usecs = 100 - usecs;
- do {
- int claimed;
-
-// FIXME delete when intr_submit handles non-empty queues
-// this gives us a one intr/frame limit (vs N/uframe)
-// ... and also lets us avoid tracking split transactions
-// that might collide at a given TT/hub.
- if (ehci->pshadow [frame].ptr)
- return 0;
-
- claimed = periodic_usecs (ehci, frame, uframe);
- if (claimed > usecs)
- return 0;
+ /* we "know" 2 and 4 uframe intervals were rejected; so
+ * for period 0, check _every_ microframe in the schedule.
+ */
+ if (unlikely (period == 0)) {
+ do {
+ for (uframe = 0; uframe < 7; uframe++) {
+ claimed = periodic_usecs (ehci, frame, uframe);
+ if (claimed > usecs)
+ return 0;
+ }
+ } while ((frame += 1) < ehci->periodic_size);
-// FIXME update to handle sub-frame periods
- } while ((frame += period) < ehci->periodic_size);
+ /* just check the specified uframe, at that period */
+ } else {
+ do {
+ claimed = periodic_usecs (ehci, frame, uframe);
+ if (claimed > usecs)
+ return 0;
+ } while ((frame += period) < ehci->periodic_size);
+ }
// success!
return 1;
}
static int check_intr_schedule (
- struct ehci_hcd *ehci,
+ struct ehci_hcd *ehci,
unsigned frame,
unsigned uframe,
const struct ehci_qh *qh,
- u32 *c_maskp
+ __le32 *c_maskp
)
{
- int retval = -ENOSPC;
+ int retval = -ENOSPC;
+ u8 mask = 0;
+
+ if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
+ goto done;
if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
goto done;
if (!qh->c_usecs) {
retval = 0;
- *c_maskp = cpu_to_le32 (0);
+ *c_maskp = 0;
goto done;
}
- /* This is a split transaction; check the bandwidth available for
- * the completion too. Check both worst and best case gaps: worst
- * case is SPLIT near uframe end, and CSPLIT near start ... best is
- * vice versa. Difference can be almost two uframe times, but we
- * reserve unnecessary bandwidth (waste it) this way. (Actually
- * even better cases exist, like immediate device NAK.)
- *
- * FIXME don't even bother unless we know this TT is idle in that
- * range of uframes ... for now, check_period() allows only one
- * interrupt transfer per frame, so needn't check "TT busy" status
- * when scheduling a split (QH, SITD, or FSTN).
+#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
+ if (tt_available (ehci, qh->period, qh->dev, frame, uframe,
+ qh->tt_usecs)) {
+ unsigned i;
+
+ /* TODO : this may need FSTN for SSPLIT in uframe 5. */
+ for (i=uframe+1; i<8 && i<uframe+4; i++)
+ if (!check_period (ehci, frame, i,
+ qh->period, qh->c_usecs))
+ goto done;
+ else
+ mask |= 1 << i;
+
+ retval = 0;
+
+ *c_maskp = cpu_to_le32 (mask << 8);
+ }
+#else
+ /* Make sure this tt's buffer is also available for CSPLITs.
+ * We pessimize a bit; probably the typical full speed case
+ * doesn't need the second CSPLIT.
*
- * FIXME ehci 0.96 and above can use FSTNs
+ * NOTE: both SPLIT and CSPLIT could be checked in just
+ * one smart pass...
*/
- if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
- qh->period, qh->c_usecs))
- goto done;
- if (!check_period (ehci, frame, uframe + qh->gap_uf,
- qh->period, qh->c_usecs))
- goto done;
+ mask = 0x03 << (uframe + qh->gap_uf);
+ *c_maskp = cpu_to_le32 (mask << 8);
- *c_maskp = cpu_to_le32 (0x03 << (8 + uframe + qh->gap_uf));
- retval = 0;
+ mask |= 1 << uframe;
+ if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
+ if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
+ qh->period, qh->c_usecs))
+ goto done;
+ if (!check_period (ehci, frame, uframe + qh->gap_uf,
+ qh->period, qh->c_usecs))
+ goto done;
+ retval = 0;
+ }
+#endif
done:
return retval;
}
+/* "first fit" scheduling policy used the first time through,
+ * or when the previous schedule slot can't be re-used.
+ */
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- int status;
+ int status;
unsigned uframe;
- u32 c_mask;
+ __le32 c_mask;
unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
+ qh_refresh(ehci, qh);
qh->hw_next = EHCI_LIST_END;
frame = qh->start;
/* reuse the previous schedule slots, if we can */
if (frame < qh->period) {
- uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
+ uframe = ffs (le32_to_cpup (&qh->hw_info2) & QH_SMASK);
status = check_intr_schedule (ehci, frame, --uframe,
qh, &c_mask);
} else {
* uframes have enough periodic bandwidth available.
*/
if (status) {
- frame = qh->period - 1;
- do {
- for (uframe = 0; uframe < 8; uframe++) {
- status = check_intr_schedule (ehci,
- frame, uframe, qh,
- &c_mask);
- if (status == 0)
- break;
- }
- } while (status && frame--);
+ /* "normal" case, uframing flexible except with splits */
+ if (qh->period) {
+ frame = qh->period - 1;
+ do {
+ for (uframe = 0; uframe < 8; uframe++) {
+ status = check_intr_schedule (ehci,
+ frame, uframe, qh,
+ &c_mask);
+ if (status == 0)
+ break;
+ }
+ } while (status && frame--);
+
+ /* qh->period == 0 means every uframe */
+ } else {
+ frame = 0;
+ status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
+ }
if (status)
goto done;
qh->start = frame;
/* reset S-frame and (maybe) C-frame masks */
- qh->hw_info2 &= ~0xffff;
- qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
+ qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK));
+ qh->hw_info2 |= qh->period
+ ? cpu_to_le32 (1 << uframe)
+ : __constant_cpu_to_le32 (QH_SMASK);
+ qh->hw_info2 |= c_mask;
} else
- dbg ("reused previous qh %p schedule", qh);
+ ehci_dbg (ehci, "reused qh %p schedule\n", qh);
/* stuff into the periodic schedule */
- qh->qh_state = QH_STATE_LINKED;
- dbg ("scheduled qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
- qh, qh->usecs, qh->c_usecs,
- qh->period, frame, uframe, qh->gap_uf);
- do {
- if (unlikely (ehci->pshadow [frame].ptr != 0)) {
-
-// FIXME -- just link toward the end, before any qh with a shorter period,
-// AND accommodate it already having been linked here (after some other qh)
-// AS WELL AS updating the schedule checking logic
-
- BUG ();
- } else {
- ehci->pshadow [frame].qh = qh_get (qh);
- ehci->periodic [frame] =
- QH_NEXT (qh->qh_dma);
- }
- wmb ();
- frame += qh->period;
- } while (frame < ehci->periodic_size);
-
- /* update per-qh bandwidth for usbfs */
- hcd_to_bus (&ehci->hcd)->bandwidth_allocated +=
- (qh->usecs + qh->c_usecs) / qh->period;
-
- /* maybe enable periodic schedule processing */
- if (!ehci->periodic_sched++)
- status = enable_periodic (ehci);
+ status = qh_link_periodic (ehci, qh);
done:
return status;
}
static int intr_submit (
struct ehci_hcd *ehci,
+ struct usb_host_endpoint *ep,
struct urb *urb,
struct list_head *qtd_list,
- int mem_flags
+ gfp_t mem_flags
) {
unsigned epnum;
unsigned long flags;
struct ehci_qh *qh;
- struct hcd_dev *dev;
- int is_input;
int status = 0;
struct list_head empty;
/* get endpoint and transfer/schedule data */
- epnum = usb_pipeendpoint (urb->pipe);
- is_input = usb_pipein (urb->pipe);
- if (is_input)
- epnum |= 0x10;
+ epnum = ep->desc.bEndpointAddress;
spin_lock_irqsave (&ehci->lock, flags);
- dev = (struct hcd_dev *)urb->dev->hcpriv;
+
+ if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
+ &ehci_to_hcd(ehci)->flags))) {
+ status = -ESHUTDOWN;
+ goto done;
+ }
/* get qh and force any scheduling errors */
INIT_LIST_HEAD (&empty);
- qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
- if (qh == 0) {
+ qh = qh_append_tds (ehci, urb, &empty, epnum, &ep->hcpriv);
+ if (qh == NULL) {
status = -ENOMEM;
goto done;
}
}
/* then queue the urb's tds to the qh */
- qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
- BUG_ON (qh == 0);
+ qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv);
+ BUG_ON (qh == NULL);
/* ... update usbfs periodic stats */
- hcd_to_bus (&ehci->hcd)->bandwidth_int_reqs++;
+ ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
done:
spin_unlock_irqrestore (&ehci->lock, flags);
/* ehci_iso_stream ops work with both ITD and SITD */
static struct ehci_iso_stream *
-iso_stream_alloc (int mem_flags)
+iso_stream_alloc (gfp_t mem_flags)
{
struct ehci_iso_stream *stream;
- stream = kmalloc(sizeof *stream, mem_flags);
- if (likely (stream != 0)) {
- memset (stream, 0, sizeof(*stream));
+ stream = kzalloc(sizeof *stream, mem_flags);
+ if (likely (stream != NULL)) {
INIT_LIST_HEAD(&stream->td_list);
INIT_LIST_HEAD(&stream->free_list);
stream->next_uframe = -1;
static void
iso_stream_init (
+ struct ehci_hcd *ehci,
struct ehci_iso_stream *stream,
struct usb_device *dev,
int pipe,
*/
epnum = usb_pipeendpoint (pipe);
is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
+ maxp = usb_maxpacket(dev, pipe, !is_input);
if (is_input) {
- maxp = dev->epmaxpacketin [epnum];
buf1 = (1 << 11);
} else {
- maxp = dev->epmaxpacketout [epnum];
buf1 = 0;
}
} else {
u32 addr;
+ int think_time;
+ int hs_transfers;
addr = dev->ttport << 24;
- addr |= dev->tt->hub->devnum << 16;
+ if (!ehci_is_TDI(ehci)
+ || (dev->tt->hub !=
+ ehci_to_hcd(ehci)->self.root_hub))
+ addr |= dev->tt->hub->devnum << 16;
addr |= epnum << 8;
addr |= dev->devnum;
stream->usecs = HS_USECS_ISO (maxp);
+ think_time = dev->tt ? dev->tt->think_time : 0;
+ stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time (
+ dev->speed, is_input, 1, maxp));
+ hs_transfers = max (1u, (maxp + 187) / 188);
if (is_input) {
u32 tmp;
stream->usecs = HS_USECS_ISO (1);
stream->raw_mask = 1;
- /* pessimistic c-mask */
- tmp = usb_calc_bus_time (USB_SPEED_FULL, 1, 0, maxp)
- / (125 * 1000);
- stream->raw_mask |= 3 << (tmp + 9);
+ /* c-mask as specified in USB 2.0 11.18.4 3.c */
+ tmp = (1 << (hs_transfers + 2)) - 1;
+ stream->raw_mask |= tmp << (8 + 2);
} else
- stream->raw_mask = smask_out [maxp / 188];
+ stream->raw_mask = smask_out [hs_transfers - 1];
bandwidth = stream->usecs + stream->c_usecs;
bandwidth /= 1 << (interval + 2);
*/
if (stream->refcount == 1) {
int is_in;
- struct hcd_dev *dev = stream->udev->hcpriv;
// BUG_ON (!list_empty(&stream->td_list));
is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
stream->bEndpointAddress &= 0x0f;
- dev->ep [is_in + stream->bEndpointAddress] = 0;
+ stream->ep->hcpriv = NULL;
if (stream->rescheduled) {
ehci_info (ehci, "ep%d%s-iso rescheduled "
static inline struct ehci_iso_stream *
iso_stream_get (struct ehci_iso_stream *stream)
{
- if (likely (stream != 0))
+ if (likely (stream != NULL))
stream->refcount++;
return stream;
}
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
unsigned epnum;
- struct hcd_dev *dev;
struct ehci_iso_stream *stream;
+ struct usb_host_endpoint *ep;
unsigned long flags;
epnum = usb_pipeendpoint (urb->pipe);
if (usb_pipein(urb->pipe))
- epnum += 0x10;
+ ep = urb->dev->ep_in[epnum];
+ else
+ ep = urb->dev->ep_out[epnum];
spin_lock_irqsave (&ehci->lock, flags);
+ stream = ep->hcpriv;
- dev = (struct hcd_dev *)urb->dev->hcpriv;
- stream = dev->ep [epnum];
-
- if (unlikely (stream == 0)) {
+ if (unlikely (stream == NULL)) {
stream = iso_stream_alloc(GFP_ATOMIC);
- if (likely (stream != 0)) {
+ if (likely (stream != NULL)) {
/* dev->ep owns the initial refcount */
- dev->ep[epnum] = stream;
- iso_stream_init(stream, urb->dev, urb->pipe,
+ ep->hcpriv = stream;
+ stream->ep = ep;
+ iso_stream_init(ehci, stream, urb->dev, urb->pipe,
urb->interval);
}
/* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
} else if (unlikely (stream->hw_info1 != 0)) {
ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
- urb->dev->devpath, epnum & 0x0f,
- (epnum & 0x10) ? "in" : "out");
- stream = 0;
+ urb->dev->devpath, epnum,
+ usb_pipein(urb->pipe) ? "in" : "out");
+ stream = NULL;
}
/* caller guarantees an eventual matching iso_stream_put */
/*-------------------------------------------------------------------------*/
-/* ehci_iso_sched ops can be shared, ITD-only, or SITD-only */
+/* ehci_iso_sched ops can be ITD-only or SITD-only */
static struct ehci_iso_sched *
-iso_sched_alloc (unsigned packets, int mem_flags)
+iso_sched_alloc (unsigned packets, gfp_t mem_flags)
{
struct ehci_iso_sched *iso_sched;
int size = sizeof *iso_sched;
size += packets * sizeof (struct ehci_iso_packet);
- iso_sched = kmalloc (size, mem_flags);
- if (likely (iso_sched != 0)) {
- memset(iso_sched, 0, size);
+ iso_sched = kzalloc(size, mem_flags);
+ if (likely (iso_sched != NULL)) {
INIT_LIST_HEAD (&iso_sched->td_list);
}
return iso_sched;
trans |= length << 16;
uframe->transaction = cpu_to_le32 (trans);
- /* might need to cross a buffer page within a td */
+ /* might need to cross a buffer page within a uframe */
uframe->bufp = (buf & ~(u64)0x0fff);
buf += length;
if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
struct ehci_iso_stream *stream,
struct ehci_hcd *ehci,
struct urb *urb,
- int mem_flags
+ gfp_t mem_flags
)
{
struct ehci_itd *itd;
unsigned long flags;
sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
- if (unlikely (sched == 0))
+ if (unlikely (sched == NULL))
return -ENOMEM;
itd_sched_init (sched, stream, urb);
list_del (&itd->itd_list);
itd_dma = itd->itd_dma;
} else
- itd = 0;
+ itd = NULL;
if (!itd) {
spin_unlock_irqrestore (&ehci->lock, flags);
spin_lock_irqsave (&ehci->lock, flags);
}
- if (unlikely (0 == itd)) {
+ if (unlikely (NULL == itd)) {
iso_sched_free (stream, sched);
+ spin_unlock_irqrestore (&ehci->lock, flags);
return -ENOMEM;
}
memset (itd, 0, sizeof *itd);
frame = uframe >> 3;
uf = uframe & 7;
+#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
+ /* The tt's fullspeed bus bandwidth must be available.
+ * tt_available scheduling guarantees 10+% for control/bulk.
+ */
+ if (!tt_available (ehci, period_uframes << 3,
+ stream->udev, frame, uf, stream->tt_usecs))
+ return 0;
+#else
/* tt must be idle for start(s), any gap, and csplit.
* assume scheduling slop leaves 10+% for control/bulk.
*/
if (!tt_no_collision (ehci, period_uframes << 3,
stream->udev, frame, mask))
return 0;
+#endif
/* check starts (OUT uses more than one) */
max_used = 100 - stream->usecs;
/* for IN, check CSPLIT */
if (stream->c_usecs) {
+ uf = uframe & 7;
max_used = 100 - stream->c_usecs;
do {
tmp = 1 << uf;
uframe += period_uframes;
} while (uframe < mod);
- stream->splits = stream->raw_mask << (uframe & 7);
- cpu_to_le32s (&stream->splits);
+ stream->splits = cpu_to_le32(stream->raw_mask << (uframe & 7));
return 1;
}
fail:
iso_sched_free (stream, sched);
- urb->hcpriv = 0;
+ urb->hcpriv = NULL;
return status;
ready:
+ /* report high speed start in uframes; full speed, in frames */
urb->start_frame = stream->next_uframe;
+ if (!stream->highspeed)
+ urb->start_frame >>= 3;
return 0;
}
{
int i;
+ /* it's been recently zeroed */
itd->hw_next = EHCI_LIST_END;
itd->hw_bufp [0] = stream->buf0;
itd->hw_bufp [1] = stream->buf1;
struct ehci_itd *itd,
struct ehci_iso_sched *iso_sched,
unsigned index,
- u16 uframe,
- int first
+ u16 uframe
)
{
struct ehci_iso_packet *uf = &iso_sched->packet [index];
itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(uf->bufp >> 32));
/* iso_frame_desc[].offset must be strictly increasing */
- if (unlikely (!first && uf->cross)) {
+ if (unlikely (uf->cross)) {
u64 bufp = uf->bufp + 4096;
itd->pg = ++pg;
itd->hw_bufp [pg] |= cpu_to_le32 (bufp & ~(u32)0);
struct ehci_iso_stream *stream
)
{
- int packet, first = 1;
+ int packet;
unsigned next_uframe, uframe, frame;
struct ehci_iso_sched *iso_sched = urb->hcpriv;
struct ehci_itd *itd;
next_uframe = stream->next_uframe % mod;
if (unlikely (list_empty(&stream->td_list))) {
- hcd_to_bus (&ehci->hcd)->bandwidth_allocated
+ ehci_to_hcd(ehci)->self.bandwidth_allocated
+= stream->bandwidth;
ehci_vdbg (ehci,
"schedule devp %s ep%d%s-iso period %d start %d.%d\n",
next_uframe >> 3, next_uframe & 0x7);
stream->start = jiffies;
}
- hcd_to_bus (&ehci->hcd)->bandwidth_isoc_reqs++;
+ ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill iTDs uframe by uframe */
- for (packet = 0, itd = 0; packet < urb->number_of_packets; ) {
- if (itd == 0) {
+ for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
+ if (itd == NULL) {
/* ASSERT: we have all necessary itds */
// BUG_ON (list_empty (&iso_sched->td_list));
list_move_tail (&itd->itd_list, &stream->td_list);
itd->stream = iso_stream_get (stream);
itd->urb = usb_get_urb (urb);
- first = 1;
itd_init (stream, itd);
}
frame = next_uframe >> 3;
itd->usecs [uframe] = stream->usecs;
- itd_patch (itd, iso_sched, packet, uframe, first);
- first = 0;
+ itd_patch (itd, iso_sched, packet, uframe);
next_uframe += stream->interval;
stream->depth += stream->interval;
if (((next_uframe >> 3) != frame)
|| packet == urb->number_of_packets) {
itd_link (ehci, frame % ehci->periodic_size, itd);
- itd = 0;
+ itd = NULL;
}
}
stream->next_uframe = next_uframe;
/* don't need that schedule data any more */
iso_sched_free (stream, iso_sched);
- urb->hcpriv = 0;
+ urb->hcpriv = NULL;
timer_action (ehci, TIMER_IO_WATCHDOG);
if (unlikely (!ehci->periodic_sched++))
static unsigned
itd_complete (
struct ehci_hcd *ehci,
- struct ehci_itd *itd,
- struct pt_regs *regs
+ struct ehci_itd *itd
) {
struct urb *urb = itd->urb;
struct usb_iso_packet_descriptor *desc;
}
usb_put_urb (urb);
- itd->urb = 0;
- itd->stream = 0;
+ itd->urb = NULL;
+ itd->stream = NULL;
list_move (&itd->itd_list, &stream->free_list);
iso_stream_put (ehci, stream);
*/
/* give urb back to the driver ... can be out-of-order */
- dev = usb_get_dev (urb->dev);
- ehci_urb_done (ehci, urb, regs);
- urb = 0;
+ dev = urb->dev;
+ ehci_urb_done (ehci, urb);
+ urb = NULL;
/* defer stopping schedule; completion can submit */
ehci->periodic_sched--;
if (unlikely (!ehci->periodic_sched))
(void) disable_periodic (ehci);
- hcd_to_bus (&ehci->hcd)->bandwidth_isoc_reqs--;
+ ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
if (unlikely (list_empty (&stream->td_list))) {
- hcd_to_bus (&ehci->hcd)->bandwidth_allocated
+ ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
ehci_vdbg (ehci,
"deschedule devp %s ep%d%s-iso\n",
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
}
iso_stream_put (ehci, stream);
- usb_put_dev (dev);
return 1;
}
/*-------------------------------------------------------------------------*/
-static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
+static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
+ gfp_t mem_flags)
{
int status = -EINVAL;
unsigned long flags;
/* Get iso_stream head */
stream = iso_stream_find (ehci, urb);
- if (unlikely (stream == 0)) {
+ if (unlikely (stream == NULL)) {
ehci_dbg (ehci, "can't get iso stream\n");
return -ENOMEM;
}
/* schedule ... need to lock */
spin_lock_irqsave (&ehci->lock, flags);
- status = iso_stream_schedule (ehci, urb, stream);
- if (likely (status == 0))
+ if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
+ &ehci_to_hcd(ehci)->flags)))
+ status = -ESHUTDOWN;
+ else
+ status = iso_stream_schedule (ehci, urb, stream);
+ if (likely (status == 0))
itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
spin_unlock_irqrestore (&ehci->lock, flags);
/* might need to cross a buffer page within a td */
packet->bufp = buf;
- buf += length;
- packet->buf1 = buf & ~0x0fff;
+ packet->buf1 = (buf + length) & ~0x0fff;
if (packet->buf1 != (buf & ~(u64)0x0fff))
packet->cross = 1;
- /* OUT uses multiple start-splits */
+ /* OUT uses multiple start-splits */
if (stream->bEndpointAddress & USB_DIR_IN)
continue;
- length = 1 + (length / 188);
- packet->buf1 |= length;
+ length = (length + 187) / 188;
if (length > 1) /* BEGIN vs ALL */
- packet->buf1 |= 1 << 3;
+ length |= 1 << 3;
+ packet->buf1 |= length;
}
}
struct ehci_iso_stream *stream,
struct ehci_hcd *ehci,
struct urb *urb,
- int mem_flags
+ gfp_t mem_flags
)
{
struct ehci_sitd *sitd;
unsigned long flags;
iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
- if (iso_sched == 0)
+ if (iso_sched == NULL)
return -ENOMEM;
sitd_sched_init (iso_sched, stream, urb);
list_del (&sitd->sitd_list);
sitd_dma = sitd->sitd_dma;
} else
- sitd = 0;
+ sitd = NULL;
if (!sitd) {
spin_unlock_irqrestore (&ehci->lock, flags);
sitd->hw_buf_hi [0] = cpu_to_le32 (bufp >> 32);
sitd->hw_buf [1] = cpu_to_le32 (uf->buf1);
- if (uf->cross) {
+ if (uf->cross)
bufp += 4096;
- sitd->hw_buf_hi [1] = cpu_to_le32 (bufp >> 32);
- }
+ sitd->hw_buf_hi [1] = cpu_to_le32 (bufp >> 32);
sitd->index = index;
}
if (list_empty(&stream->td_list)) {
/* usbfs ignores TT bandwidth */
- hcd_to_bus (&ehci->hcd)->bandwidth_allocated
+ ehci_to_hcd(ehci)->self.bandwidth_allocated
+= stream->bandwidth;
ehci_vdbg (ehci,
- "sched dev%s ep%d%s-iso [%d] %dms/%04x\n",
+ "sched devp %s ep%d%s-iso [%d] %dms/%04x\n",
urb->dev->devpath, stream->bEndpointAddress & 0x0f,
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
(next_uframe >> 3) % ehci->periodic_size,
stream->interval, le32_to_cpu (stream->splits));
stream->start = jiffies;
}
- hcd_to_bus (&ehci->hcd)->bandwidth_isoc_reqs++;
+ ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
/* fill sITDs frame by frame */
- for (packet = 0, sitd = 0;
+ for (packet = 0, sitd = NULL;
packet < urb->number_of_packets;
packet++) {
/* don't need that schedule data any more */
iso_sched_free (stream, sched);
- urb->hcpriv = 0;
+ urb->hcpriv = NULL;
timer_action (ehci, TIMER_IO_WATCHDOG);
if (!ehci->periodic_sched++)
/*-------------------------------------------------------------------------*/
#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
- | SITD_STS_XACT | SITD_STS_MMF | SITD_STS_STS)
+ | SITD_STS_XACT | SITD_STS_MMF)
static unsigned
sitd_complete (
struct ehci_hcd *ehci,
- struct ehci_sitd *sitd,
- struct pt_regs *regs
+ struct ehci_sitd *sitd
) {
struct urb *urb = sitd->urb;
struct usb_iso_packet_descriptor *desc;
}
usb_put_urb (urb);
- sitd->urb = 0;
- sitd->stream = 0;
+ sitd->urb = NULL;
+ sitd->stream = NULL;
list_move (&sitd->sitd_list, &stream->free_list);
stream->depth -= stream->interval << 3;
iso_stream_put (ehci, stream);
*/
/* give urb back to the driver */
- dev = usb_get_dev (urb->dev);
- ehci_urb_done (ehci, urb, regs);
- urb = 0;
+ dev = urb->dev;
+ ehci_urb_done (ehci, urb);
+ urb = NULL;
/* defer stopping schedule; completion can submit */
ehci->periodic_sched--;
if (!ehci->periodic_sched)
(void) disable_periodic (ehci);
- hcd_to_bus (&ehci->hcd)->bandwidth_isoc_reqs--;
+ ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
if (list_empty (&stream->td_list)) {
- hcd_to_bus (&ehci->hcd)->bandwidth_allocated
+ ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
ehci_vdbg (ehci,
"deschedule devp %s ep%d%s-iso\n",
(stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
}
iso_stream_put (ehci, stream);
- usb_put_dev (dev);
return 1;
}
-static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
+static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
+ gfp_t mem_flags)
{
int status = -EINVAL;
unsigned long flags;
struct ehci_iso_stream *stream;
- // FIXME remove when csplits behave
- if (usb_pipein(urb->pipe)) {
- ehci_dbg (ehci, "no iso-IN split transactions yet\n");
- return -ENOMEM;
- }
-
/* Get iso_stream head */
stream = iso_stream_find (ehci, urb);
- if (stream == 0) {
+ if (stream == NULL) {
ehci_dbg (ehci, "can't get iso stream\n");
return -ENOMEM;
}
/* schedule ... need to lock */
spin_lock_irqsave (&ehci->lock, flags);
- status = iso_stream_schedule (ehci, urb, stream);
- if (status == 0)
+ if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
+ &ehci_to_hcd(ehci)->flags)))
+ status = -ESHUTDOWN;
+ else
+ status = iso_stream_schedule (ehci, urb, stream);
+ if (status == 0)
sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
spin_unlock_irqrestore (&ehci->lock, flags);
#else
static inline int
-sitd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
+sitd_submit (struct ehci_hcd *ehci, struct urb *urb, gfp_t mem_flags)
{
ehci_dbg (ehci, "split iso support is disabled\n");
return -ENOSYS;
static inline unsigned
sitd_complete (
struct ehci_hcd *ehci,
- struct ehci_sitd *sitd,
- struct pt_regs *regs
+ struct ehci_sitd *sitd
) {
ehci_err (ehci, "sitd_complete %p?\n", sitd);
return 0;
/*-------------------------------------------------------------------------*/
static void
-scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs)
+scan_periodic (struct ehci_hcd *ehci)
{
unsigned frame, clock, now_uframe, mod;
unsigned modified;
* Touches as few pages as possible: cache-friendly.
*/
now_uframe = ehci->next_uframe;
- if (HCD_IS_RUNNING (ehci->hcd.state))
+ if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
clock = readl (&ehci->regs->frame_index);
else
clock = now_uframe + mod - 1;
for (;;) {
union ehci_shadow q, *q_p;
- u32 type, *hw_p;
+ __le32 type, *hw_p;
unsigned uframes;
/* don't scan past the live uframe */
type = Q_NEXT_TYPE (*hw_p);
modified = 0;
- while (q.ptr != 0) {
+ while (q.ptr != NULL) {
unsigned uf;
union ehci_shadow temp;
+ int live;
+ live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state);
switch (type) {
case Q_TYPE_QH:
/* handle any completions */
temp.qh = qh_get (q.qh);
type = Q_NEXT_TYPE (q.qh->hw_next);
q = q.qh->qh_next;
- modified = qh_completions (ehci, temp.qh, regs);
+ modified = qh_completions (ehci, temp.qh);
if (unlikely (list_empty (&temp.qh->qtd_list)))
- intr_deschedule (ehci, temp.qh, 0);
+ intr_deschedule (ehci, temp.qh);
qh_put (temp.qh);
break;
case Q_TYPE_FSTN:
case Q_TYPE_ITD:
/* skip itds for later in the frame */
rmb ();
- for (uf = uframes; uf < 8; uf++) {
+ for (uf = live ? uframes : 8; uf < 8; uf++) {
if (0 == (q.itd->hw_transaction [uf]
& ITD_ACTIVE))
continue;
*hw_p = q.itd->hw_next;
type = Q_NEXT_TYPE (q.itd->hw_next);
wmb();
- modified = itd_complete (ehci, q.itd, regs);
+ modified = itd_complete (ehci, q.itd);
q = *q_p;
break;
case Q_TYPE_SITD:
- if (q.sitd->hw_results & SITD_ACTIVE) {
+ if ((q.sitd->hw_results & SITD_ACTIVE)
+ && live) {
q_p = &q.sitd->sitd_next;
hw_p = &q.sitd->hw_next;
type = Q_NEXT_TYPE (q.sitd->hw_next);
*hw_p = q.sitd->hw_next;
type = Q_NEXT_TYPE (q.sitd->hw_next);
wmb();
- modified = sitd_complete (ehci, q.sitd, regs);
+ modified = sitd_complete (ehci, q.sitd);
q = *q_p;
break;
default:
dbg ("corrupt type %d frame %d shadow %p",
type, frame, q.ptr);
// BUG ();
- q.ptr = 0;
+ q.ptr = NULL;
}
/* assume completion callbacks modify the queue */
if (now_uframe == clock) {
unsigned now;
- if (!HCD_IS_RUNNING (ehci->hcd.state))
+ if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
break;
ehci->next_uframe = now_uframe;
now = readl (&ehci->regs->frame_index) % mod;
now_uframe++;
now_uframe %= mod;
}
- }
+ }
}