/* used when updating hcd data */
static spinlock_t hcd_data_lock = SPIN_LOCK_UNLOCKED;
-/* wait queue for synchronous unlinks */
-DECLARE_WAIT_QUEUE_HEAD(usb_kill_urb_queue);
-
/*-------------------------------------------------------------------------*/
/*
{
struct usb_ctrlrequest *cmd;
u16 typeReq, wValue, wIndex, wLength;
- const u8 *bufp = NULL;
+ const u8 *bufp = 0;
u8 *ubuf = urb->transfer_buffer;
int len = 0;
int patch_wakeup = 0;
hcd->rh_timer.data = 0;
urb->actual_length = length;
urb->status = 0;
- urb->hcpriv = NULL;
+ urb->hcpriv = 0;
} else
mod_timer (&hcd->rh_timer, jiffies + HZ/4);
spin_unlock (&hcd_data_lock);
/*-------------------------------------------------------------------------*/
/*
 * usb_rh_status_dequeue - cancel a pending virtual root-hub status urb
 * @hcd: host controller whose polled root hub owns @urb
 * @urb: the interrupt urb being unlinked
 *
 * Detaches @urb from the root-hub polling timer and gives it back to
 * its completion handler with interrupts disabled.
 *
 * NOTE(review): this hunk interleaves the old (int-returning) and new
 * (void) variants; context lines appear elided -- the historical body
 * had a del_timer_sync() before clearing rh_timer.data.  Confirm
 * against the full file before applying.
 */
-int usb_rh_status_dequeue (struct usb_hcd *hcd, struct urb *urb)
+void usb_rh_status_dequeue (struct usb_hcd *hcd, struct urb *urb)
{
unsigned long flags;
/* stop the polling timer from resubmitting this urb */
hcd->rh_timer.data = 0;
local_irq_save (flags);
/* NOTE(review): "+" line regresses NULL to plain 0 for a pointer;
 * kernel style (and sparse) prefer NULL here. */
- urb->hcpriv = NULL;
+ urb->hcpriv = 0;
usb_hcd_giveback_urb (hcd, urb, NULL);
local_irq_restore (flags);
- return 0;
}
/*-------------------------------------------------------------------------*/
.release = &usb_host_release,
};
-int usb_host_init(void)
+void usb_host_init(void)
{
- return class_register(&usb_host_class);
+ class_register(&usb_host_class);
}
void usb_host_cleanup(void)
/*
 * urb_unlink - detach @urb from its device's pending-urb list
 * @urb: urb being taken off the bookkeeping list
 *
 * Removes the urb from the per-device list under hcd_data_lock and
 * drops the device reference taken at submit time.  The "+" lines
 * snapshot urb->dev while still holding the lock so the reference is
 * released on a value read before unlock (avoids a read of urb->dev
 * after the urb may be given back concurrently).
 *
 * NOTE(review): the body of "if (urb->bandwidth)" is elided context --
 * presumably usb_release_bandwidth(); confirm against the full file.
 */
static void urb_unlink (struct urb *urb)
{
unsigned long flags;
+ struct usb_device *dev;
/* Release any periodic transfer bandwidth */
if (urb->bandwidth)
spin_lock_irqsave (&hcd_data_lock, flags);
list_del_init (&urb->urb_list);
+ dev = urb->dev;
spin_unlock_irqrestore (&hcd_data_lock, flags);
- usb_put_dev (urb->dev);
+ usb_put_dev (dev);
}
// FIXME: verify that quiescing hc works right (RH cleans up)
spin_lock_irqsave (&hcd_data_lock, flags);
- if (unlikely (urb->reject))
- status = -EPERM;
- else if (HCD_IS_RUNNING (hcd->state) &&
- hcd->state != USB_STATE_QUIESCING) {
+ if (HCD_IS_RUNNING (hcd->state) && hcd->state != USB_STATE_QUIESCING) {
usb_get_dev (urb->dev);
list_add_tail (&urb->urb_list, &dev->urb_list);
status = 0;
- } else
+ } else {
+ INIT_LIST_HEAD (&urb->urb_list);
status = -ESHUTDOWN;
+ }
spin_unlock_irqrestore (&hcd_data_lock, flags);
- if (status) {
- INIT_LIST_HEAD (&urb->urb_list);
+ if (status)
return status;
- }
/* increment urb's reference count as part of giving it to the HCD
* (which now controls it). HCD guarantees that it either returns
* an error or calls giveback(), but not both.
*/
urb = usb_get_urb (urb);
- atomic_inc (&urb->use_count);
-
if (urb->dev == hcd->self.root_hub) {
/* NOTE: requirement on hub callers (usbfs and the hub
* driver, for now) that URBs' urb->transfer_buffer be
status = hcd->driver->urb_enqueue (hcd, urb, mem_flags);
done:
- if (unlikely (status)) {
- urb_unlink (urb);
- atomic_dec (&urb->use_count);
- if (urb->reject)
- wake_up (&usb_kill_urb_queue);
+ if (status) {
usb_put_urb (urb);
+ urb_unlink (urb);
}
return status;
}
* soon as practical. we've already set up the urb's return status,
* but we can't know if the callback completed already.
*/
-static int
+static void
unlink1 (struct usb_hcd *hcd, struct urb *urb)
{
- int value;
-
if (urb == (struct urb *) hcd->rh_timer.data)
- value = usb_rh_status_dequeue (hcd, urb);
+ usb_rh_status_dequeue (hcd, urb);
else {
+ int value;
- /* The only reason an HCD might fail this call is if
- * it has not yet fully queued the urb to begin with.
- * Such failures should be harmless. */
+ /* failures "should" be harmless */
value = hcd->driver->urb_dequeue (hcd, urb);
+ if (value != 0)
+ dev_dbg (hcd->self.controller,
+ "dequeue %p --> %d\n",
+ urb, value);
}
+}
- if (value != 0)
- dev_dbg (hcd->self.controller, "dequeue %p --> %d\n",
- urb, value);
- return value;
+struct completion_splice { // modified urb context:
+ /* did we complete? */
+ struct completion done;
+
+ /* original urb data */
+ usb_complete_t complete;
+ void *context;
+};
+
+static void unlink_complete (struct urb *urb, struct pt_regs *regs)
+{
+ struct completion_splice *splice;
+
+ splice = (struct completion_splice *) urb->context;
+
+ /* issue original completion call */
+ urb->complete = splice->complete;
+ urb->context = splice->context;
+ urb->complete (urb, regs);
+
+ /* then let the synchronous unlink call complete */
+ complete (&splice->done);
}
/*
- * called in any context
+ * called in any context; note ASYNC_UNLINK restrictions
*
* caller guarantees urb won't be recycled till both unlink()
* and the urb's completion function return
*/
-static int hcd_unlink_urb (struct urb *urb, int status)
+static int hcd_unlink_urb (struct urb *urb)
{
struct hcd_dev *dev;
- struct usb_hcd *hcd = NULL;
- struct device *sys = NULL;
+ struct usb_hcd *hcd = 0;
+ struct device *sys = 0;
unsigned long flags;
+ struct completion_splice splice;
struct list_head *tmp;
int retval;
/* Any status except -EINPROGRESS means something already started to
* unlink this URB from the hardware. So there's no more work to do.
+ *
+ * FIXME use better explicit urb state
*/
if (urb->status != -EINPROGRESS) {
retval = -EBUSY;
hcd->saw_irq = 1;
}
- urb->status = status;
-
+ /* maybe set up to block until the urb's completion fires. the
+ * lower level hcd code is always async, locking on urb->status
+ * updates; an intercepted completion unblocks us.
+ */
+ if (!(urb->transfer_flags & URB_ASYNC_UNLINK)) {
+ if (in_interrupt ()) {
+ dev_dbg (hcd->self.controller,
+ "non-async unlink in_interrupt");
+ retval = -EWOULDBLOCK;
+ goto done;
+ }
+ /* synchronous unlink: block till we see the completion */
+ init_completion (&splice.done);
+ splice.complete = urb->complete;
+ splice.context = urb->context;
+ urb->complete = unlink_complete;
+ urb->context = &splice;
+ urb->status = -ENOENT;
+ } else {
+ /* asynchronous unlink */
+ urb->status = -ECONNRESET;
+ }
spin_unlock (&hcd_data_lock);
spin_unlock_irqrestore (&urb->lock, flags);
- retval = unlink1 (hcd, urb);
- if (retval == 0)
- retval = -EINPROGRESS;
- return retval;
+ // FIXME remove splicing, so this becomes unlink1 (hcd, urb);
+ if (urb == (struct urb *) hcd->rh_timer.data) {
+ usb_rh_status_dequeue (hcd, urb);
+ retval = 0;
+ } else {
+ retval = hcd->driver->urb_dequeue (hcd, urb);
+
+ /* hcds shouldn't really fail these calls, but... */
+ if (retval) {
+ dev_dbg (sys, "dequeue %p --> %d\n", urb, retval);
+ if (!(urb->transfer_flags & URB_ASYNC_UNLINK)) {
+ spin_lock_irqsave (&urb->lock, flags);
+ urb->complete = splice.complete;
+ urb->context = splice.context;
+ spin_unlock_irqrestore (&urb->lock, flags);
+ }
+ goto bye;
+ }
+ }
+
+ /* block till giveback, if needed */
+ if (urb->transfer_flags & URB_ASYNC_UNLINK)
+ return -EINPROGRESS;
+
+ wait_for_completion (&splice.done);
+ return 0;
done:
spin_unlock (&hcd_data_lock);
spin_unlock_irqrestore (&urb->lock, flags);
+bye:
if (retval != -EIDRM && sys && sys->driver)
dev_dbg (sys, "hcd_unlink_urb %p fail %d\n", urb, retval);
return retval;
/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_USB_SUSPEND
-
-static int hcd_hub_suspend (struct usb_bus *bus)
-{
- struct usb_hcd *hcd;
-
- hcd = container_of (bus, struct usb_hcd, self);
- if (hcd->driver->hub_suspend)
- return hcd->driver->hub_suspend (hcd);
- return 0;
-}
-
-static int hcd_hub_resume (struct usb_bus *bus)
-{
- struct usb_hcd *hcd;
-
- hcd = container_of (bus, struct usb_hcd, self);
- if (hcd->driver->hub_resume)
- return hcd->driver->hub_resume (hcd);
- return 0;
-}
-
-#endif
-
-/*-------------------------------------------------------------------------*/
-
/* called by khubd, rmmod, apmd, or other thread for hcd-private cleanup.
* we're guaranteed that the device is fully quiesced. also, that each
* endpoint has been hcd_endpoint_disabled.
.buffer_alloc = hcd_buffer_alloc,
.buffer_free = hcd_buffer_free,
.disable = hcd_endpoint_disable,
-#ifdef CONFIG_USB_SUSPEND
- .hub_suspend = hcd_hub_suspend,
- .hub_resume = hcd_hub_resume,
-#endif
};
EXPORT_SYMBOL (usb_hcd_operations);
/* pass ownership to the completion handler */
urb->complete (urb, regs);
- atomic_dec (&urb->use_count);
- if (unlikely (urb->reject))
- wake_up (&usb_kill_urb_queue);
usb_put_urb (urb);
}
EXPORT_SYMBOL (usb_hcd_giveback_urb);