X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fusb%2Fcore%2Furb.c;h=9864988377c74c11c052336a2065be1bdf8c54c7;hb=16c70f8c1b54b61c3b951b6fb220df250fe09b32;hp=777f34ea653b6b43aa79dfc4a2c5af3fc953d6d2;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git

diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 777f34ea6..986498837 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -1,15 +1,8 @@
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-
-#ifdef CONFIG_USB_DEBUG
-	#define DEBUG
-#else
-	#undef DEBUG
-#endif
 #include <linux/usb.h>
 
 #include "hcd.h"
@@ -39,7 +32,7 @@ void usb_init_urb(struct urb *urb)
 {
 	if (urb) {
 		memset(urb, 0, sizeof(*urb));
-		kref_init(&urb->kref, urb_destroy);
+		kref_init(&urb->kref);
 		spin_lock_init(&urb->lock);
 	}
 }
@@ -60,7 +53,7 @@
  *
  * The driver must call usb_free_urb() when it is finished with the urb.
  */
-struct urb *usb_alloc_urb(int iso_packets, int mem_flags)
+struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
 {
 	struct urb *urb;
 
@@ -88,7 +81,7 @@ struct urb *usb_alloc_urb(int iso_packets, int mem_flags)
 void usb_free_urb(struct urb *urb)
 {
 	if (urb)
-		kref_put(&urb->kref);
+		kref_put(&urb->kref, urb_destroy);
 }
 
 /**
@@ -121,7 +114,7 @@ struct urb * usb_get_urb(struct urb *urb)
  * describing that request to the USB subsystem. Request completion will
  * be indicated later, asynchronously, by calling the completion handler.
  * The three types of completion are success, error, and unlink
- * (a software-induced fault, also called "request cancelation").
+ * (a software-induced fault, also called "request cancellation").
  *
  * URBs may be submitted in interrupt context.
  *
@@ -170,7 +163,7 @@ struct urb * usb_get_urb(struct urb *urb)
  * As of Linux 2.6, all USB endpoint transfer queues support depths greater
  * than one. This was previously a HCD-specific behavior, except for ISO
  * transfers. Non-isochronous endpoint queues are inactive during cleanup
- * after faults (transfer errors or cancelation).
+ * after faults (transfer errors or cancellation).
  *
  * Reserved Bandwidth Transfers:
  *
@@ -224,7 +217,7 @@ struct urb * usb_get_urb(struct urb *urb)
  *	GFP_NOIO, unless b) or c) apply
  *
  */
-int usb_submit_urb(struct urb *urb, int mem_flags)
+int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
 {
 	int			pipe, temp, max;
 	struct usb_device	*dev;
@@ -237,7 +230,8 @@ int usb_submit_urb(struct urb *urb, int mem_flags)
 	    (dev->state < USB_STATE_DEFAULT) ||
 	    (!dev->bus) || (dev->devnum <= 0))
 		return -ENODEV;
-	if (dev->state == USB_STATE_SUSPENDED)
+	if (dev->bus->controller->power.power_state.event != PM_EVENT_ON
+			|| dev->state == USB_STATE_SUSPENDED)
 		return -EHOSTUNREACH;
 	if (!(op = dev->bus->op) || !op->submit_urb)
 		return -ENODEV;
@@ -256,13 +250,6 @@
 	if (!usb_pipecontrol (pipe) && dev->state < USB_STATE_CONFIGURED)
 		return -ENODEV;
 
-	/* (actually HCDs may need to duplicate this, endpoint might yet
-	 * stall due to queued bulk/intr transactions that complete after
-	 * we check)
-	 */
-	if (usb_endpoint_halted (dev, usb_pipeendpoint (pipe), is_out))
-		return -EPIPE;
-
 	/* FIXME there should be a sharable lock protecting us against
 	 * config/altsetting changes and disconnects, kicking in here.
 	 * (here == before maxpacket, and eventually endpoint type,
@@ -271,11 +258,10 @@
 
 	max = usb_maxpacket (dev, pipe, is_out);
 	if (max <= 0) {
-		dbg ("%s: bogus endpoint %d-%s on usb-%s-%s (bad maxpacket %d)",
-			__FUNCTION__,
-			usb_pipeendpoint (pipe), is_out ? "OUT" : "IN",
-			dev->bus->bus_name, dev->devpath,
-			max);
+		dev_dbg(&dev->dev,
+			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
+			usb_pipeendpoint (pipe), is_out ? "out" : "in",
+			__FUNCTION__, max);
 		return -EMSGSIZE;
 	}
 
@@ -317,9 +303,8 @@ int usb_submit_urb(struct urb *urb, int mem_flags)
 		unsigned int	allowed;
 
 		/* enforce simple/standard policy */
-		allowed = URB_ASYNC_UNLINK;	// affects later unlinks
-		allowed |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
-		allowed |= URB_NO_INTERRUPT;
+		allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
+				URB_NO_INTERRUPT);
 		switch (temp) {
 		case PIPE_BULK:
 			if (is_out)
@@ -403,19 +388,13 @@ int usb_submit_urb(struct urb *urb, int mem_flags)
  *
  * This routine cancels an in-progress request. URBs complete only
  * once per submission, and may be canceled only once per submission.
- * Successful cancelation means the requests's completion handler will
+ * Successful cancellation means the request's completion handler will
  * be called with a status code indicating that the request has been
  * canceled (rather than any other code) and will quickly be removed
  * from host controller data structures.
  *
- * In the past, clearing the URB_ASYNC_UNLINK transfer flag for the
- * URB indicated that the request was synchronous. This usage is now
- * deprecated; if the flag is clear the call will be forwarded to
- * usb_kill_urb() and the return value will be 0. In the future, drivers
- * should call usb_kill_urb() directly for synchronous unlinking.
- *
- * When the URB_ASYNC_UNLINK transfer flag for the URB is set, this
- * request is asynchronous. Success is indicated by returning -EINPROGRESS,
+ * This request is always asynchronous.
+ * Success is indicated by returning -EINPROGRESS,
  * at which time the URB will normally have been unlinked but not yet
  * given back to the device driver. When it is called, the completion
  * function will see urb->status == -ECONNRESET. Failure is indicated
@@ -428,12 +407,16 @@ int usb_submit_urb(struct urb *urb, int mem_flags)
  *
  * Host Controller Drivers (HCDs) place all the URBs for a particular
  * endpoint in a queue. Normally the queue advances as the controller
- * hardware processes each request. But when an URB terminates with any
- * fault (such as an error, or being unlinked) its queue stops, at least
- * until that URB's completion routine returns. It is guaranteed that
- * the queue will not restart until all its unlinked URBs have been fully
- * retired, with their completion routines run, even if that's not until
- * some time after the original completion handler returns.
+ * hardware processes each request. But when an URB terminates with an
+ * error its queue stops, at least until that URB's completion routine
+ * returns. It is guaranteed that the queue will not restart until all
+ * its unlinked URBs have been fully retired, with their completion
+ * routines run, even if that's not until some time after the original
+ * completion handler returns. Normally the same behavior and guarantees
+ * apply when an URB terminates because it was unlinked; however if an
+ * URB is unlinked before the hardware has started to execute it, then
+ * its queue is not guaranteed to stop until all the preceding URBs have
+ * completed.
  *
  * This means that USB device drivers can safely build deep queues for
  * large or complex transfers, and clean them up reliably after any sort
@@ -457,10 +440,6 @@ int usb_unlink_urb(struct urb *urb)
 {
 	if (!urb)
 		return -EINVAL;
-	if (!(urb->transfer_flags & URB_ASYNC_UNLINK)) {
-		usb_kill_urb(urb);
-		return 0;
-	}
 	if (!(urb->dev && urb->dev->bus && urb->dev->bus->op))
 		return -ENODEV;
 	return urb->dev->bus->op->unlink_urb(urb, -ECONNRESET);
@@ -488,6 +467,7 @@ int usb_unlink_urb(struct urb *urb)
  */
 void usb_kill_urb(struct urb *urb)
 {
+	might_sleep();
 	if (!(urb && urb->dev && urb->dev->bus && urb->dev->bus->op))
 		return;
 	spin_lock_irq(&urb->lock);
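
Usage note (a minimal sketch, not part of the patch): the fragment below shows the URB lifecycle as it looks after these changes, with gfp_t mem_flags on usb_alloc_urb()/usb_submit_urb(), always-asynchronous usb_unlink_urb(), and usb_kill_urb() for synchronous cancellation, which may sleep per the might_sleep() added above. The device pointer, endpoint number, and all my_* names are hypothetical, and the one-argument completion signature is the later 2.6 form; kernels of roughly this vintage also passed a struct pt_regs * to the handler.

#include <linux/module.h>
#include <linux/usb.h>

#define MY_BULK_OUT_EP	1	/* hypothetical endpoint number */

/* Runs in interrupt context.  Unlinked URBs complete with status
 * -ECONNRESET (usb_unlink_urb) or -ENOENT (usb_kill_urb). */
static void my_complete(struct urb *urb)
{
	if (urb->status)
		return;		/* fault or cancellation; queue may pause */
	/* ... consume urb->actual_length bytes of urb->transfer_buffer ... */
}

/* Queue one bulk-out transfer from process context. */
static int my_send(struct usb_device *udev, void *buf, int len)
{
	struct urb *urb;
	int retval;

	urb = usb_alloc_urb(0, GFP_KERNEL);	/* mem_flags is now gfp_t */
	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev, MY_BULK_OUT_EP),
			  buf, len, my_complete, NULL);

	/* From a completion handler or other atomic context this
	 * would have to be GFP_ATOMIC instead. */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	if (retval)
		usb_free_urb(urb);	/* kref_put(); frees on last reference */
	return retval;
}

/* Synchronous cancellation, e.g. from disconnect(): usb_kill_urb()
 * may sleep until the completion handler has run and the URB is retired. */
static void my_cancel(struct urb *urb)
{
	usb_kill_urb(urb);
	usb_free_urb(urb);
}

Calling usb_unlink_urb() instead would return -EINPROGRESS on success and let my_complete() see -ECONNRESET later, matching the asynchronous contract documented in the comments above.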