X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fusb%2Fcore%2Furb.c;h=9801d08edacf8db26056175116d28df45af985ed;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=3c14361bbeb38d2ccc4cdf55307d58191d7ce109;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git

diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 3c14361bb..9801d08ed 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -1,15 +1,8 @@
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-
-#ifdef CONFIG_USB_DEBUG
-	#define DEBUG
-#else
-	#undef DEBUG
-#endif
 #include <linux/usb.h>
 #include "hcd.h"
 
@@ -60,11 +53,11 @@ void usb_init_urb(struct urb *urb)
  *
  * The driver must call usb_free_urb() when it is finished with the urb.
  */
-struct urb *usb_alloc_urb(int iso_packets, int mem_flags)
+struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
 {
 	struct urb *urb;
 
-	urb = (struct urb *)kmalloc(sizeof(struct urb) +
+	urb = kmalloc(sizeof(struct urb) +
 		iso_packets * sizeof(struct usb_iso_packet_descriptor),
 		mem_flags);
 	if (!urb) {
@@ -121,7 +114,7 @@ struct urb * usb_get_urb(struct urb *urb)
  * describing that request to the USB subsystem. Request completion will
  * be indicated later, asynchronously, by calling the completion handler.
  * The three types of completion are success, error, and unlink
- * (a software-induced fault, also called "request cancelation").
+ * (a software-induced fault, also called "request cancellation").
  *
  * URBs may be submitted in interrupt context.
  *
@@ -170,7 +163,7 @@ struct urb * usb_get_urb(struct urb *urb)
  * As of Linux 2.6, all USB endpoint transfer queues support depths greater
  * than one. This was previously a HCD-specific behavior, except for ISO
  * transfers. Non-isochronous endpoint queues are inactive during cleanup
- * after faults (transfer errors or cancelation).
+ * after faults (transfer errors or cancellation).
  *
  * Reserved Bandwidth Transfers:
  *
@@ -224,11 +217,10 @@ struct urb * usb_get_urb(struct urb *urb)
  *      GFP_NOIO, unless b) or c) apply
  *
  */
-int usb_submit_urb(struct urb *urb, int mem_flags)
+int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
 {
 	int			pipe, temp, max;
 	struct usb_device	*dev;
-	struct usb_operations	*op;
 	int			is_out;
 
 	if (!urb || urb->hcpriv || !urb->complete)
@@ -237,10 +229,9 @@ int usb_submit_urb(struct urb *urb, int mem_flags)
 	    (dev->state < USB_STATE_DEFAULT) ||
 	    (!dev->bus) || (dev->devnum <= 0))
 		return -ENODEV;
-	if (dev->state == USB_STATE_SUSPENDED)
+	if (dev->bus->controller->power.power_state.event != PM_EVENT_ON
+			|| dev->state == USB_STATE_SUSPENDED)
 		return -EHOSTUNREACH;
-	if (!(op = dev->bus->op) || !op->submit_urb)
-		return -ENODEV;
 
 	urb->status = -EINPROGRESS;
 	urb->actual_length = 0;
@@ -264,11 +255,10 @@ int usb_submit_urb(struct urb *urb, int mem_flags)
 
 	max = usb_maxpacket (dev, pipe, is_out);
 	if (max <= 0) {
-		dbg ("%s: bogus endpoint %d-%s on usb-%s-%s (bad maxpacket %d)",
-			__FUNCTION__,
-			usb_pipeendpoint (pipe), is_out ? "OUT" : "IN",
-			dev->bus->bus_name, dev->devpath,
-			max);
+		dev_dbg(&dev->dev,
+			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
+			usb_pipeendpoint (pipe), is_out ? "out" : "in",
+			__FUNCTION__, max);
 		return -EMSGSIZE;
 	}
 
@@ -310,9 +300,8 @@ int usb_submit_urb(struct urb *urb, int mem_flags)
 	unsigned int	allowed;
 
 	/* enforce simple/standard policy */
-	allowed = URB_ASYNC_UNLINK;	// affects later unlinks
-	allowed |= (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP);
-	allowed |= URB_NO_INTERRUPT;
+	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
+			URB_NO_INTERRUPT);
 	switch (temp) {
 	case PIPE_BULK:
 		if (is_out)
@@ -384,7 +373,7 @@ int usb_submit_urb(struct urb *urb, int mem_flags)
 		urb->interval = temp;
 	}
 
-	return op->submit_urb (urb, mem_flags);
+	return usb_hcd_submit_urb (urb, mem_flags);
 }
 
 /*-------------------------------------------------------------------*/
@@ -396,19 +385,13 @@ int usb_submit_urb(struct urb *urb, int mem_flags)
  *
  * This routine cancels an in-progress request. URBs complete only
  * once per submission, and may be canceled only once per submission.
- * Successful cancelation means the requests's completion handler will
+ * Successful cancellation means the requests's completion handler will
  * be called with a status code indicating that the request has been
  * canceled (rather than any other code) and will quickly be removed
  * from host controller data structures.
  *
- * In the past, clearing the URB_ASYNC_UNLINK transfer flag for the
- * URB indicated that the request was synchronous. This usage is now
- * deprecated; if the flag is clear the call will be forwarded to
- * usb_kill_urb() and the return value will be 0. In the future, drivers
- * should call usb_kill_urb() directly for synchronous unlinking.
- *
- * When the URB_ASYNC_UNLINK transfer flag for the URB is set, this
- * request is asynchronous. Success is indicated by returning -EINPROGRESS,
+ * This request is always asynchronous.
+ * Success is indicated by returning -EINPROGRESS,
  * at which time the URB will normally have been unlinked but not yet
  * given back to the device driver. When it is called, the completion
  * function will see urb->status == -ECONNRESET. Failure is indicated
@@ -421,12 +404,16 @@ int usb_submit_urb(struct urb *urb, int mem_flags)
  *
  * Host Controller Drivers (HCDs) place all the URBs for a particular
  * endpoint in a queue. Normally the queue advances as the controller
- * hardware processes each request. But when an URB terminates with any
- * fault (such as an error, or being unlinked) its queue stops, at least
- * until that URB's completion routine returns. It is guaranteed that
- * the queue will not restart until all its unlinked URBs have been fully
- * retired, with their completion routines run, even if that's not until
- * some time after the original completion handler returns.
+ * hardware processes each request. But when an URB terminates with an
+ * error its queue stops, at least until that URB's completion routine
+ * returns. It is guaranteed that the queue will not restart until all
+ * its unlinked URBs have been fully retired, with their completion
+ * routines run, even if that's not until some time after the original
+ * completion handler returns. Normally the same behavior and guarantees
+ * apply when an URB terminates because it was unlinked; however if an
+ * URB is unlinked before the hardware has started to execute it, then
+ * its queue is not guaranteed to stop until all the preceding URBs have
+ * completed.
  *
  * This means that USB device drivers can safely build deep queues for
  * large or complex transfers, and clean them up reliably after any sort
@@ -450,13 +437,9 @@ int usb_unlink_urb(struct urb *urb)
 {
 	if (!urb)
 		return -EINVAL;
-	if (!(urb->transfer_flags & URB_ASYNC_UNLINK)) {
-		usb_kill_urb(urb);
-		return 0;
-	}
-	if (!(urb->dev && urb->dev->bus && urb->dev->bus->op))
+	if (!(urb->dev && urb->dev->bus))
 		return -ENODEV;
-	return urb->dev->bus->op->unlink_urb(urb, -ECONNRESET);
+	return usb_hcd_unlink_urb(urb, -ECONNRESET);
 }
 
 /**
@@ -481,13 +464,14 @@ int usb_unlink_urb(struct urb *urb)
  */
 void usb_kill_urb(struct urb *urb)
 {
-	if (!(urb && urb->dev && urb->dev->bus && urb->dev->bus->op))
+	might_sleep();
+	if (!(urb && urb->dev && urb->dev->bus))
 		return;
 	spin_lock_irq(&urb->lock);
 	++urb->reject;
 	spin_unlock_irq(&urb->lock);
 
-	urb->dev->bus->op->unlink_urb(urb, -ENOENT);
+	usb_hcd_unlink_urb(urb, -ENOENT);
 	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
 
 	spin_lock_irq(&urb->lock);
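Illustrative usage (not part of the patch above): the usb_alloc_urb() and usb_submit_urb() hunks only change the type of mem_flags from int to gfp_t, so misuse of allocation flags can be caught by type checking; the driver-side calling convention is unchanged. Below is a minimal sketch of how a driver might allocate and submit a bulk URB against this API. It assumes the post-2.6.18 completion-handler signature (no struct pt_regs argument), and the names my_complete/my_start_io and the bulk OUT endpoint number 1 are hypothetical.

#include <linux/usb.h>
#include <linux/slab.h>

static void my_complete(struct urb *urb)
{
	/* runs in interrupt context; urb->status reports the outcome.
	 * The driver still owns the urb and eventually drops it with
	 * usb_free_urb(). */
}

static int my_start_io(struct usb_device *udev, void *buf, int len)
{
	struct urb *urb;
	int ret;

	urb = usb_alloc_urb(0, GFP_KERNEL);	/* mem_flags is now gfp_t */
	if (!urb)
		return -ENOMEM;
	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1),
			buf, len, my_complete, NULL);
	ret = usb_submit_urb(urb, GFP_KERNEL);	/* GFP_ATOMIC in atomic context */
	if (ret)
		usb_free_urb(urb);
	return ret;
}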
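Cancellation under the new semantics (again illustrative, not from the patch): the usb_unlink_urb() and usb_kill_urb() hunks drop the URB_ASYNC_UNLINK special case, so unlinking is always asynchronous and synchronous cancellation goes through usb_kill_urb() only, which may sleep (hence the added might_sleep()). A sketch with hypothetical helper names my_stop_io/my_abort_io:

/* Process context only (e.g. a disconnect or close path): sleeps until
 * the URB has been given back; its completion handler will have seen
 * urb->status == -ENOENT. */
static void my_stop_io(struct urb *urb)
{
	usb_kill_urb(urb);
}

/* Any context: always asynchronous now.  Success is -EINPROGRESS and the
 * completion handler later sees urb->status == -ECONNRESET; there is no
 * longer a synchronous fallback keyed off URB_ASYNC_UNLINK. */
static void my_abort_io(struct urb *urb)
{
	usb_unlink_urb(urb);
}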