+/*
+ * Function:    scsi_allocate_request
+ *
+ * Purpose:     Allocate a request descriptor.
+ *
+ * Arguments:   sdev     - device for which we want a request
+ *              gfp_mask - allocation flags passed to kzalloc
+ *
+ * Lock status: No locks assumed to be held.  This function is SMP-safe.
+ *
+ * Returns:     Pointer to request block, or NULL if the allocation fails.
+ */
+struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
+					   gfp_t gfp_mask)
+{
+	/* Co-allocate the block-layer request immediately behind the
+	 * scsi_request, keeping it 4-byte aligned. */
+	const int offset = ALIGN(sizeof(struct scsi_request), 4);
+	const int size = offset + sizeof(struct request);
+	struct scsi_request *sreq;
+
+	sreq = kzalloc(size, gfp_mask);
+	if (likely(sreq != NULL)) {
+		sreq->sr_request = (struct request *)(((char *)sreq) + offset);
+		sreq->sr_device = sdev;
+		sreq->sr_host = sdev->host;
+		sreq->sr_magic = SCSI_REQ_MAGIC;
+		sreq->sr_data_direction = DMA_BIDIRECTIONAL;
+	}
+
+	return sreq;
+}
+EXPORT_SYMBOL(scsi_allocate_request);
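+
+/*
+ * A minimal sketch of the intended caller pattern, assuming the
+ * synchronous scsi_wait_req() helper; the CDB, timeout, and retry
+ * count below are illustrative assumptions, not part of this file:
+ *
+ *	unsigned char cmnd[MAX_COMMAND_SIZE] = { TEST_UNIT_READY, };
+ *	struct scsi_request *sreq;
+ *	int result;
+ *
+ *	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
+ *	if (!sreq)
+ *		return -ENOMEM;
+ *	sreq->sr_data_direction = DMA_NONE;
+ *	scsi_wait_req(sreq, cmnd, NULL, 0, 30 * HZ, 3);
+ *	result = sreq->sr_result;
+ *	scsi_release_request(sreq);
+ */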
+
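+/*
+ * Function:    __scsi_release_request
+ *
+ * Purpose:     Release the resources held by a request descriptor (its
+ *              block-layer tag, if any, and any attached command) without
+ *              freeing the descriptor itself.
+ *
+ * Arguments:   sreq - request to clean up
+ *
+ * Lock status: No locks assumed to be held.  The queue lock is taken
+ *              internally when a tagged request must be ended.
+ */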
+void __scsi_release_request(struct scsi_request *sreq)
+{
+	struct request *req = sreq->sr_request;
+
+	/* unlikely because the tag was usually ended earlier by the
+	 * mid-layer.  However, for layering reasons ULDs don't end
+	 * the tag of commands they generate. */
+	if (unlikely(blk_rq_tagged(req))) {
+		unsigned long flags;
+		struct request_queue *q = req->q;
+
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_queue_end_tag(q, req);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+
+	/* Finish any command still attached to this request. */
+	if (likely(sreq->sr_command != NULL)) {
+		struct scsi_cmnd *cmd = sreq->sr_command;
+
+		sreq->sr_command = NULL;
+		scsi_next_command(cmd);
+	}
+}
+
+/*
+ * Function:    scsi_release_request
+ *
+ * Purpose:     Release a request descriptor and free its memory.
+ *
+ * Arguments:   sreq - request to release
+ *
+ * Lock status: No locks assumed to be held.  This function is SMP-safe.
+ */
+void scsi_release_request(struct scsi_request *sreq)
+{
+	__scsi_release_request(sreq);
+	kfree(sreq);
+}
+EXPORT_SYMBOL(scsi_release_request);
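+
+/*
+ * Note on the two-level release: the only difference between
+ * scsi_release_request() and __scsi_release_request() is the kfree(),
+ * so a caller that owns the descriptor's storage some other way (an
+ * assumption about users of this API, not something this file enforces)
+ * would call __scsi_release_request() directly and manage the memory
+ * itself.
+ */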
+