#include <linux/notifier.h>
#include <linux/cpu.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
-#include "scsi.h"
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_request.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
* output in scsi_log_completion.
*/
printk(" ");
- print_command(cmd->cmnd);
+ /* NOTE(review): new split-header API takes the whole scsi_cmnd (so it can
+  * size the CDB itself) rather than the raw cmd->cmnd byte array — confirm
+  * against <scsi/scsi_dbg.h>. */
+ scsi_print_command(cmd);
if (level > 3) {
printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
" done = 0x%p, queuecommand 0x%p\n",
printk("UNKNOWN");
}
printk(" %8x ", cmd->result);
- print_command(cmd->cmnd);
+ /* Same conversion as in the send path: pass the scsi_cmnd, not the CDB. */
+ scsi_print_command(cmd);
if (status_byte(cmd->result) & CHECK_CONDITION) {
/*
* XXX The print_sense formatting/prefix
* doesn't match this function.
*/
- print_sense("", cmd);
+ /* NOTE(review): scsi_print_sense()'s first argument is a prefix string;
+  * the empty prefix preserves the old output, but see the XXX above —
+  * the formatting mismatch predates this conversion. */
+ scsi_print_sense("", cmd);
}
if (level > 3) {
printk(KERN_INFO "scsi host busy %d failed %d\n",
*/
void scsi_done(struct scsi_cmnd *cmd)
{
- unsigned long flags;
-
+ /* NOTE(review): 'flags' moves into __scsi_done() below, which now owns
+  * the irq-saving lock section — verify no remaining use in this hunk's
+  * elided context. */
/*
* We don't have to worry about this one timing out any more.
* If we are unable to remove the timer, then the command
*/
if (!scsi_delete_timer(cmd))
return;
+ /* Timer deleted successfully: we own completion, so finish the command. */
+ __scsi_done(cmd);
+}
+
+/* Private entry to scsi_done() to complete a command when the timer
+ * isn't running --- used by scsi_times_out */
+void __scsi_done(struct scsi_cmnd *cmd)
+{
+ unsigned long flags;
+ /* NOTE(review): body continues past this hunk; 'flags' is presumably
+  * consumed by a spin_lock_irqsave() in the elided remainder — confirm
+  * in the full file. */
/*
* Set the serial numbers back to zero
*/
if (tags <= 0)
return;
- /*
- * Limit max queue depth on a single lun to 256 for now. Remember,
- * we allocate a struct scsi_command for each of these and keep it
- * around forever. Too deep of a depth just wastes memory.
- */
- if (tags > 256)
- return;
+ /* NOTE(review): the hard 256-per-LUN cap is dropped with no replacement
+  * here; presumably blk_queue_resize_tags() below now bounds the depth for
+  * block-layer-tagged queues — confirm untagged queues can't grow unbounded. */
spin_lock_irqsave(&device_request_lock, flags);
+ /* Lock nesting: queue_lock is taken inside device_request_lock; any other
+  * path taking both must use the same order to avoid deadlock. */
+ spin_lock(sdev->request_queue->queue_lock);
+
+ /* Check to see if the queue is managed by the block layer
+ * if it is, and we fail to adjust the depth, exit */
+ if (blk_queue_tagged(sdev->request_queue) &&
+ blk_queue_resize_tags(sdev->request_queue, tags) != 0)
+ goto out;
+
sdev->queue_depth = tags;
switch (tagged) {
case MSG_ORDERED_TAG:
sdev->queue_depth = tags;
break;
}
+ out:
+ spin_unlock(sdev->request_queue->queue_lock);
spin_unlock_irqrestore(&device_request_lock, flags);
}
*/
int scsi_device_get(struct scsi_device *sdev)
{
- if(!sdev)
- return -ENXIO;
if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
return -ENXIO;
if (!get_device(&sdev->sdev_gendev))