{
int ret = 1;
- BUG_ON(!(rq->flags & REQ_STARTED));
-
/*
* if failfast is set on a request, override number of sectors and
* complete the whole request right now
*/
if (!end_that_request_first(rq, uptodate, nr_sectors)) {
add_disk_randomness(rq->rq_disk);
- blkdev_dequeue_request(rq);
+ if (!list_empty(&rq->queuelist))
+ blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL;
end_that_request_last(rq, uptodate);
ret = 0;
ide_pm_flush_cache = ide_pm_state_start_suspend,
idedisk_pm_standby,
- idedisk_pm_idle = ide_pm_state_start_resume,
+ idedisk_pm_restore_pio = ide_pm_state_start_resume,
+ idedisk_pm_idle,
ide_pm_restore_dma,
};
static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
{
- struct request_pm_state *pm = rq->end_io_data;
+ struct request_pm_state *pm = rq->data;
if (drive->media != ide_disk)
return;
case idedisk_pm_standby: /* Suspend step 2 (standby) complete */
pm->pm_step = ide_pm_state_completed;
break;
- case idedisk_pm_idle: /* Resume step 1 (idle) complete */
+ case idedisk_pm_restore_pio: /* Resume step 1 complete */
+ pm->pm_step = idedisk_pm_idle;
+ break;
+ case idedisk_pm_idle: /* Resume step 2 (idle) complete */
pm->pm_step = ide_pm_restore_dma;
break;
}
static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
- struct request_pm_state *pm = rq->end_io_data;
+ struct request_pm_state *pm = rq->data;
ide_task_t *args = rq->special;
memset(args, 0, sizeof(*args));
if (drive->media != ide_disk) {
- /* skip idedisk_pm_idle for ATAPI devices */
- if (pm->pm_step == idedisk_pm_idle)
+ /*
+ * skip idedisk_pm_restore_pio and idedisk_pm_idle for ATAPI
+ * devices
+ */
+ if (pm->pm_step == idedisk_pm_restore_pio)
pm->pm_step = ide_pm_restore_dma;
}
args->handler = &task_no_data_intr;
return do_rw_taskfile(drive, args);
- case idedisk_pm_idle: /* Resume step 1 (idle) */
+ case idedisk_pm_restore_pio: /* Resume step 1 (restore PIO) */
+ if (drive->hwif->tuneproc != NULL)
+ drive->hwif->tuneproc(drive, 255);
+ ide_complete_power_step(drive, rq, 0, 0);
+ return ide_stopped;
+
+ case idedisk_pm_idle: /* Resume step 2 (idle) */
args->tfRegister[IDE_COMMAND_OFFSET] = WIN_IDLEIMMEDIATE;
args->command_type = IDE_DRIVE_TASK_NO_DATA;
args->handler = task_no_data_intr;
return do_rw_taskfile(drive, args);
- case ide_pm_restore_dma: /* Resume step 2 (restore DMA) */
+ case ide_pm_restore_dma: /* Resume step 3 (restore DMA) */
/*
* Right now, all we do is call hwif->ide_dma_check(drive),
* we could be smarter and check for current xfer_speed
* in struct drive etc...
*/
spin_lock_irqsave(&ide_lock, flags);
- BUG_ON(!(rq->flags & REQ_STARTED));
+ BUG_ON(!blk_rq_started(rq));
/*
* if failfast is set on a request, override number of sectors and
* complete the whole request right now
*/
rq = HWGROUP(drive)->rq;
spin_unlock_irqrestore(&ide_lock, flags);
- if (rq->flags & REQ_DRIVE_CMD) {
+ if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
u8 *args = (u8 *) rq->buffer;
if (rq->errors == 0)
rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
args[1] = err;
args[2] = hwif->INB(IDE_NSECTOR_REG);
}
- } else if (rq->flags & REQ_DRIVE_TASK) {
+ } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
u8 *args = (u8 *) rq->buffer;
if (rq->errors == 0)
rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
args[5] = hwif->INB(IDE_HCYL_REG);
args[6] = hwif->INB(IDE_SELECT_REG);
}
- } else if (rq->flags & REQ_DRIVE_TASKFILE) {
+ } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
ide_task_t *args = (ide_task_t *) rq->special;
if (rq->errors == 0)
rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
}
}
} else if (blk_pm_request(rq)) {
- struct request_pm_state *pm = rq->end_io_data;
+ struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n",
drive->name, rq->pm->pm_step, stat, err);
if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ && hwif->err_stops_fifo == 0)
try_to_flush_leftover_data(drive);
+ if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
+ ide_kill_rq(drive, rq);
+ return ide_stopped;
+ }
+
if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
- /* force an abort */
- hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);
+ rq->errors |= ERROR_RESET;
- if (rq->errors >= ERROR_MAX || blk_noretry_request(rq))
- ide_kill_rq(drive, rq);
- else {
- if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
- ++rq->errors;
- return ide_do_reset(drive);
- }
- if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
- drive->special.b.recalibrate = 1;
+ if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
++rq->errors;
+ return ide_do_reset(drive);
}
+
+ if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
+ drive->special.b.recalibrate = 1;
+
+ ++rq->errors;
+
return ide_stopped;
}
return ide_stopped;
/* retry only "normal" I/O: */
- if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE)) {
+ if (!blk_fs_request(rq)) {
rq->errors = 1;
ide_end_drive_cmd(drive, stat, err);
return ide_stopped;
return ide_stopped;
/* retry only "normal" I/O: */
- if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK | REQ_DRIVE_TASKFILE)) {
+ if (!blk_fs_request(rq)) {
rq->errors = 1;
ide_end_drive_cmd(drive, BUSY_STAT, 0);
return ide_stopped;
if (hwif->sg_mapped) /* needed by ide-scsi */
return;
- if ((rq->flags & REQ_DRIVE_TASKFILE) == 0) {
+ if (rq->cmd_type != REQ_TYPE_ATA_TASKFILE) {
hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
} else {
sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
struct request *rq)
{
ide_hwif_t *hwif = HWIF(drive);
- if (rq->flags & REQ_DRIVE_TASKFILE) {
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
ide_task_t *args = rq->special;
if (!args)
if (args->tf_out_flags.all != 0)
return flagged_taskfile(drive, args);
return do_rw_taskfile(drive, args);
- } else if (rq->flags & REQ_DRIVE_TASK) {
+ } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
u8 *args = rq->buffer;
u8 sel;
hwif->OUTB(sel, IDE_SELECT_REG);
ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
return ide_started;
- } else if (rq->flags & REQ_DRIVE_CMD) {
+ } else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
u8 *args = rq->buffer;
if (!args)
static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
- struct request_pm_state *pm = rq->end_io_data;
+ struct request_pm_state *pm = rq->data;
if (blk_pm_suspend_request(rq) &&
pm->pm_step == ide_pm_state_start_suspend)
ide_startstop_t startstop;
sector_t block;
- BUG_ON(!(rq->flags & REQ_STARTED));
+ BUG_ON(!blk_rq_started(rq));
#ifdef DEBUG
printk("%s: start_request: current=0x%08lx\n",
if (!drive->special.all) {
ide_driver_t *drv;
- if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK))
- return execute_drive_cmd(drive, rq);
- else if (rq->flags & REQ_DRIVE_TASKFILE)
+ /*
+ * We reset the drive so we need to issue a SETFEATURES.
+ * Do it _after_ do_special() restored device parameters.
+ */
+ if (drive->current_speed == 0xff)
+ ide_config_drive_speed(drive, drive->desired_speed);
+
+ if (rq->cmd_type == REQ_TYPE_ATA_CMD ||
+ rq->cmd_type == REQ_TYPE_ATA_TASK ||
+ rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
return execute_drive_cmd(drive, rq);
else if (blk_pm_request(rq)) {
- struct request_pm_state *pm = rq->end_io_data;
+ struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
printk("%s: start_power_step(step: %d)\n",
drive->name, rq->pm->pm_step);
* We count how many times we loop here to make sure we service
* all drives in the hwgroup without looping for ever
*/
- if (drive->blocked && !blk_pm_request(rq) && !(rq->flags & REQ_PREEMPT)) {
+ if (drive->blocked && !blk_pm_request(rq) && !(rq->cmd_flags & REQ_PREEMPT)) {
drive = drive->next ? drive->next : hwgroup->drive;
if (loops++ < 4 && !blk_queue_plugged(drive->queue))
goto again;
* make sure request is sane
*/
rq = HWGROUP(drive)->rq;
+
+ if (!rq)
+ goto out;
+
HWGROUP(drive)->rq = NULL;
rq->errors = 0;
* on the hwgroup and the process begins again.
*/
-irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
+irqreturn_t ide_intr (int irq, void *dev_id)
{
unsigned long flags;
ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
del_timer(&hwgroup->timer);
spin_unlock(&ide_lock);
+ /* Some controllers might set DMA INTR no matter whether the transfer
+ * was DMA or PIO; bmdma status might need to be cleared even for
+ * PIO interrupts to prevent a spurious/lost irq.
+ */
+ if (hwif->ide_dma_clear_irq && !(drive->waiting_for_dma))
+ /* ide_dma_end() needs bmdma status for error checking.
+ * So, skip clearing bmdma status here and leave it
+ * to ide_dma_end() if this is dma interrupt.
+ */
+ hwif->ide_dma_clear_irq(drive);
+
if (drive->unmask)
local_irq_enable_in_hardirq();
/* service this interrupt, may set handler for next interrupt */
void ide_init_drive_cmd (struct request *rq)
{
memset(rq, 0, sizeof(*rq));
- rq->flags = REQ_DRIVE_CMD;
+ rq->cmd_type = REQ_TYPE_ATA_CMD;
rq->ref_count = 1;
}
int must_wait = (action == ide_wait || action == ide_head_wait);
rq->errors = 0;
- rq->rq_status = RQ_ACTIVE;
/*
* we need to hold an extra reference to request for safe inspection
* after completion
*/
if (must_wait) {
rq->ref_count++;
- rq->waiting = &wait;
+ rq->end_io_data = &wait;
rq->end_io = blk_end_sync_rq;
}
hwgroup->rq = NULL;
if (action == ide_preempt || action == ide_head_wait) {
where = ELEVATOR_INSERT_FRONT;
- rq->flags |= REQ_PREEMPT;
+ rq->cmd_flags |= REQ_PREEMPT;
}
__elv_add_request(drive->queue, rq, where, 0);
ide_do_request(hwgroup, IDE_NO_IRQ);
err = 0;
if (must_wait) {
wait_for_completion(&wait);
- rq->waiting = NULL;
if (rq->errors)
err = -EIO;