*/
static int i2o_block_device_flush(struct i2o_device *dev)
{
- struct i2o_message __iomem *msg;
+ struct i2o_message *msg;
u32 m;
m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
*/
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
- struct i2o_message __iomem *msg;
+ struct i2o_message *msg;
u32 m;
m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
*/
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
- struct i2o_message __iomem *msg;
+ struct i2o_message *msg;
u32 m;
m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
*/
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
- struct i2o_message __iomem *msg;
+ struct i2o_message *msg;
u32 m;
m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
{
struct i2o_device *i2o_dev = dev->i2o_dev;
struct i2o_controller *c = i2o_dev->iop;
- struct i2o_message __iomem *msg;
+ struct i2o_message *msg;
u32 m;
int rc;
unsigned long flags;
/* FAILed message */
- if (unlikely(le32_to_cpu(msg->u.head[0]) & (1 << 13))) {
+ if (unlikely(readl(&msg->u.head[0]) & (1 << 13))) {
struct i2o_message *pmsg;
u32 pm;
+ printk(KERN_WARNING "FAIL");
/*
* FAILed message from controller
* We increment the error count and abort it
* better be on the safe side since no one really follows
* the spec to the book :)
*/
- pm = le32_to_cpu(msg->body[3]);
- pmsg = i2o_msg_in_to_virt(c, pm);
+ pm = readl(&msg->body[3]);
+ pmsg = c->in_queue.virt + pm;
- req = i2o_cntxt_list_get(c, le32_to_cpu(pmsg->u.s.tcntxt));
+ req = i2o_cntxt_list_get(c, readl(&pmsg->u.s.tcntxt));
if (unlikely(!req)) {
printk(KERN_ERR "block-osm: NULL reply received!\n");
return -1;
spin_lock_irqsave(q->queue_lock, flags);
while (end_that_request_chunk(req, !req->errors,
- le32_to_cpu(pmsg->body[1]))) ;
+ readl(&pmsg->body[1]))) ;
end_that_request_last(req);
dev->open_queue_depth--;
return -1;
}
- req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
+ req = i2o_cntxt_list_get(c, readl(&msg->u.s.tcntxt));
if (unlikely(!req)) {
printk(KERN_ERR "block-osm: NULL reply received!\n");
return -1;
"I2O Block: Data transfer to deleted device!\n");
spin_lock_irqsave(q->queue_lock, flags);
while (end_that_request_chunk
- (req, !req->errors, le32_to_cpu(msg->body[1]))) ;
+ (req, !req->errors, readl(&msg->body[1]))) ;
end_that_request_last(req);
dev->open_queue_depth--;
* request in the context.
*/
- st = le32_to_cpu(msg->body[0]) >> 24;
+ st = readl(&msg->body[0]) >> 24;
if (st != 0) {
int err;
"Volume has changed, waiting for acknowledgement"
};
- err = le32_to_cpu(msg->body[0]) & 0xffff;
+ err = readl(&msg->body[0]) & 0xffff;
/*
* Device not ready means two things. One is that the
* Don't stick a supertrak100 into cache aggressive modes
*/
- printk(KERN_ERR "/dev/%s error: %s", dev->gd->disk_name,
- bsa_errors[le32_to_cpu(msg->body[0]) & 0xffff]);
- if (le32_to_cpu(msg->body[0]) & 0x00ff0000)
- printk(KERN_ERR " - DDM attempted %d retries",
- (le32_to_cpu(msg->body[0]) >> 16) & 0x00ff);
- printk(KERN_ERR ".\n");
+ printk(KERN_ERR "\n/dev/%s error: %s", dev->gd->disk_name,
+ bsa_errors[readl(&msg->body[0]) & 0xffff]);
+ if (readl(&msg->body[0]) & 0x00ff0000)
+ printk(" - DDM attempted %d retries",
+ (readl(&msg->body[0]) >> 16) & 0x00ff);
+ printk(".\n");
req->errors++;
} else
req->errors = 0;
- if (!end_that_request_chunk
- (req, !req->errors, le32_to_cpu(msg->body[1]))) {
+ if (!end_that_request_chunk(req, !req->errors, readl(&msg->body[1]))) {
add_disk_randomness(req->rq_disk);
spin_lock_irqsave(q->queue_lock, flags);
i2o_block_sglist_free(ireq);
i2o_block_request_free(ireq);
} else
- printk(KERN_ERR "i2o_block: still remaining chunks\n");
+ printk(KERN_ERR "still remaining chunks\n");
return 1;
};
printk(KERN_INFO "block-osm: event received\n");
};
+#if 0
+/*
+ * i2o_block_event - kernel thread that services I2O block-device events.
+ *
+ * Woken via i2ob_evt_sem; reads the last event message out of the shared
+ * evt_msg buffer (under i2ob_evt_lock) and reacts to BSA volume load/unload,
+ * capacity-change and SCSI SMART events. Disabled under #if 0 in this patch;
+ * presumably superseded by the new event handler above — TODO confirm.
+ *
+ * NOTE(review): only the pointer copy of evt_msg is taken under the lock;
+ * the fields are read after the unlock, so a later event could overwrite
+ * the message while it is being decoded — verify against the producer side.
+ */
+static int i2o_block_event(void *dummy)
+{
+ unsigned int evt;
+ unsigned long flags;
+ struct i2o_block_device *dev;
+ int unit;
+ //The only event that has data is the SCSI_SMART event.
+ struct i2o_reply {
+ u32 header[4];
+ u32 evt_indicator;
+ u8 ASC;
+ u8 ASCQ;
+ u16 pad;
+ u8 data[16];
+ } *evt_local;
+
+ daemonize("i2oblock");
+ allow_signal(SIGKILL);
+
+ evt_running = 1;
+
+ while (1) {
+ if (down_interruptible(&i2ob_evt_sem)) {
+ evt_running = 0;
+ printk("exiting...");
+ break;
+ }
+
+ /*
+ * Keep another CPU/interrupt from overwriting the
+ * message while we're reading it
+ *
+ * We stuffed the unit in the TxContext and grab the event mask
+ * None of the BSA we care about events have EventData
+ */
+ spin_lock_irqsave(&i2ob_evt_lock, flags);
+ evt_local = (struct i2o_reply *)evt_msg;
+ spin_unlock_irqrestore(&i2ob_evt_lock, flags);
+
+ unit = le32_to_cpu(evt_local->header[3]);
+ evt = le32_to_cpu(evt_local->evt_indicator);
+
+ dev = &i2o_blk_dev[unit];
+ switch (evt) {
+ /*
+ * New volume loaded on same TID, so we just re-install.
+ * The TID/controller don't change as it is the same
+ * I2O device. It's just new media that we have to
+ * rescan.
+ */
+ case I2O_EVT_IND_BSA_VOLUME_LOAD:
+ {
+ i2ob_install_device(dev->i2o_device->iop,
+ dev->i2o_device, unit);
+ add_disk(dev->gendisk);
+ break;
+ }
+
+ /*
+ * No media, so set all parameters to 0 and set the media
+ * change flag. The I2O device is still valid, just doesn't
+ * have media, so we don't want to clear the controller or
+ * device pointer.
+ */
+ case I2O_EVT_IND_BSA_VOLUME_UNLOAD:
+ {
+ struct gendisk *p = dev->gendisk;
+ blk_queue_max_sectors(dev->gendisk->queue, 0);
+ del_gendisk(p);
+ put_disk(p);
+ dev->gendisk = NULL;
+ dev->media_change_flag = 1;
+ break;
+ }
+
+ case I2O_EVT_IND_BSA_VOLUME_UNLOAD_REQ:
+ printk(KERN_WARNING
+ "%s: Attempt to eject locked media\n",
+ dev->i2o_device->dev_name);
+ break;
+
+ /*
+ * The capacity has changed and we are going to be
+ * updating the max_sectors and other information
+ * about this disk. We try a revalidate first. If
+ * the block device is in use, we don't want to
+ * do that as there may be I/Os bound for the disk
+ * at the moment. In that case we read the size
+ * from the device and update the information ourselves
+ * and the user can later force a partition table
+ * update through an ioctl.
+ */
+ case I2O_EVT_IND_BSA_CAPACITY_CHANGE:
+ {
+ u64 size;
+
+ if (i2ob_query_device(dev, 0x0004, 0, &size, 8)
+ != 0)
+ i2ob_query_device(dev, 0x0000, 4, &size,
+ 8);
+
+ spin_lock_irqsave(dev->req_queue->queue_lock,
+ flags);
+ set_capacity(dev->gendisk, size >> 9);
+ spin_unlock_irqrestore(dev->req_queue->
+ queue_lock, flags);
+ break;
+ }
+
+ /*
+ * We got a SCSI SMART event, we just log the relevant
+ * information and let the user decide what they want
+ * to do with the information.
+ */
+ case I2O_EVT_IND_BSA_SCSI_SMART:
+ {
+ char buf[16];
+ printk(KERN_INFO
+ "I2O Block: %s received a SCSI SMART Event\n",
+ dev->i2o_device->dev_name);
+ /*
+ * NOTE(review): data[16] is one past the end of
+ * u8 data[16] (valid indices 0..15) — this write is
+ * out of bounds. The sprintf below can also copy up
+ * to 16 chars plus NUL into buf[16], overflowing it.
+ * (Dead code under #if 0, but worth noting.)
+ */
+ evt_local->data[16] = '\0';
+ sprintf(buf, "%s", &evt_local->data[0]);
+ printk(KERN_INFO " Disk Serial#:%s\n",
+ buf);
+ printk(KERN_INFO " ASC 0x%02x \n",
+ evt_local->ASC);
+ printk(KERN_INFO " ASCQ 0x%02x \n",
+ evt_local->ASCQ);
+ break;
+ }
+
+ /*
+ * Non event
+ */
+
+ case 0:
+ break;
+
+ /*
+ * An event we didn't ask for. Call the card manufacturer
+ * and tell them to fix their firmware :)
+ */
+
+ case 0x20:
+ /*
+ * If a promise card reports 0x20 event then the brown stuff
+ * hit the fan big time. The card seems to recover but loses
+ * the pending writes. Deeply ungood except for testing fsck
+ */
+ if (dev->i2o_device->iop->promise)
+ panic
+ ("I2O controller firmware failed. Reboot and force a filesystem check.\n");
+ /* NOTE(review): non-Promise controllers fall through
+ * to default here (panic does not return). Looks
+ * intentional, but it is unmarked — confirm. */
+ default:
+ printk(KERN_INFO
+ "%s: Received event 0x%X we didn't register for\n"
+ KERN_INFO
+ " Blame the I2O card manufacturer 8)\n",
+ dev->i2o_device->dev_name, evt);
+ break;
+ }
+ };
+
+ complete_and_exit(&i2ob_thread_dead, 0);
+ return 0;
+}
+#endif
+
+
/*
* SCSI-CAM for ioctl geometry mapping
* Duplicated with SCSI - this should be moved into somewhere common
struct i2o_block_device *dev = req->rq_disk->private_data;
struct i2o_controller *c = dev->i2o_dev->iop;
int tid = dev->i2o_dev->lct_data.tid;
- struct i2o_message __iomem *msg;
- void __iomem *mptr;
+ struct i2o_message *msg;
+ void *mptr;
struct i2o_block_request *ireq = req->special;
struct scatterlist *sg;
int sgnum;