4 * Copyright (C) 1999-2002 Red Hat Software
6 * Written by Alan Cox, Building Number Three Ltd
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * For the purpose of avoiding doubt the preferred form of the work
19 * for making modifications shall be a standards compliant form such as
20 * gzipped tar and not one requiring a proprietary or patent encumbered
25 * Multiple device handling error fixes,
26 * Added a queue depth.
28 * FC920 has an rmw bug. Dont or in the end marker.
29 * Removed queue walk, fixed for 64bitness.
30 * Rewrote much of the code over time
31 * Added indirect block lists
32 * Handle 64K limits on many controllers
33 * Don't use indirects on the Promise (breaks)
34 * Heavily chop down the queue depths
36 * Independent queues per IOP
37 * Support for dynamic device creation/deletion
39 * Support for larger I/Os through merge* functions
40 * (taken from DAC960 driver)
41 * Boji T Kannanthanam:
42 * Set the I2O Block devices to be detected in increasing
43 * order of TIDs during boot.
44 * Search and set the I2O block device that we boot off
45 * from as the first device to be claimed (as /dev/i2o/hda)
46 * Properly attach/detach I2O gendisk structure from the
47 * system gendisk list. The I2O block devices now appear in
49 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
50 * Minor bugfixes for 2.6.
53 #include <linux/module.h>
54 #include <linux/i2o.h>
56 #include <linux/mempool.h>
58 #include <linux/genhd.h>
59 #include <linux/blkdev.h>
60 #include <linux/hdreg.h>
62 #include "i2o_block.h"
/* Forward declaration: the driver struct itself is defined near the bottom of
 * this file but is referenced earlier (event registration, icntxt). */
64 static struct i2o_driver i2o_block_driver;
66 /* global Block OSM request mempool */
67 static struct i2o_block_mempool i2o_blk_req_pool;
69 /* Block OSM class handling definition */
70 static struct i2o_class_id i2o_block_class_id[] = {
71 {I2O_CLASS_RANDOM_BLOCK_STORAGE},
/* NOTE(review): the class-table terminator entry is not visible in this
 * gap-sampled listing. */
76 * i2o_block_device_free - free the memory of the I2O Block device
77 * @dev: I2O Block device, which should be cleaned up
79 * Frees the request queue, gendisk and the i2o_block_device structure.
81 static void i2o_block_device_free(struct i2o_block_device *dev)
83 blk_cleanup_queue(dev->gd->queue);
/* NOTE(review): the gendisk put and the kfree of @dev promised by the
 * kernel-doc above are not visible in this gap-sampled listing. */
91 * i2o_block_remove - remove the I2O Block device from the system again
92 * @dev: I2O Block device which should be removed
94 * Remove gendisk from system and free all allocated memory.
98 static int i2o_block_remove(struct device *dev)
100 struct i2o_device *i2o_dev = to_i2o_device(dev);
101 struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);
103 printk(KERN_INFO "block-osm: Device removed %s\n",
104 i2o_blk_dev->gd->disk_name);
/* Event mask 0 — presumably deregisters us from all events for this
 * device (probe registers with mask 0xffffffff); confirm against
 * i2o_event_register(). */
106 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);
108 del_gendisk(i2o_blk_dev->gd);
110 dev_set_drvdata(dev, NULL);
112 i2o_device_claim_release(i2o_dev);
114 i2o_block_device_free(i2o_blk_dev);
120 * i2o_block_device_flush - Flush all dirty data of I2O device dev
121 * @dev: I2O device which should be flushed
123 * Flushes all dirty data on device dev.
125 * Returns 0 on success or negative error code on failure.
127 static int i2o_block_device_flush(struct i2o_device *dev)
129 struct i2o_message *msg;
132 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
133 if (m == I2O_QUEUE_EMPTY)
136 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
137 writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid,
/* 60 in the upper 16 bits — presumably the flush timeout passed to the
 * IOP, matching the 60s post_wait timeout below; confirm against the
 * I2O BSA spec. */
139 writel(60 << 16, &msg->body[0]);
140 pr_debug("Flushing...\n");
142 return i2o_msg_post_wait(dev->iop, m, 60);
146 * i2o_block_device_mount - Mount (load) the media of device dev
147 * @dev: I2O device which should receive the mount request
148 * @media_id: Media Identifier
150 * Load a media into drive. Identifier should be set to -1, because the
151 * spec does not support any other value.
153 * Returns 0 on success or negative error code on failure.
155 static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
157 struct i2o_message *msg;
160 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
161 if (m == I2O_QUEUE_EMPTY)
164 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
165 writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid,
/* @media_id is ignored here: -1 is hard-coded per the spec note above. */
167 writel(-1, &msg->body[0]);
168 writel(0, &msg->body[1]);
169 pr_debug("Mounting...\n");
171 return i2o_msg_post_wait(dev->iop, m, 2);
175 * i2o_block_device_lock - Locks the media of device dev
176 * @dev: I2O device which should receive the lock request
177 * @media_id: Media Identifier
179 * Lock media of device dev to prevent removal. The media identifier
180 * should be set to -1, because the spec does not support any other value.
182 * Returns 0 on success or negative error code on failure.
184 static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
186 struct i2o_message *msg;
189 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
190 if (m == I2O_QUEUE_EMPTY)
193 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
194 writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
/* As with mount: @media_id is ignored, -1 hard-coded per spec. */
196 writel(-1, &msg->body[0]);
197 pr_debug("Locking...\n");
199 return i2o_msg_post_wait(dev->iop, m, 2);
203 * i2o_block_device_unlock - Unlocks the media of device dev
204 * @dev: I2O device which should receive the unlocked request
205 * @media_id: Media Identifier
207 * Unlocks the media in device dev. The media identifier should be set to
208 * -1, because the spec does not support any other value.
210 * Returns 0 on success or negative error code on failure.
212 static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
214 struct i2o_message *msg;
217 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
218 if (m == I2O_QUEUE_EMPTY)
221 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
222 writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
/* NOTE(review): unlike mount/lock, this passes the caller's @media_id
 * through instead of the hard-coded -1 the kernel-doc asks for. The sole
 * caller (i2o_block_release) passes -1, so behavior matches; flagging the
 * inconsistency. */
224 writel(media_id, &msg->body[0]);
225 pr_debug("Unlocking...\n");
227 return i2o_msg_post_wait(dev->iop, m, 2);
231 * i2o_block_device_power - Power management for device dev
232 * @dev: I2O device which should receive the power management request
233 * @operation: Operation which should be send
235 * Send a power management request to the device dev.
237 * Returns 0 on success or negative error code on failure.
239 static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
241 struct i2o_device *i2o_dev = dev->i2o_dev;
242 struct i2o_controller *c = i2o_dev->iop;
243 struct i2o_message *msg;
247 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
248 if (m == I2O_QUEUE_EMPTY)
251 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
252 writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data.
253 tid, &msg->u.head[1]);
/* Power operation code rides in the top byte of body[0]. */
254 writel(op << 24, &msg->body[0]);
255 pr_debug("Power...\n");
257 rc = i2o_msg_post_wait(c, m, 60);
/* NOTE(review): the update of dev->power and the return of rc are not
 * visible in this gap-sampled listing. */
265 * i2o_block_request_alloc - Allocate an I2O block request struct
267 * Allocates an I2O block request struct and initialize the list.
269 * Returns a i2o_block_request pointer on success or ERR_PTR(-ENOMEM) on
269 * failure; callers must check with IS_ERR() (see i2o_block_prep_req_fn).
272 static inline struct i2o_block_request *i2o_block_request_alloc(void)
274 struct i2o_block_request *ireq;
/* GFP_ATOMIC: this runs from the block-layer prep hook, no sleeping. */
276 ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
278 return ERR_PTR(-ENOMEM);
280 INIT_LIST_HEAD(&ireq->queue);
286 * i2o_block_request_free - Frees a I2O block request
287 * @ireq: I2O block request which should be freed
289 * Frees the allocated memory (give it back to the request mempool).
291 static inline void i2o_block_request_free(struct i2o_block_request *ireq)
293 mempool_free(ireq, i2o_blk_req_pool.pool);
297 * i2o_block_sglist_alloc - Allocate the SG list and map it
298 * @ireq: I2O block request
300 * Builds the SG list and map it into to be accessible by the controller.
302 * Returns the number of elements in the SG list or 0 on failure.
304 static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq)
306 struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
309 nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);
/* DMA direction follows the request's data direction: device-to-memory
 * for reads, memory-to-device for writes. */
311 if (rq_data_dir(ireq->req) == READ)
312 ireq->sg_dma_direction = PCI_DMA_FROMDEVICE;
314 ireq->sg_dma_direction = PCI_DMA_TODEVICE;
316 ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents,
317 ireq->sg_dma_direction);
319 return ireq->sg_nents;
323 * i2o_block_sglist_free - Frees the SG list
324 * @ireq: I2O block request from which the SG should be freed
326 * Frees the SG list from the I2O block request.
326 * Must mirror the dma_map_sg() done in i2o_block_sglist_alloc().
328 static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
330 struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
332 dma_unmap_sg(dev, ireq->sg_table, ireq->sg_nents,
333 ireq->sg_dma_direction);
337 * i2o_block_prep_req_fn - Allocates I2O block device specific struct
338 * @q: request queue for the request
339 * @req: the request to prepare
341 * Allocate the necessary i2o_block_request struct and connect it to
342 * the request. This is needed that we not lose the SG list later on.
344 * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
346 static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
348 struct i2o_block_device *i2o_blk_dev = q->queuedata;
349 struct i2o_block_request *ireq;
351 /* request is already processed by us, so return */
352 if (req->flags & REQ_SPECIAL) {
353 pr_debug("REQ_SPECIAL already set!\n");
354 req->flags |= REQ_DONTPREP;
358 /* connect the i2o_block_request to the request */
360 ireq = i2o_block_request_alloc();
/* ERR_PTR from the mempool means temporary memory pressure: defer so the
 * block layer retries the prep later. */
361 if (unlikely(IS_ERR(ireq))) {
362 pr_debug("unable to allocate i2o_block_request!\n");
363 return BLKPREP_DEFER;
366 ireq->i2o_blk_dev = i2o_blk_dev;
372 /* do not come back here */
373 req->flags |= REQ_DONTPREP | REQ_SPECIAL;
379 * i2o_block_delayed_request_fn - delayed request queue function
380 * @delayed_request: the delayed request with the queue to start
382 * If the request queue is stopped for a disk, and there is no open
383 * request, a new event is created, which calls this function to start
384 * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
387 static void i2o_block_delayed_request_fn(void *delayed_request)
389 struct i2o_block_delayed_request *dreq = delayed_request;
390 struct request_queue *q = dreq->queue;
/* Queue restart must happen under the queue lock. NOTE(review): the
 * blk_start_queue() call and kfree(dreq) are not visible in this
 * gap-sampled listing. */
393 spin_lock_irqsave(q->queue_lock, flags);
395 spin_unlock_irqrestore(q->queue_lock, flags);
400 * i2o_block_reply - Block OSM reply handler.
401 * @c: I2O controller from which the message arrives
402 * @m: message id of reply
403 * @msg: the actual I2O message reply
405 * This function gets all the message replies.
408 static int i2o_block_reply(struct i2o_controller *c, u32 m,
409 struct i2o_message *msg)
411 struct i2o_block_request *ireq;
413 struct i2o_block_device *dev;
414 struct request_queue *q;
/* Bit 13 of the message header — presumably the MSG_FAIL flag; confirm
 * against the i2o message header definitions. */
419 if (unlikely(readl(&msg->u.head[0]) & (1 << 13))) {
420 struct i2o_message *pmsg;
423 printk(KERN_WARNING "FAIL");
425 * FAILed message from controller
426 * We increment the error count and abort it
428 * In theory this will never happen. The I2O block class
429 * specification states that block devices never return
430 * FAILs but instead use the REQ status field...but
431 * better be on the safe side since no one really follows
432 * the spec to the book :)
/* The FAIL reply carries the offset of the original (preserved) message
 * frame in body[3]; recover the original request from its tcntxt. */
434 pm = readl(&msg->body[3]);
435 pmsg = c->in_queue.virt + pm;
437 req = i2o_cntxt_list_get(c, readl(&pmsg->u.s.tcntxt));
438 if (unlikely(!req)) {
439 printk(KERN_ERR "block-osm: NULL reply received!\n");
444 dev = ireq->i2o_blk_dev;
449 spin_lock_irqsave(q->queue_lock, flags);
/* Complete every chunk of the failed request with an error status. */
451 while (end_that_request_chunk(req, !req->errors,
452 readl(&pmsg->body[1]))) ;
453 end_that_request_last(req);
455 dev->open_queue_depth--;
456 list_del(&ireq->queue);
459 spin_unlock_irqrestore(q->queue_lock, flags);
461 /* Now flush the message by making it a NOP */
/* --- Normal (non-FAIL) reply path --- */
467 req = i2o_cntxt_list_get(c, readl(&msg->u.s.tcntxt));
468 if (unlikely(!req)) {
469 printk(KERN_ERR "block-osm: NULL reply received!\n");
474 dev = ireq->i2o_blk_dev;
477 if (unlikely(!dev->i2o_dev)) {
479 * This is HACK, but Intel Integrated RAID allows user
480 * to delete a volume that is claimed, locked, and in use
481 * by the OS. We have to check for a reply from a
482 * non-existent device and flag it as an error or the system
487 "I2O Block: Data transfer to deleted device!\n");
488 spin_lock_irqsave(q->queue_lock, flags);
489 while (end_that_request_chunk
490 (req, !req->errors, readl(&msg->body[1]))) ;
491 end_that_request_last(req);
493 dev->open_queue_depth--;
494 list_del(&ireq->queue);
497 spin_unlock_irqrestore(q->queue_lock, flags);
502 * Lets see what is cooking. We stuffed the
503 * request in the context.
/* Top byte of body[0] is the BSA request status. */
506 st = readl(&msg->body[0]) >> 24;
510 char *bsa_errors[] = {
513 "Failure communicating to device",
515 "Device is not ready",
517 "Media is locked by another user",
519 "Failure communicating to device",
520 "Device bus failure",
521 "Device is locked by another user",
522 "Device is write protected",
524 "Volume has changed, waiting for acknowledgement"
/* Low 16 bits: detailed error code; bits 16-23: DDM retry count. */
527 err = readl(&msg->body[0]) & 0xffff;
530 * Device not ready means two things. One is that the
531 * the thing went offline (but not a removal media)
533 * The second is that you have a SuperTrak 100 and the
534 * firmware got constipated. Unlike standard i2o card
535 * setups the supertrak returns an error rather than
536 * blocking for the timeout in these cases.
538 * Don't stick a supertrak100 into cache aggressive modes
/* NOTE(review): bsa_errors is indexed with the raw 16-bit error code;
 * bounds checking against the table size is not visible in this
 * gap-sampled listing. */
541 printk(KERN_ERR "\n/dev/%s error: %s", dev->gd->disk_name,
542 bsa_errors[readl(&msg->body[0]) & 0xffff]);
543 if (readl(&msg->body[0]) & 0x00ff0000)
544 printk(" - DDM attempted %d retries",
545 (readl(&msg->body[0]) >> 16) & 0x00ff);
/* Successful completion: retire the transferred bytes; when the whole
 * request is done, tear down the SG mapping and free the per-request
 * bookkeeping struct. */
551 if (!end_that_request_chunk(req, !req->errors, readl(&msg->body[1]))) {
552 add_disk_randomness(req->rq_disk);
553 spin_lock_irqsave(q->queue_lock, flags);
555 end_that_request_last(req);
557 dev->open_queue_depth--;
558 list_del(&ireq->queue);
561 spin_unlock_irqrestore(q->queue_lock, flags);
563 i2o_block_sglist_free(ireq);
564 i2o_block_request_free(ireq);
566 printk(KERN_ERR "still remaining chunks\n");
/* i2o_block_event - minimal event callback wired into i2o_block_driver;
 * currently only logs that an event arrived. */
571 static void i2o_block_event(struct i2o_event *evt)
573 printk(KERN_INFO "block-osm: event received\n");
/* NOTE(review): legacy kernel-thread event handler. Its name collides with
 * the i2o_block_event() callback above, so this version is presumably inside
 * an #if 0 / #ifdef section not visible in this gap-sampled listing. It
 * references old-driver symbols (i2ob_evt_sem, i2o_blk_dev[], dev->gendisk)
 * that do not match the current struct i2o_block_device. */
577 static int i2o_block_event(void *dummy)
581 struct i2o_block_device *dev;
583 //The only event that has data is the SCSI_SMART event.
593 daemonize("i2oblock");
594 allow_signal(SIGKILL);
599 if (down_interruptible(&i2ob_evt_sem)) {
601 printk("exiting...");
606 * Keep another CPU/interrupt from overwriting the
607 * message while we're reading it
609 * We stuffed the unit in the TxContext and grab the event mask
610 * None of the BSA we care about events have EventData
612 spin_lock_irqsave(&i2ob_evt_lock, flags);
613 evt_local = (struct i2o_reply *)evt_msg;
614 spin_unlock_irqrestore(&i2ob_evt_lock, flags);
616 unit = le32_to_cpu(evt_local->header[3]);
617 evt = le32_to_cpu(evt_local->evt_indicator);
619 dev = &i2o_blk_dev[unit];
622 * New volume loaded on same TID, so we just re-install.
623 * The TID/controller don't change as it is the same
624 * I2O device. It's just new media that we have to
627 case I2O_EVT_IND_BSA_VOLUME_LOAD:
629 i2ob_install_device(dev->i2o_device->iop,
630 dev->i2o_device, unit);
631 add_disk(dev->gendisk);
636 * No media, so set all parameters to 0 and set the media
637 * change flag. The I2O device is still valid, just doesn't
638 * have media, so we don't want to clear the controller or
641 case I2O_EVT_IND_BSA_VOLUME_UNLOAD:
643 struct gendisk *p = dev->gendisk;
644 blk_queue_max_sectors(dev->gendisk->queue, 0);
648 dev->media_change_flag = 1;
652 case I2O_EVT_IND_BSA_VOLUME_UNLOAD_REQ:
654 "%s: Attempt to eject locked media\n",
655 dev->i2o_device->dev_name);
659 * The capacity has changed and we are going to be
660 * updating the max_sectors and other information
661 * about this disk. We try a revalidate first. If
662 * the block device is in use, we don't want to
663 * do that as there may be I/Os bound for the disk
664 * at the moment. In that case we read the size
665 * from the device and update the information ourselves
666 * and the user can later force a partition table
667 * update through an ioctl.
669 case I2O_EVT_IND_BSA_CAPACITY_CHANGE:
673 if (i2ob_query_device(dev, 0x0004, 0, &size, 8)
675 i2ob_query_device(dev, 0x0000, 4, &size,
678 spin_lock_irqsave(dev->req_queue->queue_lock,
680 set_capacity(dev->gendisk, size >> 9);
681 spin_unlock_irqrestore(dev->req_queue->
687 * We got a SCSI SMART event, we just log the relevant
688 * information and let the user decide what they want
689 * to do with the information.
691 case I2O_EVT_IND_BSA_SCSI_SMART:
695 "I2O Block: %s received a SCSI SMART Event\n",
696 dev->i2o_device->dev_name);
697 evt_local->data[16] = '\0';
698 sprintf(buf, "%s", &evt_local->data[0]);
699 printk(KERN_INFO " Disk Serial#:%s\n",
701 printk(KERN_INFO " ASC 0x%02x \n",
703 printk(KERN_INFO " ASCQ 0x%02x \n",
716 * An event we didn't ask for. Call the card manufacturer
717 * and tell them to fix their firmware :)
722 * If a promise card reports 0x20 event then the brown stuff
723 * hit the fan big time. The card seems to recover but loses
724 * the pending writes. Deeply ungood except for testing fsck
726 if (dev->i2o_device->iop->promise)
728 ("I2O controller firmware failed. Reboot and force a filesystem check.\n");
731 "%s: Received event 0x%X we didn't register for\n"
733 " Blame the I2O card manufacturer 8)\n",
734 dev->i2o_device->dev_name, evt);
739 complete_and_exit(&i2ob_thread_dead, 0);
745 * SCSI-CAM for ioctl geometry mapping
746 * Duplicated with SCSI - this should be moved into somewhere common
749 * LBA -> CHS mapping table taken from:
751 * "Incorporating the I2O Architecture into BIOS for Intel Architecture
754 * This is an I2O document that is only available to I2O members,
757 * From my understanding, this is how all the I2O cards do this
759 * Disk Size | Sectors | Heads | Cylinders
760 * ---------------+---------+-------+-------------------
761 * 1 < X <= 528M | 63 | 16 | X/(63 * 16 * 512)
762 * 528M < X <= 1G | 63 | 32 | X/(63 * 32 * 512)
763 * 1 < X <528M | 63 | 16 | X/(63 * 16 * 512)
764 * 1 < X <528M | 63 | 16 | X/(63 * 16 * 512)
/* Capacity breakpoints (in 512-byte sectors) for the CHS mapping table
 * documented above. */
#define BLOCK_SIZE_528M 1081344
#define BLOCK_SIZE_1G 2097152
#define BLOCK_SIZE_21G 4403200
#define BLOCK_SIZE_42G 8806400
#define BLOCK_SIZE_84G 17612800

/**
 * i2o_block_biosparam - compute a BIOS-compatible CHS geometry
 * @capacity: device capacity in 512-byte sectors
 * @cyls: returned cylinder count
 * @hds: returned head count
 * @secs: returned sectors-per-track count
 *
 * Maps an LBA capacity onto a fake cylinder/head/sector geometry per the
 * table above: sectors are always 63, heads scale with capacity, and
 * cylinders absorb the remainder. Matching SCSI-CAM behavior, the cylinder
 * count is simply truncated to 16 bits.
 *
 * NOTE(review): the listing this was reconstructed from had the per-tier
 * head assignments missing; 16/32/64/128/255 restored from the standard
 * I2O BIOS mapping — confirm against the original source.
 */
static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
				unsigned char *hds, unsigned char *secs)
{
	unsigned long heads, sectors, cylinders;

	sectors = 63L;		/* Maximize sectors per track */
	if (capacity <= BLOCK_SIZE_528M)
		heads = 16;
	else if (capacity <= BLOCK_SIZE_1G)
		heads = 32;
	else if (capacity <= BLOCK_SIZE_21G)
		heads = 64;
	else if (capacity <= BLOCK_SIZE_42G)
		heads = 128;
	else
		heads = 255;

	cylinders = (unsigned long)capacity / (heads * sectors);

	*cyls = (unsigned short)cylinders;	/* Stuff return values */
	*secs = (unsigned char)sectors;
	*hds = (unsigned char)heads;
}
798 * i2o_block_open - Open the block device
800 * Power up the device, mount and lock the media. This function is called,
801 * if the block device is opened for access.
803 * Returns 0 on success or negative error code on failure.
805 static int i2o_block_open(struct inode *inode, struct file *file)
807 struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data;
/* power > 0x1f: assume this means the device is in a low-power state and
 * 0x02 is the power-on operation — TODO confirm against the BSA spec. */
812 if (dev->power > 0x1f)
813 i2o_block_device_power(dev, 0x02);
/* -1 media id per spec; see i2o_block_device_mount/lock. */
815 i2o_block_device_mount(dev->i2o_dev, -1);
817 i2o_block_device_lock(dev->i2o_dev, -1);
819 pr_debug("Ready.\n");
825 * i2o_block_release - Release the I2O block device
827 * Unlock and unmount the media, and power down the device. Gets called if
828 * the block device is closed.
830 * Returns 0 on success or negative error code on failure.
832 static int i2o_block_release(struct inode *inode, struct file *file)
834 struct gendisk *disk = inode->i_bdev->bd_disk;
835 struct i2o_block_device *dev = disk->private_data;
839 * This is to deal with the case of an application
840 * opening a device and then the device disappears while
841 * it's in use, and then the application tries to release
842 * it. ex: Unmounting a deleted RAID volume at reboot.
843 * If we send messages, it will just cause FAILs since
844 * the TID no longer exists.
849 i2o_block_device_flush(dev->i2o_dev);
851 i2o_block_device_unlock(dev->i2o_dev, -1);
/* Bits 3/4 of the device flags mark removable media; the power-down
 * operation chosen for removable devices is not visible in this
 * gap-sampled listing. */
853 if (dev->flags & (1 << 3 | 1 << 4)) /* Removable */
858 i2o_block_device_power(dev, operation);
864 * i2o_block_ioctl - Issue device specific ioctl calls.
865 * @cmd: ioctl command
868 * Handles ioctl request for the block device.
870 * Return 0 on success or negative error on failure.
872 static int i2o_block_ioctl(struct inode *inode, struct file *file,
873 unsigned int cmd, unsigned long arg)
875 struct gendisk *disk = inode->i_bdev->bd_disk;
876 struct i2o_block_device *dev = disk->private_data;
877 void __user *argp = (void __user *)arg;
879 /* Anyone capable of this syscall can do *real bad* things */
881 if (!capable(CAP_SYS_ADMIN))
/* HDIO_GETGEO path: synthesize CHS geometry from capacity. */
887 struct hd_geometry g;
888 i2o_block_biosparam(get_capacity(disk),
889 &g.cylinders, &g.heads, &g.sectors);
890 g.start = get_start_sect(inode->i_bdev);
891 return copy_to_user(argp, &g, sizeof(g)) ? -EFAULT : 0;
895 return put_user(dev->rcache, (int __user *)arg);
897 return put_user(dev->wcache, (int __user *)arg);
/* NOTE(review): arg is unsigned long, so "arg < 0" is always false —
 * dead check; only the upper-bound test is effective. */
899 if (arg < 0 || arg > CACHE_SMARTFETCH)
905 && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
914 * i2o_block_media_changed - Have we seen a media change?
915 * @disk: gendisk which should be verified
917 * Verifies if the media has changed.
919 * Returns 1 if the media was changed or 0 otherwise.
921 static int i2o_block_media_changed(struct gendisk *disk)
923 struct i2o_block_device *p = disk->private_data;
925 if (p->media_change_flag) {
926 p->media_change_flag = 0;
933 * i2o_block_transfer - Transfer a request to/from the I2O controller
934 * @req: the request which should be transferred
936 * This function converts the request into a I2O message. The necessary
937 * DMA buffers are allocated and after everything is setup post the message
938 * to the I2O controller. No cleanup is done by this function. It is done
939 * on the interrupt side when the reply arrives.
941 * Return 0 on success or negative error code on failure.
943 static int i2o_block_transfer(struct request *req)
945 struct i2o_block_device *dev = req->rq_disk->private_data;
946 struct i2o_controller *c = dev->i2o_dev->iop;
947 int tid = dev->i2o_dev->lct_data.tid;
948 struct i2o_message *msg;
950 struct i2o_block_request *ireq = req->special;
951 struct scatterlist *sg;
959 m = i2o_msg_get(c, &msg);
960 if (m == I2O_QUEUE_EMPTY) {
965 tcntxt = i2o_cntxt_list_add(c, req);
971 if ((sgnum = i2o_block_sglist_alloc(ireq)) <= 0) {
976 /* Build the message based on the request. */
977 writel(i2o_block_driver.context, &msg->u.s.icntxt);
978 writel(tcntxt, &msg->u.s.tcntxt);
/* body[1] = transfer length in bytes (sectors << 9). */
979 writel(req->nr_sectors << 9, &msg->body[1]);
/* 64-bit byte offset split across body[2]/body[3]:
 * low 32 bits of (sector << 9), then the high 32 bits, which equal
 * sector >> 23 since (sector << 9) >> 32 == sector >> 23. */
981 writel((((u64) req->sector) << 9) & 0xffffffff, &msg->body[2]);
982 writel(req->sector >> 23, &msg->body[3]);
984 mptr = &msg->body[4];
988 if (rq_data_dir(req) == READ) {
989 writel(I2O_CMD_BLOCK_READ << 24 | HOST_TID << 12 | tid,
991 sg_flags = 0x10000000;
/* Read-cache policy selects the control word in body[0]; magic values
 * are BSA control flags — TODO confirm against the I2O BSA spec. */
992 switch (dev->rcache) {
994 writel(0, &msg->body[0]);
997 writel(0x201F0008, &msg->body[0]);
999 case CACHE_SMARTFETCH:
1000 if (req->nr_sectors > 16)
1001 writel(0x201F0008, &msg->body[0]);
1003 writel(0x001F0000, &msg->body[0]);
1007 writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid,
1009 sg_flags = 0x14000000;
/* Write-cache policy: through/back/smart variants pick the control word. */
1010 switch (dev->wcache) {
1012 writel(0, &msg->body[0]);
1014 case CACHE_WRITETHROUGH:
1015 writel(0x001F0008, &msg->body[0]);
1017 case CACHE_WRITEBACK:
1018 writel(0x001F0010, &msg->body[0]);
1020 case CACHE_SMARTBACK:
1021 if (req->nr_sectors > 16)
1022 writel(0x001F0004, &msg->body[0]);
1024 writel(0x001F0010, &msg->body[0]);
1026 case CACHE_SMARTTHROUGH:
1027 if (req->nr_sectors > 16)
1028 writel(0x001F0004, &msg->body[0]);
1030 writel(0x001F0010, &msg->body[0]);
/* Emit one two-word SG element per mapped entry; 0x80000000 marks the
 * last element (cf. the FC920 rmw note in the changelog: set the end
 * marker by plain assignment before writing, never read-modify-write). */
1034 for (i = sgnum; i > 0; i--) {
1036 sg_flags |= 0x80000000;
1037 writel(sg_flags | sg_dma_len(sg), mptr);
1038 writel(sg_dma_address(sg), mptr + 4);
/* Final header word: message size in 32-bit words derived from how far
 * mptr advanced, with the SG list starting at offset 8. */
1043 writel(I2O_MESSAGE_SIZE
1044 (((unsigned long)mptr -
1045 (unsigned long)&msg->u.head[0]) >> 2) | SGL_OFFSET_8,
1050 list_add_tail(&ireq->queue, &dev->open_queue);
1051 dev->open_queue_depth++;
/* Error path: drop the context entry so the reply handler never sees it. */
1056 i2o_cntxt_list_remove(c, req);
1066 * i2o_block_request_fn - request queue handling function
1067 * @q: request queue from which the request could be fetched
1069 * Takes the next request from the queue, transfers it and if no error
1070 * occurs dequeue it from the queue. On arrival of the reply the message
1071 * will be processed further. If an error occurs requeue the request.
1073 static void i2o_block_request_fn(struct request_queue *q)
1075 struct request *req;
1077 while (!blk_queue_plugged(q)) {
1078 req = elv_next_request(q);
1082 if (blk_fs_request(req)) {
1083 struct i2o_block_delayed_request *dreq;
1084 struct i2o_block_request *ireq = req->special;
1085 unsigned int queue_depth;
1087 queue_depth = ireq->i2o_blk_dev->open_queue_depth;
/* Per-device depth throttle: only submit while below the cap. */
1089 if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS)
1090 if (!i2o_block_transfer(req)) {
1091 blkdev_dequeue_request(req);
1098 /* stop the queue and retry later */
/* GFP_ATOMIC: we are under the queue lock in softirq context. */
1099 dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
1104 INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
1107 printk(KERN_INFO "block-osm: transfer error\n");
1108 if (!queue_delayed_work(i2o_block_driver.event_queue,
1110 I2O_BLOCK_RETRY_TIME))
/* Non-fs requests are completed immediately with an error. */
1117 end_request(req, 0);
1121 /* I2O Block device operations definition */
1122 static struct block_device_operations i2o_block_fops = {
1123 .owner = THIS_MODULE,
1124 .open = i2o_block_open,
1125 .release = i2o_block_release,
1126 .ioctl = i2o_block_ioctl,
1127 .media_changed = i2o_block_media_changed
/* NOTE(review): closing brace/semicolon not visible in this gap-sampled
 * listing. */
1131 * i2o_block_device_alloc - Allocate memory for a I2O Block device
1133 * Allocate memory for the i2o_block_device struct, gendisk and request
1134 * queue and initialize them as far as no additional information is needed.
1136 * Returns a pointer to the allocated I2O Block device on success or a
1137 * negative error code on failure.
1139 static struct i2o_block_device *i2o_block_device_alloc(void)
1141 struct i2o_block_device *dev;
1143 struct request_queue *queue;
1146 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
1148 printk(KERN_ERR "block-osm: Insufficient memory to allocate "
1149 "I2O Block disk.\n");
/* kmalloc + memset pair; a zeroing allocator would do both in one call. */
1153 memset(dev, 0, sizeof(*dev));
1155 INIT_LIST_HEAD(&dev->open_queue);
1156 spin_lock_init(&dev->lock);
/* Default cache policies; tunable later via the cache ioctls. */
1157 dev->rcache = CACHE_PREFETCH;
1158 dev->wcache = CACHE_WRITEBACK;
1160 /* allocate a gendisk with 16 partitions */
1161 gd = alloc_disk(16);
1163 printk(KERN_ERR "block-osm: Insufficient memory to allocate "
1169 /* initialize the request queue */
1170 queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
1172 printk(KERN_ERR "block-osm: Insufficient memory to allocate "
1173 "request queue.\n");
1178 blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
1180 gd->major = I2O_MAJOR;
1182 gd->fops = &i2o_block_fops;
1183 gd->private_data = dev;
/* NOTE(review): error-path cleanup (put_disk/kfree) and the final return
 * are not visible in this gap-sampled listing. */
1200 * i2o_block_probe - verify if dev is a I2O Block device and install it
1201 * @dev: device to verify if it is a I2O Block device
1203 * We only verify if the user_tid of the device is 0xfff and then install
1204 * the device. Otherwise it is used by some other device (e. g. RAID).
1206 * Returns 0 on success or negative error code on failure.
1208 static int i2o_block_probe(struct device *dev)
1210 struct i2o_device *i2o_dev = to_i2o_device(dev);
1211 struct i2o_block_device *i2o_blk_dev;
1212 struct i2o_controller *c = i2o_dev->iop;
1214 struct request_queue *queue;
/* Monotonic unit counter across all probes; maps to minor/disk letter.
 * NOTE(review): no visible locking around it — presumably probes are
 * serialized by the driver core; confirm. */
1215 static int unit = 0;
1223 /* skip devices which are used by IOP */
1224 if (i2o_dev->lct_data.user_tid != 0xfff) {
1225 pr_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
1229 printk(KERN_INFO "block-osm: New device detected (TID: %03x)\n",
1230 i2o_dev->lct_data.tid);
1232 if (i2o_device_claim(i2o_dev)) {
1233 printk(KERN_WARNING "block-osm: Unable to claim device. "
1234 "Installation aborted\n");
1239 i2o_blk_dev = i2o_block_device_alloc();
1240 if (IS_ERR(i2o_blk_dev)) {
1241 printk(KERN_ERR "block-osm: could not alloc a new I2O block"
1243 rc = PTR_ERR(i2o_blk_dev);
1247 i2o_blk_dev->i2o_dev = i2o_dev;
1248 dev_set_drvdata(dev, i2o_blk_dev);
1251 gd = i2o_blk_dev->gd;
/* 16 minors per disk (see alloc_disk(16) in i2o_block_device_alloc). */
1252 gd->first_minor = unit << 4;
1253 sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
1254 sprintf(gd->devfs_name, "i2o/hd%c", 'a' + unit);
1255 gd->driverfs_dev = &i2o_dev->device;
1257 /* setup request queue */
1259 queue->queuedata = i2o_blk_dev;
1261 blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS);
1262 blk_queue_max_sectors(queue, I2O_MAX_SECTORS);
1267 i2o_status_block *sb;
1269 sb = c->status_block.virt;
/* Derive max HW segments from the IOP's inbound frame size: subtract the
 * header (sizeof/4 — presumably frame size is in 32-bit words; confirm)
 * and 4 command words, then divide by 2 words per SG element. */
1271 segments = (sb->inbound_frame_size -
1272 sizeof(struct i2o_message) / 4 - 4) / 2;
1275 blk_queue_max_hw_segments(queue, segments);
1277 pr_debug("max sectors: %d\n", I2O_MAX_SECTORS);
1278 pr_debug("phys segments: %d\n", I2O_MAX_SEGMENTS);
1279 pr_debug("hw segments: %d\n", segments);
1282 * Ask for the current media data. If that isn't supported
1283 * then we ask for the device capacity data
1285 if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) != 0
1286 || i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) != 0) {
1287 i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4);
1288 i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8);
1290 pr_debug("blocksize: %d\n", blocksize);
1292 if (i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
1294 i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
1295 i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
/* size is in bytes; capacity is in 512-byte sectors. */
1297 set_capacity(gd, size >> 9);
/* Subscribe to all events for this device (mask 0xffffffff). */
1299 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);
/* Error path: release the claim taken above. */
1308 i2o_device_claim_release(i2o_dev);
1314 /* Block OSM driver struct */
/* Registered with the I2O core in i2o_block_init(); the core invokes
 * probe/remove for matching class IDs and routes replies/events here. */
1315 static struct i2o_driver i2o_block_driver = {
1316 .name = "block-osm",
1317 .event = i2o_block_event,
1318 .reply = i2o_block_reply,
1319 .classes = i2o_block_class_id,
1321 .probe = i2o_block_probe,
1322 .remove = i2o_block_remove,
1327 * i2o_block_init - Block OSM initialization function
1329 * Allocate the slab and mempool for request structs, registers i2o_block
1330 * block device and finally register the Block OSM in the I2O core.
1332 * Returns 0 on success or negative error code on failure.
1334 static int __init i2o_block_init(void)
1339 printk(KERN_INFO "I2O Block Storage OSM v0.9\n");
1340 printk(KERN_INFO " (c) Copyright 1999-2001 Red Hat Software.\n");
1342 /* Allocate request mempool and slab */
1343 size = sizeof(struct i2o_block_request);
1344 i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
1345 SLAB_HWCACHE_ALIGN, NULL,
1347 if (!i2o_blk_req_pool.slab) {
1348 printk(KERN_ERR "block-osm: can't init request slab\n");
/* Mempool guarantees forward progress for request allocation under
 * memory pressure (used with GFP_ATOMIC in the prep hook). */
1353 i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE,
1356 i2o_blk_req_pool.slab);
1357 if (!i2o_blk_req_pool.pool) {
1358 printk(KERN_ERR "block-osm: can't init request mempool\n");
1363 /* Register the block device interfaces */
1364 rc = register_blkdev(I2O_MAJOR, "i2o_block");
1366 printk(KERN_ERR "block-osm: unable to register block device\n");
1370 printk(KERN_INFO "block-osm: registered device at major %d\n",
1374 /* Register Block OSM into I2O core */
1375 rc = i2o_driver_register(&i2o_block_driver);
1377 printk(KERN_ERR "block-osm: Could not register Block driver\n");
1378 goto unregister_blkdev;
/* goto-style unwind: each label below undoes one acquisition above,
 * in reverse order. */
1384 unregister_blkdev(I2O_MAJOR, "i2o_block");
1387 mempool_destroy(i2o_blk_req_pool.pool);
1390 kmem_cache_destroy(i2o_blk_req_pool.slab);
1397 * i2o_block_exit - Block OSM exit function
1399 * Unregisters Block OSM from I2O core, unregisters i2o_block block device
1400 * and frees the mempool and slab.
1400 * Teardown mirrors i2o_block_init() in reverse order.
1402 static void __exit i2o_block_exit(void)
1404 /* Unregister I2O Block OSM from I2O core */
1405 i2o_driver_unregister(&i2o_block_driver);
1407 /* Unregister block device */
1408 unregister_blkdev(I2O_MAJOR, "i2o_block");
1410 /* Free request mempool and slab */
1411 mempool_destroy(i2o_blk_req_pool.pool);
1412 kmem_cache_destroy(i2o_blk_req_pool.slab);
/* Module metadata and init/exit entry-point hookup. */
1415 MODULE_AUTHOR("Red Hat");
1416 MODULE_DESCRIPTION("I2O Block Device OSM");
1417 MODULE_LICENSE("GPL");
1419 module_init(i2o_block_init);
1420 module_exit(i2o_block_exit);