4 * Copyright (C) 1999-2002 Red Hat Software
6 * Written by Alan Cox, Building Number Three Ltd
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * For the purpose of avoiding doubt the preferred form of the work
19 * for making modifications shall be a standards compliant form such as
20 * gzipped tar and not one requiring a proprietary or patent encumbered
25 * Multiple device handling error fixes,
26 * Added a queue depth.
28 * FC920 has an rmw bug. Don't or in the end marker.
29 * Removed queue walk, fixed for 64bitness.
30 * Rewrote much of the code over time
31 * Added indirect block lists
32 * Handle 64K limits on many controllers
33 * Don't use indirects on the Promise (breaks)
34 * Heavily chop down the queue depths
36 * Independent queues per IOP
37 * Support for dynamic device creation/deletion
39 * Support for larger I/Os through merge* functions
40 * (taken from DAC960 driver)
41 * Boji T Kannanthanam:
42 * Set the I2O Block devices to be detected in increasing
43 * order of TIDs during boot.
44 * Search and set the I2O block device that we boot off
45 * from as the first device to be claimed (as /dev/i2o/hda)
46 * Properly attach/detach I2O gendisk structure from the
47 * system gendisk list. The I2O block devices now appear in
49 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
50 * Minor bugfixes for 2.6.
53 #include <linux/module.h>
54 #include <linux/i2o.h>
56 #include <linux/mempool.h>
58 #include <linux/genhd.h>
59 #include <linux/blkdev.h>
60 #include <linux/hdreg.h>
62 #include "i2o_block.h"
64 static struct i2o_driver i2o_block_driver;
66 /* global Block OSM request mempool */
67 static struct i2o_block_mempool i2o_blk_req_pool;
69 /* Block OSM class handling definition */
70 static struct i2o_class_id i2o_block_class_id[] = {
71 {I2O_CLASS_RANDOM_BLOCK_STORAGE},
/**
 * i2o_block_device_free - free the memory of the I2O Block device
 * @dev: I2O Block device, which should be cleaned up
 *
 * Frees the request queue, gendisk and the i2o_block_device structure.
 *
 * NOTE(review): extraction-damaged body — only the queue cleanup is visible.
 * The gendisk/structure release described above (likely put_disk()/kfree())
 * is not shown; confirm against the original source.
 */
81 static void i2o_block_device_free(struct i2o_block_device *dev)
83 blk_cleanup_queue(dev->gd->queue);
/**
 * i2o_block_remove - remove the I2O Block device from the system again
 * @dev: I2O Block device which should be removed
 *
 * Remove gendisk from system and free all allocated memory.
 *
 * NOTE(review): extraction-damaged — braces and the final return statement
 * are missing from this view.
 */
98 static int i2o_block_remove(struct device *dev)
100 struct i2o_device *i2o_dev = to_i2o_device(dev);
101 struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);
103 printk(KERN_INFO "block-osm: Device removed %s\n",
104 i2o_blk_dev->gd->disk_name);
/* Re-register with an empty event mask, i.e. stop receiving events. */
106 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);
/* Remove the disk from the system before tearing down driver state. */
108 del_gendisk(i2o_blk_dev->gd);
110 dev_set_drvdata(dev, NULL);
/* Return the claimed I2O device to the core, then free our bookkeeping. */
112 i2o_device_claim_release(i2o_dev);
114 i2o_block_device_free(i2o_blk_dev);
120 * i2o_block_device flush - Flush all dirty data of I2O device dev
121 * @dev: I2O device which should be flushed
123 * Flushes all dirty data on device dev.
125 * Returns 0 on success or negative error code on failure.
127 static int i2o_block_device_flush(struct i2o_device *dev)
129 struct i2o_message __iomem *msg;
132 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
133 if (m == I2O_QUEUE_EMPTY)
136 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
137 writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid,
139 writel(60 << 16, &msg->body[0]);
140 pr_debug("Flushing...\n");
142 return i2o_msg_post_wait(dev->iop, m, 60);
146 * i2o_block_device_mount - Mount (load) the media of device dev
147 * @dev: I2O device which should receive the mount request
148 * @media_id: Media Identifier
150 * Load a media into drive. Identifier should be set to -1, because the
151 * spec does not support any other value.
153 * Returns 0 on success or negative error code on failure.
155 static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
157 struct i2o_message __iomem *msg;
160 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
161 if (m == I2O_QUEUE_EMPTY)
164 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
165 writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid,
167 writel(-1, &msg->body[0]);
168 writel(0, &msg->body[1]);
169 pr_debug("Mounting...\n");
171 return i2o_msg_post_wait(dev->iop, m, 2);
175 * i2o_block_device_lock - Locks the media of device dev
176 * @dev: I2O device which should receive the lock request
177 * @media_id: Media Identifier
179 * Lock media of device dev to prevent removal. The media identifier
180 * should be set to -1, because the spec does not support any other value.
182 * Returns 0 on success or negative error code on failure.
184 static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
186 struct i2o_message __iomem *msg;
189 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
190 if (m == I2O_QUEUE_EMPTY)
193 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
194 writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
196 writel(-1, &msg->body[0]);
197 pr_debug("Locking...\n");
199 return i2o_msg_post_wait(dev->iop, m, 2);
203 * i2o_block_device_unlock - Unlocks the media of device dev
204 * @dev: I2O device which should receive the unlocked request
205 * @media_id: Media Identifier
207 * Unlocks the media in device dev. The media identifier should be set to
208 * -1, because the spec does not support any other value.
210 * Returns 0 on success or negative error code on failure.
212 static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
214 struct i2o_message __iomem *msg;
217 m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
218 if (m == I2O_QUEUE_EMPTY)
221 writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
222 writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
224 writel(media_id, &msg->body[0]);
225 pr_debug("Unlocking...\n");
227 return i2o_msg_post_wait(dev->iop, m, 2);
231 * i2o_block_device_power - Power management for device dev
232 * @dev: I2O device which should receive the power management request
233 * @operation: Operation which should be send
235 * Send a power management request to the device dev.
237 * Returns 0 on success or negative error code on failure.
239 static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
241 struct i2o_device *i2o_dev = dev->i2o_dev;
242 struct i2o_controller *c = i2o_dev->iop;
243 struct i2o_message __iomem *msg;
247 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
248 if (m == I2O_QUEUE_EMPTY)
251 writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
252 writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data.
253 tid, &msg->u.head[1]);
254 writel(op << 24, &msg->body[0]);
255 pr_debug("Power...\n");
257 rc = i2o_msg_post_wait(c, m, 60);
265 * i2o_block_request_alloc - Allocate an I2O block request struct
267 * Allocates an I2O block request struct and initialize the list.
269 * Returns a i2o_block_request pointer on success or negative error code
272 static inline struct i2o_block_request *i2o_block_request_alloc(void)
274 struct i2o_block_request *ireq;
276 ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
278 return ERR_PTR(-ENOMEM);
280 INIT_LIST_HEAD(&ireq->queue);
286 * i2o_block_request_free - Frees a I2O block request
287 * @ireq: I2O block request which should be freed
289 * Fres the allocated memory (give it back to the request mempool).
291 static inline void i2o_block_request_free(struct i2o_block_request *ireq)
293 mempool_free(ireq, i2o_blk_req_pool.pool);
297 * i2o_block_sglist_alloc - Allocate the SG list and map it
298 * @ireq: I2O block request
300 * Builds the SG list and map it into to be accessable by the controller.
302 * Returns the number of elements in the SG list or 0 on failure.
304 static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq)
306 struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
309 nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);
311 if (rq_data_dir(ireq->req) == READ)
312 ireq->sg_dma_direction = PCI_DMA_FROMDEVICE;
314 ireq->sg_dma_direction = PCI_DMA_TODEVICE;
316 ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents,
317 ireq->sg_dma_direction);
319 return ireq->sg_nents;
323 * i2o_block_sglist_free - Frees the SG list
324 * @ireq: I2O block request from which the SG should be freed
326 * Frees the SG list from the I2O block request.
328 static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
330 struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
332 dma_unmap_sg(dev, ireq->sg_table, ireq->sg_nents,
333 ireq->sg_dma_direction);
337 * i2o_block_prep_req_fn - Allocates I2O block device specific struct
338 * @q: request queue for the request
339 * @req: the request to prepare
341 * Allocate the necessary i2o_block_request struct and connect it to
342 * the request. This is needed that we not loose the SG list later on.
344 * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
346 static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
348 struct i2o_block_device *i2o_blk_dev = q->queuedata;
349 struct i2o_block_request *ireq;
351 /* request is already processed by us, so return */
352 if (req->flags & REQ_SPECIAL) {
353 pr_debug("REQ_SPECIAL already set!\n");
354 req->flags |= REQ_DONTPREP;
358 /* connect the i2o_block_request to the request */
360 ireq = i2o_block_request_alloc();
361 if (unlikely(IS_ERR(ireq))) {
362 pr_debug("unable to allocate i2o_block_request!\n");
363 return BLKPREP_DEFER;
366 ireq->i2o_blk_dev = i2o_blk_dev;
372 /* do not come back here */
373 req->flags |= REQ_DONTPREP | REQ_SPECIAL;
379 * i2o_block_delayed_request_fn - delayed request queue function
380 * delayed_request: the delayed request with the queue to start
382 * If the request queue is stopped for a disk, and there is no open
383 * request, a new event is created, which calls this function to start
384 * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
387 static void i2o_block_delayed_request_fn(void *delayed_request)
389 struct i2o_block_delayed_request *dreq = delayed_request;
390 struct request_queue *q = dreq->queue;
393 spin_lock_irqsave(q->queue_lock, flags);
395 spin_unlock_irqrestore(q->queue_lock, flags);
/**
 * i2o_block_reply - Block OSM reply handler.
 * @c: I2O controller from which the message arrives
 * @m: message id of reply
 * @msg: the actual I2O message reply
 *
 * This function gets all the message replies.
 *
 * NOTE(review): this block is extraction-damaged — braces, several local
 * declarations (req, q, flags, pm, st, err) and some statements are missing
 * from this view. Comments below annotate only the visible logic.
 */
408 static int i2o_block_reply(struct i2o_controller *c, u32 m,
409 struct i2o_message *msg)
411 struct i2o_block_request *ireq;
413 struct i2o_block_device *dev;
414 struct request_queue *q;
/* Bit 13 of the message header flags a FAILed message. */
419 if (unlikely(le32_to_cpu(msg->u.head[0]) & (1 << 13))) {
420 struct i2o_message *pmsg;
/*
 * FAILed message from controller
 * We increment the error count and abort it
 *
 * In theory this will never happen. The I2O block class
 * specification states that block devices never return
 * FAILs but instead use the REQ status field...but
 * better be on the safe side since no one really follows
 * the spec to the book :)
 */
/* body[3] of a FAIL reply carries the preserved original message frame. */
433 pm = le32_to_cpu(msg->body[3]);
434 pmsg = i2o_msg_in_to_virt(c, pm);
/* The transaction context stores the originating block-layer request. */
436 req = i2o_cntxt_list_get(c, le32_to_cpu(pmsg->u.s.tcntxt));
437 if (unlikely(!req)) {
438 printk(KERN_ERR "block-osm: NULL reply received!\n");
443 dev = ireq->i2o_blk_dev;
/* Complete the request with error under the queue lock. */
448 spin_lock_irqsave(q->queue_lock, flags);
450 while (end_that_request_chunk(req, !req->errors,
451 le32_to_cpu(pmsg->body[1]))) ;
452 end_that_request_last(req);
/* Drop our per-device accounting for the failed request. */
454 dev->open_queue_depth--;
455 list_del(&ireq->queue);
458 spin_unlock_irqrestore(q->queue_lock, flags);
460 /* Now flush the message by making it a NOP */
/* Normal (non-FAIL) reply path: look up the originating request. */
466 req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
467 if (unlikely(!req)) {
468 printk(KERN_ERR "block-osm: NULL reply received!\n");
473 dev = ireq->i2o_blk_dev;
476 if (unlikely(!dev->i2o_dev)) {
/*
 * This is a HACK, but Intel Integrated RAID allows users
 * to delete a volume that is claimed, locked, and in use
 * by the OS. We have to check for a reply from a
 * non-existent device and flag it as an error or the system
 * will hang.
 */
486 "I2O Block: Data transfer to deleted device!\n");
487 spin_lock_irqsave(q->queue_lock, flags);
488 while (end_that_request_chunk
489 (req, !req->errors, le32_to_cpu(msg->body[1]))) ;
490 end_that_request_last(req);
492 dev->open_queue_depth--;
493 list_del(&ireq->queue);
496 spin_unlock_irqrestore(q->queue_lock, flags);
/*
 * Let's see what is cooking. We stuffed the
 * request in the context.
 */
/* Top byte of body[0] is the BSA request status. */
505 st = le32_to_cpu(msg->body[0]) >> 24;
509 char *bsa_errors[] = {
512 "Failure communicating to device",
514 "Device is not ready",
516 "Media is locked by another user",
518 "Failure communicating to device",
519 "Device bus failure",
520 "Device is locked by another user",
521 "Device is write protected",
523 "Volume has changed, waiting for acknowledgement"
/* Low 16 bits carry the detailed error code used to index the table. */
526 err = le32_to_cpu(msg->body[0]) & 0xffff;
/*
 * Device not ready means two things. One is that the
 * thing went offline (but not removable media).
 *
 * The second is that you have a SuperTrak 100 and the
 * firmware got constipated. Unlike standard i2o card
 * setups the supertrak returns an error rather than
 * blocking for the timeout in these cases.
 *
 * Don't stick a supertrak100 into cache aggressive modes.
 */
540 printk(KERN_ERR "/dev/%s error: %s", dev->gd->disk_name,
541 bsa_errors[le32_to_cpu(msg->body[0]) & 0xffff]);
/* Bits 16-23 hold the DDM retry count, if any. */
542 if (le32_to_cpu(msg->body[0]) & 0x00ff0000)
543 printk(KERN_ERR " - DDM attempted %d retries",
544 (le32_to_cpu(msg->body[0]) >> 16) & 0x00ff);
545 printk(KERN_ERR ".\n")
/* Success path: body[1] holds the number of bytes transferred. */
550 if (!end_that_request_chunk
551 (req, !req->errors, le32_to_cpu(msg->body[1]))) {
552 add_disk_randomness(req->rq_disk);
553 spin_lock_irqsave(q->queue_lock, flags);
555 end_that_request_last(req);
557 dev->open_queue_depth--;
558 list_del(&ireq->queue);
561 spin_unlock_irqrestore(q->queue_lock, flags);
/* Request fully completed: release DMA mapping and request struct. */
563 i2o_block_sglist_free(ireq);
564 i2o_block_request_free(ireq);
566 printk(KERN_ERR "i2o_block: still remaining chunks\n");
571 static void i2o_block_event(struct i2o_event *evt)
573 printk(KERN_INFO "block-osm: event received\n");
577 * SCSI-CAM for ioctl geometry mapping
578 * Duplicated with SCSI - this should be moved into somewhere common
581 * LBA -> CHS mapping table taken from:
583 * "Incorporating the I2O Architecture into BIOS for Intel Architecture
586 * This is an I2O document that is only available to I2O members,
589 * From my understanding, this is how all the I2O cards do this
591 * Disk Size | Sectors | Heads | Cylinders
592 * ---------------+---------+-------+-------------------
593 * 1 < X <= 528M | 63 | 16 | X/(63 * 16 * 512)
594 * 528M < X <= 1G | 63 | 32 | X/(63 * 32 * 512)
595 * 1G < X <= 21G | 63 | 64 | X/(63 * 64 * 512)
596 * 21G < X <= 84G | 63 | 128/255 | X/(63 * heads * 512)
/* Capacity thresholds (in 512-byte sectors) for the CHS heads table. */
#define BLOCK_SIZE_528M		1081344
#define BLOCK_SIZE_1G		2097152
#define BLOCK_SIZE_21G		4403200
#define BLOCK_SIZE_42G		8806400
#define BLOCK_SIZE_84G		17612800

/**
 * i2o_block_biosparam - Calculate a fake CHS geometry for a capacity
 * @capacity: disk capacity in sectors
 * @cyls: returns the cylinder count
 * @hds: returns the head count
 * @secs: returns the sectors-per-track count
 *
 * Maps an LBA capacity onto a synthetic CHS geometry (63 sectors/track,
 * heads chosen by capacity bracket, cylinders derived from the rest).
 */
static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
				unsigned char *hds, unsigned char *secs)
{
	unsigned long heads, sectors, cylinders;

	sectors = 63L;		/* Maximize sectors per track */
	if (capacity <= BLOCK_SIZE_528M)
		heads = 16;
	else if (capacity <= BLOCK_SIZE_1G)
		heads = 32;
	else if (capacity <= BLOCK_SIZE_21G)
		heads = 64;
	else if (capacity <= BLOCK_SIZE_42G)
		heads = 128;
	else
		heads = 255;

	cylinders = (unsigned long)capacity / (heads * sectors);

	*cyls = (unsigned short)cylinders;	/* Stuff return values */
	*secs = (unsigned char)sectors;
	*hds = (unsigned char)heads;
}
630 * i2o_block_open - Open the block device
632 * Power up the device, mount and lock the media. This function is called,
633 * if the block device is opened for access.
635 * Returns 0 on success or negative error code on failure.
637 static int i2o_block_open(struct inode *inode, struct file *file)
639 struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data;
644 if (dev->power > 0x1f)
645 i2o_block_device_power(dev, 0x02);
647 i2o_block_device_mount(dev->i2o_dev, -1);
649 i2o_block_device_lock(dev->i2o_dev, -1);
651 pr_debug("Ready.\n");
657 * i2o_block_release - Release the I2O block device
659 * Unlock and unmount the media, and power down the device. Gets called if
660 * the block device is closed.
662 * Returns 0 on success or negative error code on failure.
664 static int i2o_block_release(struct inode *inode, struct file *file)
666 struct gendisk *disk = inode->i_bdev->bd_disk;
667 struct i2o_block_device *dev = disk->private_data;
671 * This is to deail with the case of an application
672 * opening a device and then the device dissapears while
673 * it's in use, and then the application tries to release
674 * it. ex: Unmounting a deleted RAID volume at reboot.
675 * If we send messages, it will just cause FAILs since
676 * the TID no longer exists.
681 i2o_block_device_flush(dev->i2o_dev);
683 i2o_block_device_unlock(dev->i2o_dev, -1);
685 if (dev->flags & (1 << 3 | 1 << 4)) /* Removable */
690 i2o_block_device_power(dev, operation);
/**
 * i2o_block_ioctl - Issue device specific ioctl calls.
 * @inode: inode of the block device the ioctl targets
 * @file: file handle used for the open
 * @cmd: ioctl command
 * @arg: ioctl argument (value or user-space pointer, command dependent)
 *
 * Handles ioctl request for the block device.
 *
 * Return 0 on success or negative error on failure.
 *
 * NOTE(review): extraction-damaged — the switch statement skeleton (case
 * labels such as HDIO_GETGEO and the cache get/set commands) and returns
 * are missing from this view; only the case bodies remain.
 */
704 static int i2o_block_ioctl(struct inode *inode, struct file *file,
705 unsigned int cmd, unsigned long arg)
707 struct gendisk *disk = inode->i_bdev->bd_disk;
708 struct i2o_block_device *dev = disk->private_data;
709 void __user *argp = (void __user *)arg;
711 /* Anyone capable of this syscall can do *real bad* things */
713 if (!capable(CAP_SYS_ADMIN))
/* Geometry query: synthesize CHS values from the disk capacity. */
719 struct hd_geometry g;
720 i2o_block_biosparam(get_capacity(disk),
721 &g.cylinders, &g.heads, &g.sectors);
722 g.start = get_start_sect(inode->i_bdev);
723 return copy_to_user(argp, &g, sizeof(g)) ? -EFAULT : 0;
/* Read-cache / write-cache mode queries. */
727 return put_user(dev->rcache, (int __user *)arg);
729 return put_user(dev->wcache, (int __user *)arg);
/* Cache mode setters: validate the requested mode range first. */
731 if (arg < 0 || arg > CACHE_SMARTFETCH)
737 && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
746 * i2o_block_media_changed - Have we seen a media change?
747 * @disk: gendisk which should be verified
749 * Verifies if the media has changed.
751 * Returns 1 if the media was changed or 0 otherwise.
753 static int i2o_block_media_changed(struct gendisk *disk)
755 struct i2o_block_device *p = disk->private_data;
757 if (p->media_change_flag) {
758 p->media_change_flag = 0;
/**
 * i2o_block_transfer - Transfer a request to/from the I2O controller
 * @req: the request which should be transferred
 *
 * This function converts the request into an I2O message. The necessary
 * DMA buffers are allocated and after everything is set up the message is
 * posted to the I2O controller. No cleanup is done by this function. It is
 * done on the interrupt side when the reply arrives.
 *
 * Return 0 on success or negative error code on failure.
 *
 * NOTE(review): extraction-damaged — braces, several locals (m, tcntxt,
 * mptr, sg_flags, sgnum, i), break statements, case labels and error-path
 * returns are missing from this view.
 */
775 static int i2o_block_transfer(struct request *req)
777 struct i2o_block_device *dev = req->rq_disk->private_data;
778 struct i2o_controller *c = dev->i2o_dev->iop;
779 int tid = dev->i2o_dev->lct_data.tid;
780 struct i2o_message __iomem *msg;
782 struct i2o_block_request *ireq = req->special;
783 struct scatterlist *sg;
/* Non-blocking frame get: failure is handled by the caller retry path. */
791 m = i2o_msg_get(c, &msg);
792 if (m == I2O_QUEUE_EMPTY) {
/* Stash the request in the context list; the reply handler looks it up. */
797 tcntxt = i2o_cntxt_list_add(c, req);
803 if ((sgnum = i2o_block_sglist_alloc(ireq)) <= 0) {
808 /* Build the message based on the request. */
809 writel(i2o_block_driver.context, &msg->u.s.icntxt);
810 writel(tcntxt, &msg->u.s.tcntxt);
/* Transfer length in bytes (sectors << 9). */
811 writel(req->nr_sectors << 9, &msg->body[1]);
/* 64-bit byte offset of the request, split into low/high words. */
813 writel((((u64) req->sector) << 9) & 0xffffffff, &msg->body[2]);
814 writel(req->sector >> 23, &msg->body[3]);
/* SG entries start at body[4]. */
816 mptr = &msg->body[4];
820 if (rq_data_dir(req) == READ) {
821 writel(I2O_CMD_BLOCK_READ << 24 | HOST_TID << 12 | tid,
823 sg_flags = 0x10000000;
/* Read control word (body[0]) depends on the read-cache policy. */
824 switch (dev->rcache) {
826 writel(0, &msg->body[0]);
829 writel(0x201F0008, &msg->body[0]);
831 case CACHE_SMARTFETCH:
/* Only prefetch for larger (>16 sector) reads. */
832 if (req->nr_sectors > 16)
833 writel(0x201F0008, &msg->body[0]);
835 writel(0x001F0000, &msg->body[0]);
839 writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid,
841 sg_flags = 0x14000000;
/* Write control word (body[0]) depends on the write-cache policy. */
842 switch (dev->wcache) {
844 writel(0, &msg->body[0]);
846 case CACHE_WRITETHROUGH:
847 writel(0x001F0008, &msg->body[0]);
849 case CACHE_WRITEBACK:
850 writel(0x001F0010, &msg->body[0]);
852 case CACHE_SMARTBACK:
853 if (req->nr_sectors > 16)
854 writel(0x001F0004, &msg->body[0]);
856 writel(0x001F0010, &msg->body[0]);
858 case CACHE_SMARTTHROUGH:
859 if (req->nr_sectors > 16)
860 writel(0x001F0004, &msg->body[0]);
862 writel(0x001F0010, &msg->body[0]);
/* Emit one SG element per mapped entry; last element gets the end bit. */
866 for (i = sgnum; i > 0; i--) {
868 sg_flags |= 0x80000000;
869 writel(sg_flags | sg_dma_len(sg), mptr);
870 writel(sg_dma_address(sg), mptr + 4);
/* Patch the final message size (in 32-bit words) into the header. */
875 writel(I2O_MESSAGE_SIZE
876 (((unsigned long)mptr -
877 (unsigned long)&msg->u.head[0]) >> 2) | SGL_OFFSET_8,
/* Track the in-flight request on the per-device open queue. */
882 list_add_tail(&ireq->queue, &dev->open_queue);
883 dev->open_queue_depth++;
/* Error path: drop the context entry added above. */
888 i2o_cntxt_list_remove(c, req);
/**
 * i2o_block_request_fn - request queue handling function
 * @q: request queue from which the request could be fetched
 *
 * Takes the next request from the queue, transfers it and if no error
 * occurs dequeues it from the queue. On arrival of the reply the message
 * will be processed further. If an error occurs requeue the request.
 *
 * NOTE(review): extraction-damaged — braces, the `struct request *req;`
 * declaration, break/continue statements and the queue-stop/kfree error
 * handling are missing from this view.
 */
905 static void i2o_block_request_fn(struct request_queue *q)
909 while (!blk_queue_plugged(q)) {
910 req = elv_next_request(q);
/* Only filesystem requests are transferred to the controller. */
914 if (blk_fs_request(req)) {
915 struct i2o_block_delayed_request *dreq;
916 struct i2o_block_request *ireq = req->special;
917 unsigned int queue_depth;
919 queue_depth = ireq->i2o_blk_dev->open_queue_depth;
/* Throttle: only submit while below the per-device open-request cap. */
921 if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS)
922 if (!i2o_block_transfer(req)) {
923 blkdev_dequeue_request(req);
930 /* stop the queue and retry later */
931 dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
/* Schedule a delayed restart of the queue via the driver workqueue. */
936 INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
939 printk(KERN_INFO "block-osm: transfer error\n");
940 if (!queue_delayed_work(i2o_block_driver.event_queue,
942 I2O_BLOCK_RETRY_TIME))
953 /* I2O Block device operations definition */
954 static struct block_device_operations i2o_block_fops = {
955 .owner = THIS_MODULE,
956 .open = i2o_block_open,
957 .release = i2o_block_release,
958 .ioctl = i2o_block_ioctl,
959 .media_changed = i2o_block_media_changed
/**
 * i2o_block_device_alloc - Allocate memory for a I2O Block device
 *
 * Allocate memory for the i2o_block_device struct, gendisk and request
 * queue and initialize them as far as no additional information is needed.
 *
 * Returns a pointer to the allocated I2O Block device on success or a
 * negative error code on failure.
 *
 * NOTE(review): extraction-damaged — braces, the `struct gendisk *gd;`
 * declaration, the alloc_disk() call, error-cleanup paths and the final
 * return are missing from this view.
 */
971 static struct i2o_block_device *i2o_block_device_alloc(void)
973 struct i2o_block_device *dev;
975 struct request_queue *queue;
978 dev = kmalloc(sizeof(*dev), GFP_KERNEL);
980 printk(KERN_ERR "block-osm: Insufficient memory to allocate "
981 "I2O Block disk.\n");
985 memset(dev, 0, sizeof(*dev));
/* Default state: empty open-request list, prefetch reads, writeback writes. */
987 INIT_LIST_HEAD(&dev->open_queue);
988 spin_lock_init(&dev->lock);
989 dev->rcache = CACHE_PREFETCH;
990 dev->wcache = CACHE_WRITEBACK;
992 /* allocate a gendisk with 16 partitions */
995 printk(KERN_ERR "block-osm: Insufficient memory to allocate "
1001 /* initialize the request queue */
1002 queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
1004 printk(KERN_ERR "block-osm: Insufficient memory to allocate "
1005 "request queue.\n");
/* Hook the prep function so each request gets an i2o_block_request. */
1010 blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
1012 gd->major = I2O_MAJOR;
1014 gd->fops = &i2o_block_fops;
1015 gd->private_data = dev;
/**
 * i2o_block_probe - verify if dev is a I2O Block device and install it
 * @dev: device to verify if it is a I2O Block device
 *
 * We only verify if the user_tid of the device is 0xfff and then install
 * the device. Otherwise it is used by some other device (e. g. RAID).
 *
 * Returns 0 on success or negative error code on failure.
 *
 * NOTE(review): extraction-damaged — braces, several locals (gd, rc, size,
 * blocksize, power, flags, status, segments), returns, error-cleanup labels
 * and the unit increment / add_disk() tail are missing from this view.
 */
1040 static int i2o_block_probe(struct device *dev)
1042 struct i2o_device *i2o_dev = to_i2o_device(dev);
1043 struct i2o_block_device *i2o_blk_dev;
1044 struct i2o_controller *c = i2o_dev->iop;
1046 struct request_queue *queue;
/* Monotonic unit counter: decides minor numbers and the hda/hdb/... name. */
1047 static int unit = 0;
1055 /* skip devices which are used by IOP */
1056 if (i2o_dev->lct_data.user_tid != 0xfff) {
1057 pr_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
1061 printk(KERN_INFO "block-osm: New device detected (TID: %03x)\n",
1062 i2o_dev->lct_data.tid);
/* Claim the device for this OSM before touching it. */
1064 if (i2o_device_claim(i2o_dev)) {
1065 printk(KERN_WARNING "block-osm: Unable to claim device. "
1066 "Installation aborted\n");
1071 i2o_blk_dev = i2o_block_device_alloc();
1072 if (IS_ERR(i2o_blk_dev)) {
1073 printk(KERN_ERR "block-osm: could not alloc a new I2O block"
1075 rc = PTR_ERR(i2o_blk_dev);
1079 i2o_blk_dev->i2o_dev = i2o_dev;
1080 dev_set_drvdata(dev, i2o_blk_dev);
/* 16 minors per unit: each disk gets a block of partition minors. */
1083 gd = i2o_blk_dev->gd;
1084 gd->first_minor = unit << 4;
1085 sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
1086 sprintf(gd->devfs_name, "i2o/hd%c", 'a' + unit);
1087 gd->driverfs_dev = &i2o_dev->device;
1089 /* setup request queue */
1091 queue->queuedata = i2o_blk_dev;
1093 blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS);
1094 blk_queue_max_sectors(queue, I2O_MAX_SECTORS);
1099 i2o_status_block *sb;
1101 sb = c->status_block.virt;
/* inbound_frame_size is in 32-bit words; sizeof(msg)/4 converts bytes to
 * words. 4 words are reserved, and each SG element takes 2 words. */
1103 segments = (sb->inbound_frame_size -
1104 sizeof(struct i2o_message) / 4 - 4) / 2;
1107 blk_queue_max_hw_segments(queue, segments);
1109 pr_debug("max sectors: %d\n", I2O_MAX_SECTORS);
1110 pr_debug("phys segments: %d\n", I2O_MAX_SEGMENTS);
1111 pr_debug("hw segments: %d\n", segments);
/*
 * Ask for the current media data. If that isn't supported
 * then we ask for the device capacity data.
 */
1117 if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) != 0
1118 || i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) != 0) {
1119 i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4);
1120 i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8);
1122 pr_debug("blocksize: %d\n", blocksize);
1124 if (i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
1126 i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
1127 i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
/* size is in bytes; capacity is in 512-byte sectors. */
1129 set_capacity(gd, size >> 9);
/* Subscribe to all events from this device. */
1131 i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);
/* Error path: give the claimed device back. */
1140 i2o_device_claim_release(i2o_dev);
1146 /* Block OSM driver struct */
1147 static struct i2o_driver i2o_block_driver = {
1148 .name = "block-osm",
1149 .event = i2o_block_event,
1150 .reply = i2o_block_reply,
1151 .classes = i2o_block_class_id,
1153 .probe = i2o_block_probe,
1154 .remove = i2o_block_remove,
/**
 * i2o_block_init - Block OSM initialization function
 *
 * Allocate the slab and mempool for request structs, register the i2o_block
 * block device and finally register the Block OSM in the I2O core.
 *
 * Returns 0 on success or negative error code on failure.
 *
 * NOTE(review): extraction-damaged — braces, the rc/size declarations,
 * goto targets (error labels) and the final `return 0` / `return rc`
 * statements are missing from this view.
 */
1166 static int __init i2o_block_init(void)
1171 printk(KERN_INFO "I2O Block Storage OSM v0.9\n");
1172 printk(KERN_INFO " (c) Copyright 1999-2001 Red Hat Software.\n");
1174 /* Allocate request mempool and slab */
1175 size = sizeof(struct i2o_block_request);
1176 i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
1177 SLAB_HWCACHE_ALIGN, NULL,
1179 if (!i2o_blk_req_pool.slab) {
1180 printk(KERN_ERR "block-osm: can't init request slab\n");
/* Mempool on top of the slab guarantees forward progress under memory
 * pressure in the I/O path. */
1185 i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE,
1188 i2o_blk_req_pool.slab);
1189 if (!i2o_blk_req_pool.pool) {
1190 printk(KERN_ERR "block-osm: can't init request mempool\n");
1195 /* Register the block device interfaces */
1196 rc = register_blkdev(I2O_MAJOR, "i2o_block");
1198 printk(KERN_ERR "block-osm: unable to register block device\n");
1202 printk(KERN_INFO "block-osm: registered device at major %d\n",
1206 /* Register Block OSM into I2O core */
1207 rc = i2o_driver_register(&i2o_block_driver);
1209 printk(KERN_ERR "block-osm: Could not register Block driver\n");
1210 goto unregister_blkdev;
/* Error unwinding below: undo registrations/allocations in reverse order. */
1216 unregister_blkdev(I2O_MAJOR, "i2o_block");
1219 mempool_destroy(i2o_blk_req_pool.pool);
1222 kmem_cache_destroy(i2o_blk_req_pool.slab);
1229 * i2o_block_exit - Block OSM exit function
1231 * Unregisters Block OSM from I2O core, unregisters i2o_block block device
1232 * and frees the mempool and slab.
1234 static void __exit i2o_block_exit(void)
1236 /* Unregister I2O Block OSM from I2O core */
1237 i2o_driver_unregister(&i2o_block_driver);
1239 /* Unregister block device */
1240 unregister_blkdev(I2O_MAJOR, "i2o_block");
1242 /* Free request mempool and slab */
1243 mempool_destroy(i2o_blk_req_pool.pool);
1244 kmem_cache_destroy(i2o_blk_req_pool.slab);
1247 MODULE_AUTHOR("Red Hat");
1248 MODULE_DESCRIPTION("I2O Block Device OSM");
1249 MODULE_LICENSE("GPL");
1251 module_init(i2o_block_init);
1252 module_exit(i2o_block_exit);