/*
 *      Block OSM
 *
 *      Copyright (C) 1999-2002 Red Hat Software
 *
 *      Written by Alan Cox, Building Number Three Ltd
 *
 *      This program is free software; you can redistribute it and/or modify it
 *      under the terms of the GNU General Public License as published by the
 *      Free Software Foundation; either version 2 of the License, or (at your
 *      option) any later version.
 *
 *      This program is distributed in the hope that it will be useful, but
 *      WITHOUT ANY WARRANTY; without even the implied warranty of
 *      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *      General Public License for more details.
 *
 *      For the purpose of avoiding doubt the preferred form of the work
 *      for making modifications shall be a standards compliant form such
 *      as gzipped tar and not one requiring a proprietary or patent
 *      encumbered tool to unpack.
 *
 *      Fixes/additions:
 *              Steve Ralston:
 *                      Multiple device handling error fixes,
 *                      Added a queue depth.
 *              Alan Cox:
 *                      FC920 has an rmw bug. Don't OR in the end marker.
 *                      Removed queue walk, fixed for 64bitness.
 *                      Rewrote much of the code over time
 *                      Added indirect block lists
 *                      Handle 64K limits on many controllers
 *                      Don't use indirects on the Promise (breaks)
 *                      Heavily chop down the queue depths
 *              Deepak Saxena:
 *                      Independent queues per IOP
 *                      Support for dynamic device creation/deletion
 *                      Code cleanup
 *                      Support for larger I/Os through merge* functions
 *                      (taken from DAC960 driver)
 *              Boji T Kannanthanam:
 *                      Set the I2O Block devices to be detected in increasing
 *                      order of TIDs during boot.
 *                      Search and set the I2O block device that we boot off
 *                      from as the first device to be claimed (as /dev/i2o/hda)
 *                      Properly attach/detach I2O gendisk structure from the
 *                      system gendisk list. The I2O block devices now appear in
 *                      /proc/partitions.
 *              Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *                      Minor bugfixes for 2.6.
 */

#include <linux/module.h>
#include <linux/i2o.h>

#include <linux/mempool.h>

#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>

#include "i2o_block.h"

static struct i2o_driver i2o_block_driver;

/* global Block OSM request mempool */
static struct i2o_block_mempool i2o_blk_req_pool;

/* Block OSM class handling definition */
static struct i2o_class_id i2o_block_class_id[] = {
        {I2O_CLASS_RANDOM_BLOCK_STORAGE},
        {I2O_CLASS_END}
};

/**
 *      i2o_block_device_free - free the memory of the I2O Block device
 *      @dev: I2O Block device, which should be cleaned up
 *
 *      Frees the request queue, gendisk and the i2o_block_device structure.
 */
static void i2o_block_device_free(struct i2o_block_device *dev)
{
        blk_cleanup_queue(dev->gd->queue);

        put_disk(dev->gd);

        kfree(dev);
}

/**
 *      i2o_block_remove - remove the I2O Block device from the system again
 *      @dev: I2O Block device which should be removed
 *
 *      Remove gendisk from system and free all allocated memory.
 *
 *      Always returns 0.
 */
static int i2o_block_remove(struct device *dev)
{
        struct i2o_device *i2o_dev = to_i2o_device(dev);
        struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);

        printk(KERN_INFO "block-osm: Device removed %s\n",
               i2o_blk_dev->gd->disk_name);

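        /* Registering with an event mask of 0 appears to undo the
         * 0xffffffff registration done in i2o_block_probe(). */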
        i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);

        del_gendisk(i2o_blk_dev->gd);

        dev_set_drvdata(dev, NULL);

        i2o_device_claim_release(i2o_dev);

        i2o_block_device_free(i2o_blk_dev);

        return 0;
}

/**
 *      i2o_block_device_flush - Flush all dirty data of I2O device dev
 *      @dev: I2O device which should be flushed
 *
 *      Flushes all dirty data on device dev.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_flush(struct i2o_device *dev)
{
        struct i2o_message *msg;
        u32 m;

        m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

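        /*
         * All the small BSA requests below share this layout: head[0] holds
         * the message size and SGL offset flags, head[1] the command code,
         * initiator (HOST_TID) and target TID. For CacheFlush the upper half
         * of body[0] appears to be the flush timeout in seconds.
         */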
        writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid,
               &msg->u.head[1]);
        writel(60 << 16, &msg->body[0]);
        pr_debug("Flushing...\n");

        return i2o_msg_post_wait(dev->iop, m, 60);
}

/**
 *      i2o_block_device_mount - Mount (load) the media of device dev
 *      @dev: I2O device which should receive the mount request
 *      @media_id: Media Identifier
 *
 *      Load media into the drive. The identifier should be set to -1, because
 *      the spec does not support any other value.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
        struct i2o_message *msg;
        u32 m;

        m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid,
               &msg->u.head[1]);
        writel(-1, &msg->body[0]);
        writel(0, &msg->body[1]);
        pr_debug("Mounting...\n");

        return i2o_msg_post_wait(dev->iop, m, 2);
}

/**
 *      i2o_block_device_lock - Locks the media of device dev
 *      @dev: I2O device which should receive the lock request
 *      @media_id: Media Identifier
 *
 *      Lock media of device dev to prevent removal. The media identifier
 *      should be set to -1, because the spec does not support any other value.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
        struct i2o_message *msg;
        u32 m;

        m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
               &msg->u.head[1]);
        writel(-1, &msg->body[0]);
        pr_debug("Locking...\n");

        return i2o_msg_post_wait(dev->iop, m, 2);
}

/**
 *      i2o_block_device_unlock - Unlocks the media of device dev
 *      @dev: I2O device which should receive the unlock request
 *      @media_id: Media Identifier
 *
 *      Unlocks the media in device dev. The media identifier should be set to
 *      -1, because the spec does not support any other value.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
        struct i2o_message *msg;
        u32 m;

        m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
               &msg->u.head[1]);
        writel(media_id, &msg->body[0]);
        pr_debug("Unlocking...\n");

        return i2o_msg_post_wait(dev->iop, m, 2);
}

/**
 *      i2o_block_device_power - Power management for device dev
 *      @dev: I2O Block device which should receive the power management request
 *      @op: Operation which should be sent
 *
 *      Send a power management request to the device dev.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
{
        struct i2o_device *i2o_dev = dev->i2o_dev;
        struct i2o_controller *c = i2o_dev->iop;
        struct i2o_message *msg;
        u32 m;
        int rc;

        m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
        if (m == I2O_QUEUE_EMPTY)
                return -ETIMEDOUT;

        writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
        writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 |
               i2o_dev->lct_data.tid, &msg->u.head[1]);
        writel(op << 24, &msg->body[0]);
        pr_debug("Power...\n");

        rc = i2o_msg_post_wait(c, m, 60);
        if (!rc)
                dev->power = op;

        return rc;
}

/**
 *      i2o_block_request_alloc - Allocate an I2O block request struct
 *
 *      Allocates an I2O block request struct and initializes its list head.
 *
 *      Returns an i2o_block_request pointer on success or a negative error
 *      code on failure.
 */
static inline struct i2o_block_request *i2o_block_request_alloc(void)
{
        struct i2o_block_request *ireq;

        ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
        if (!ireq)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&ireq->queue);

        return ireq;
}

/**
 *      i2o_block_request_free - Frees an I2O block request
 *      @ireq: I2O block request which should be freed
 *
 *      Frees the allocated memory (gives it back to the request mempool).
 */
static inline void i2o_block_request_free(struct i2o_block_request *ireq)
{
        mempool_free(ireq, i2o_blk_req_pool.pool);
}

/**
 *      i2o_block_sglist_alloc - Allocate the SG list and map it
 *      @ireq: I2O block request
 *
 *      Builds the SG list and maps it so that it is accessible by the
 *      controller.
 *
 *      Returns the number of elements in the SG list or 0 on failure.
 */
static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq)
{
        struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
        int nents;

        nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);

        if (rq_data_dir(ireq->req) == READ)
                ireq->sg_dma_direction = PCI_DMA_FROMDEVICE;
        else
                ireq->sg_dma_direction = PCI_DMA_TODEVICE;

        ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents,
                                    ireq->sg_dma_direction);

        return ireq->sg_nents;
}

/**
 *      i2o_block_sglist_free - Frees the SG list
 *      @ireq: I2O block request from which the SG list should be freed
 *
 *      Frees the SG list from the I2O block request.
 */
static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
{
        struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;

        dma_unmap_sg(dev, ireq->sg_table, ireq->sg_nents,
                     ireq->sg_dma_direction);
}

/**
 *      i2o_block_prep_req_fn - Allocates I2O block device specific struct
 *      @q: request queue for the request
 *      @req: the request to prepare
 *
 *      Allocate the necessary i2o_block_request struct and connect it to
 *      the request. This is needed so that we do not lose the SG list later
 *      on.
 *
 *      Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
 */
static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
{
        struct i2o_block_device *i2o_blk_dev = q->queuedata;
        struct i2o_block_request *ireq;

        /* request is already processed by us, so return */
        if (req->flags & REQ_SPECIAL) {
                pr_debug("REQ_SPECIAL already set!\n");
                req->flags |= REQ_DONTPREP;
                return BLKPREP_OK;
        }

        /* connect the i2o_block_request to the request */
        if (!req->special) {
                ireq = i2o_block_request_alloc();
                if (unlikely(IS_ERR(ireq))) {
                        pr_debug("unable to allocate i2o_block_request!\n");
                        return BLKPREP_DEFER;
                }

                ireq->i2o_blk_dev = i2o_blk_dev;
                req->special = ireq;
                ireq->req = req;
        } else
                ireq = req->special;

        /* do not come back here */
        req->flags |= REQ_DONTPREP | REQ_SPECIAL;

        return BLKPREP_OK;
}

/**
 *      i2o_block_delayed_request_fn - delayed request queue function
 *      @delayed_request: the delayed request with the queue to start
 *
 *      If the request queue is stopped for a disk, and there is no open
 *      request, a new event is created, which calls this function to start
 *      the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue would
 *      never be started again.
 */
static void i2o_block_delayed_request_fn(void *delayed_request)
{
        struct i2o_block_delayed_request *dreq = delayed_request;
        struct request_queue *q = dreq->queue;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
        kfree(dreq);
}

/**
 *      i2o_block_reply - Block OSM reply handler.
 *      @c: I2O controller from which the message arrives
 *      @m: message id of reply
 *      @msg: the actual I2O message reply
 *
 *      This function gets all the message replies.
 *
 */
static int i2o_block_reply(struct i2o_controller *c, u32 m,
                           struct i2o_message *msg)
{
        struct i2o_block_request *ireq;
        struct request *req;
        struct i2o_block_device *dev;
        struct request_queue *q;
        u8 st;
        unsigned long flags;

        /* FAILed message */
        if (unlikely(readl(&msg->u.head[0]) & (1 << 13))) {
                struct i2o_message *pmsg;
                u32 pm;

                printk(KERN_WARNING "FAIL");
                /*
                 * FAILed message from controller
                 * We increment the error count and abort it
                 *
                 * In theory this will never happen.  The I2O block class
                 * specification states that block devices never return
                 * FAILs but instead use the REQ status field...but
                 * better be on the safe side since no one really follows
                 * the spec to the book :)
                 */
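                /*
                 * body[3] of a FAIL reply appears to hold the offset of the
                 * preserved original message frame in the inbound queue; use
                 * it to recover the transaction context of our request.
                 */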
                pm = readl(&msg->body[3]);
                pmsg = c->in_queue.virt + pm;

                req = i2o_cntxt_list_get(c, readl(&pmsg->u.s.tcntxt));
                if (unlikely(!req)) {
                        printk(KERN_ERR "block-osm: NULL reply received!\n");
                        return -1;
                }

                ireq = req->special;
                dev = ireq->i2o_blk_dev;
                q = dev->gd->queue;

                req->errors++;

                spin_lock_irqsave(q->queue_lock, flags);

                while (end_that_request_chunk(req, !req->errors,
                                              readl(&pmsg->body[1]))) ;
                end_that_request_last(req);

                dev->open_queue_depth--;
                list_del(&ireq->queue);
                blk_start_queue(q);

                spin_unlock_irqrestore(q->queue_lock, flags);

                /* Now flush the message by making it a NOP */
                i2o_msg_nop(c, pm);

                return -1;
        }

        req = i2o_cntxt_list_get(c, readl(&msg->u.s.tcntxt));
        if (unlikely(!req)) {
                printk(KERN_ERR "block-osm: NULL reply received!\n");
                return -1;
        }

        ireq = req->special;
        dev = ireq->i2o_blk_dev;
        q = dev->gd->queue;

        if (unlikely(!dev->i2o_dev)) {
                /*
                 * This is a HACK, but Intel Integrated RAID allows users
                 * to delete a volume that is claimed, locked, and in use
                 * by the OS. We have to check for a reply from a
                 * non-existent device and flag it as an error or the system
                 * goes kaput...
                 */
                req->errors++;
                printk(KERN_WARNING
                       "I2O Block: Data transfer to deleted device!\n");
                spin_lock_irqsave(q->queue_lock, flags);
                while (end_that_request_chunk
                       (req, !req->errors, readl(&msg->body[1]))) ;
                end_that_request_last(req);

                dev->open_queue_depth--;
                list_del(&ireq->queue);
                blk_start_queue(q);

                spin_unlock_irqrestore(q->queue_lock, flags);
                return -1;
        }

        /*
         *      Let's see what is cooking. We stuffed the
         *      request in the context.
         */

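        /*
         * The top byte of body[0] is the BSA request status; the low 16 bits
         * carry the detailed status code used to index bsa_errors[] below,
         * and bits 16-23 the retry count reported by the DDM.
         */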
        st = readl(&msg->body[0]) >> 24;

        if (st != 0) {
                int err;
                char *bsa_errors[] = {
                        "Success",
                        "Media Error",
                        "Failure communicating to device",
                        "Device Failure",
                        "Device is not ready",
                        "Media not present",
                        "Media is locked by another user",
                        "Media has failed",
                        "Failure communicating to device",
                        "Device bus failure",
                        "Device is locked by another user",
                        "Device is write protected",
                        "Device has reset",
                        "Volume has changed, waiting for acknowledgement"
                };

                err = readl(&msg->body[0]) & 0xffff;

                /*
                 *      Device not ready means two things. One is that the
                 *      thing went offline (but not removable media).
                 *
                 *      The second is that you have a SuperTrak 100 and the
                 *      firmware got constipated. Unlike standard i2o card
                 *      setups the supertrak returns an error rather than
                 *      blocking for the timeout in these cases.
                 *
                 *      Don't stick a supertrak100 into cache aggressive modes
                 */

                printk(KERN_ERR "\n/dev/%s error: %s", dev->gd->disk_name,
                       bsa_errors[err]);
                if (readl(&msg->body[0]) & 0x00ff0000)
                        printk(" - DDM attempted %d retries",
                               (readl(&msg->body[0]) >> 16) & 0x00ff);
                printk(".\n");
                req->errors++;
        } else
                req->errors = 0;

        if (!end_that_request_chunk(req, !req->errors, readl(&msg->body[1]))) {
                add_disk_randomness(req->rq_disk);
                spin_lock_irqsave(q->queue_lock, flags);

                end_that_request_last(req);

                dev->open_queue_depth--;
                list_del(&ireq->queue);
                blk_start_queue(q);

                spin_unlock_irqrestore(q->queue_lock, flags);

                i2o_block_sglist_free(ireq);
                i2o_block_request_free(ireq);
        } else
                printk(KERN_ERR "still remaining chunks\n");

        return 1;
}

static void i2o_block_event(struct i2o_event *evt)
{
        printk(KERN_INFO "block-osm: event received\n");
}

#if 0
static int i2o_block_event(void *dummy)
{
        unsigned int evt;
        unsigned long flags;
        struct i2o_block_device *dev;
        int unit;
        /* The only event that has data is the SCSI_SMART event. */
        struct i2o_reply {
                u32 header[4];
                u32 evt_indicator;
                u8 ASC;
                u8 ASCQ;
                u16 pad;
                u8 data[16];
        } *evt_local;

        daemonize("i2oblock");
        allow_signal(SIGKILL);

        evt_running = 1;

        while (1) {
                if (down_interruptible(&i2ob_evt_sem)) {
                        evt_running = 0;
                        printk("exiting...");
                        break;
                }

                /*
                 * Keep another CPU/interrupt from overwriting the
                 * message while we're reading it
                 *
                 * We stuffed the unit in the TxContext and grab the event mask
                 * None of the BSA events we care about have EventData
                 */
                spin_lock_irqsave(&i2ob_evt_lock, flags);
                evt_local = (struct i2o_reply *)evt_msg;
                spin_unlock_irqrestore(&i2ob_evt_lock, flags);

                unit = le32_to_cpu(evt_local->header[3]);
                evt = le32_to_cpu(evt_local->evt_indicator);

                dev = &i2o_blk_dev[unit];
                switch (evt) {
                        /*
                         * New volume loaded on same TID, so we just re-install.
                         * The TID/controller don't change as it is the same
                         * I2O device.  It's just new media that we have to
                         * rescan.
                         */
                case I2O_EVT_IND_BSA_VOLUME_LOAD:
                        {
                                i2ob_install_device(dev->i2o_device->iop,
                                                    dev->i2o_device, unit);
                                add_disk(dev->gendisk);
                                break;
                        }

                        /*
                         * No media, so set all parameters to 0 and set the media
                         * change flag. The I2O device is still valid, just doesn't
                         * have media, so we don't want to clear the controller or
                         * device pointer.
                         */
                case I2O_EVT_IND_BSA_VOLUME_UNLOAD:
                        {
                                struct gendisk *p = dev->gendisk;
                                blk_queue_max_sectors(dev->gendisk->queue, 0);
                                del_gendisk(p);
                                put_disk(p);
                                dev->gendisk = NULL;
                                dev->media_change_flag = 1;
                                break;
                        }

                case I2O_EVT_IND_BSA_VOLUME_UNLOAD_REQ:
                        printk(KERN_WARNING
                               "%s: Attempt to eject locked media\n",
                               dev->i2o_device->dev_name);
                        break;

                        /*
                         * The capacity has changed and we are going to be
                         * updating the max_sectors and other information
                         * about this disk.  We try a revalidate first. If
                         * the block device is in use, we don't want to
                         * do that as there may be I/Os bound for the disk
                         * at the moment.  In that case we read the size
                         * from the device and update the information ourselves
                         * and the user can later force a partition table
                         * update through an ioctl.
                         */
                case I2O_EVT_IND_BSA_CAPACITY_CHANGE:
                        {
                                u64 size;

                                if (i2ob_query_device(dev, 0x0004, 0, &size, 8)
                                    != 0)
                                        i2ob_query_device(dev, 0x0000, 4, &size,
                                                          8);

                                spin_lock_irqsave(dev->req_queue->queue_lock,
                                                  flags);
                                set_capacity(dev->gendisk, size >> 9);
                                spin_unlock_irqrestore(dev->req_queue->
                                                       queue_lock, flags);
                                break;
                        }

                        /*
                         * We got a SCSI SMART event, we just log the relevant
                         * information and let the user decide what they want
                         * to do with the information.
                         */
                case I2O_EVT_IND_BSA_SCSI_SMART:
                        {
                                char buf[16];
                                printk(KERN_INFO
                                       "I2O Block: %s received a SCSI SMART Event\n",
                                       dev->i2o_device->dev_name);
                                evt_local->data[16] = '\0';
                                sprintf(buf, "%s", &evt_local->data[0]);
                                printk(KERN_INFO "      Disk Serial#:%s\n",
                                       buf);
                                printk(KERN_INFO "      ASC 0x%02x \n",
                                       evt_local->ASC);
                                printk(KERN_INFO "      ASCQ 0x%02x \n",
                                       evt_local->ASCQ);
                                break;
                        }

                        /*
                         *      Non event
                         */

                case 0:
                        break;

                        /*
                         * An event we didn't ask for.  Call the card manufacturer
                         * and tell them to fix their firmware :)
                         */

                case 0x20:
                        /*
                         * If a promise card reports 0x20 event then the brown stuff
                         * hit the fan big time. The card seems to recover but loses
                         * the pending writes. Deeply ungood except for testing fsck
                         */
                        if (dev->i2o_device->iop->promise)
                                panic("I2O controller firmware failed. Reboot and force a filesystem check.\n");
                default:
                        printk(KERN_INFO
                               "%s: Received event 0x%X we didn't register for\n"
                               KERN_INFO
                               "   Blame the I2O card manufacturer 8)\n",
                               dev->i2o_device->dev_name, evt);
                        break;
                }
        }

        complete_and_exit(&i2ob_thread_dead, 0);
        return 0;
}
#endif

/*
 *      SCSI-CAM for ioctl geometry mapping
 *      Duplicated with SCSI - this should be moved into somewhere common
 *      perhaps genhd ?
 *
 * LBA -> CHS mapping table taken from:
 *
 * "Incorporating the I2O Architecture into BIOS for Intel Architecture
 *  Platforms"
 *
 * This is an I2O document that is only available to I2O members,
 * not developers.
 *
 * From my understanding, this is how all the I2O cards do this
 *
 * Disk Size        | Sectors | Heads | Cylinders
 * -----------------+---------+-------+--------------------
 * 1 < X <= 528M    | 63      | 16    | X/(63 * 16 * 512)
 * 528M < X <= 1G   | 63      | 32    | X/(63 * 32 * 512)
 * 1G < X <= 21G    | 63      | 64    | X/(63 * 64 * 512)
 * 21G < X <= 42G   | 63      | 128   | X/(63 * 128 * 512)
 * 42G < X          | 63      | 255   | X/(63 * 255 * 512)
 *
 */
#define BLOCK_SIZE_528M         1081344
#define BLOCK_SIZE_1G           2097152
#define BLOCK_SIZE_21G          4403200
#define BLOCK_SIZE_42G          8806400
#define BLOCK_SIZE_84G          17612800

static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
                                unsigned char *hds, unsigned char *secs)
{
        unsigned long heads, sectors, cylinders;

        sectors = 63L;          /* Maximize sectors per track */
        if (capacity <= BLOCK_SIZE_528M)
                heads = 16;
        else if (capacity <= BLOCK_SIZE_1G)
                heads = 32;
        else if (capacity <= BLOCK_SIZE_21G)
                heads = 64;
        else if (capacity <= BLOCK_SIZE_42G)
                heads = 128;
        else
                heads = 255;

        cylinders = (unsigned long)capacity / (heads * sectors);

        *cyls = (unsigned short)cylinders;      /* Stuff return values */
        *secs = (unsigned char)sectors;
        *hds = (unsigned char)heads;
}
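
/*
 * Worked example (not from the original source): a 4 GB disk has a capacity
 * of 8388608 512-byte sectors, which falls into the 42G bucket above, so
 * i2o_block_biosparam() reports 63 sectors, 128 heads and
 * 8388608 / (128 * 63) = 1040 cylinders.
 */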

/**
 *      i2o_block_open - Open the block device
 *
 *      Power up the device, mount and lock the media. This function is called
 *      when the block device is opened for access.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_open(struct inode *inode, struct file *file)
{
        struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data;

        if (!dev->i2o_dev)
                return -ENODEV;

        if (dev->power > 0x1f)
                i2o_block_device_power(dev, 0x02);

        i2o_block_device_mount(dev->i2o_dev, -1);

        i2o_block_device_lock(dev->i2o_dev, -1);

        pr_debug("Ready.\n");

        return 0;
}

/**
 *      i2o_block_release - Release the I2O block device
 *
 *      Unlock and unmount the media, and power down the device. Gets called
 *      when the block device is closed.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_release(struct inode *inode, struct file *file)
{
        struct gendisk *disk = inode->i_bdev->bd_disk;
        struct i2o_block_device *dev = disk->private_data;
        u8 operation;

        /*
         * This is to deal with the case of an application
         * opening a device and then the device disappears while
         * it's in use, and then the application tries to release
         * it.  ex: Unmounting a deleted RAID volume at reboot.
         * If we send messages, it will just cause FAILs since
         * the TID no longer exists.
         */
        if (!dev->i2o_dev)
                return 0;

        i2o_block_device_flush(dev->i2o_dev);

        i2o_block_device_unlock(dev->i2o_dev, -1);

        if (dev->flags & (1 << 3 | 1 << 4))     /* Removable */
                operation = 0x21;
        else
                operation = 0x24;

        i2o_block_device_power(dev, operation);

        return 0;
}

/**
 *      i2o_block_ioctl - Issue device specific ioctl calls.
 *      @cmd: ioctl command
 *      @arg: arg
 *
 *      Handles ioctl requests for the block device.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_ioctl(struct inode *inode, struct file *file,
                           unsigned int cmd, unsigned long arg)
{
        struct gendisk *disk = inode->i_bdev->bd_disk;
        struct i2o_block_device *dev = disk->private_data;
        void __user *argp = (void __user *)arg;

        /* Anyone capable of this syscall can do *real bad* things */

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

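        /*
         * BLKI2OG[RW]STRAT report the current read/write cache strategy,
         * BLKI2OS[RW]STRAT set it; the CACHE_* values come from i2o_block.h.
         */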
        switch (cmd) {
        case HDIO_GETGEO:
                {
                        struct hd_geometry g;
                        i2o_block_biosparam(get_capacity(disk),
                                            &g.cylinders, &g.heads, &g.sectors);
                        g.start = get_start_sect(inode->i_bdev);
                        return copy_to_user(argp, &g, sizeof(g)) ? -EFAULT : 0;
                }

        case BLKI2OGRSTRAT:
                return put_user(dev->rcache, (int __user *)arg);
        case BLKI2OGWSTRAT:
                return put_user(dev->wcache, (int __user *)arg);
        case BLKI2OSRSTRAT:
                if (arg > CACHE_SMARTFETCH)
                        return -EINVAL;
                dev->rcache = arg;
                return 0;
        case BLKI2OSWSTRAT:
                if (arg != 0
                    && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
                        return -EINVAL;
                dev->wcache = arg;
                return 0;
        }
        return -ENOTTY;
}

/**
 *      i2o_block_media_changed - Have we seen a media change?
 *      @disk: gendisk which should be verified
 *
 *      Verifies if the media has changed.
 *
 *      Returns 1 if the media was changed or 0 otherwise.
 */
static int i2o_block_media_changed(struct gendisk *disk)
{
        struct i2o_block_device *p = disk->private_data;

        if (p->media_change_flag) {
                p->media_change_flag = 0;
                return 1;
        }
        return 0;
}

/**
 *      i2o_block_transfer - Transfer a request to/from the I2O controller
 *      @req: the request which should be transferred
 *
 *      This function converts the request into an I2O message. The necessary
 *      DMA buffers are allocated and once everything is set up the message is
 *      posted to the I2O controller. No cleanup is done by this function; it
 *      is done on the interrupt side when the reply arrives.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_transfer(struct request *req)
{
        struct i2o_block_device *dev = req->rq_disk->private_data;
        struct i2o_controller *c = dev->i2o_dev->iop;
        int tid = dev->i2o_dev->lct_data.tid;
        struct i2o_message *msg;
        void *mptr;
        struct i2o_block_request *ireq = req->special;
        struct scatterlist *sg;
        int sgnum;
        int i;
        u32 m;
        u32 tcntxt;
        u32 sg_flags;
        int rc;

        m = i2o_msg_get(c, &msg);
        if (m == I2O_QUEUE_EMPTY) {
                rc = -EBUSY;
                goto exit;
        }

        tcntxt = i2o_cntxt_list_add(c, req);
        if (!tcntxt) {
                rc = -ENOMEM;
                goto nop_msg;
        }

        if ((sgnum = i2o_block_sglist_alloc(ireq)) <= 0) {
                rc = -ENOMEM;
                goto context_remove;
        }

        /* Build the message based on the request. */
        writel(i2o_block_driver.context, &msg->u.s.icntxt);
        writel(tcntxt, &msg->u.s.tcntxt);
        writel(req->nr_sectors << 9, &msg->body[1]);

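        /*
         * 64-bit byte offset of the request: the low 32 bits go into body[2],
         * the high 32 bits into body[3] (sector >> 23 == (sector << 9) >> 32).
         */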
        writel((((u64) req->sector) << 9) & 0xffffffff, &msg->body[2]);
        writel(req->sector >> 23, &msg->body[3]);

        mptr = &msg->body[4];

        sg = ireq->sg_table;

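        /*
         * 0x10000000/0x14000000 appear to be the simple SGL element flags for
         * data-in and data-out transfers; the last element is additionally
         * marked with 0x80000000 in the loop further down. The body[0]
         * control words selecting the controller cache policy are kept
         * verbatim from the original driver.
         */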
        if (rq_data_dir(req) == READ) {
                writel(I2O_CMD_BLOCK_READ << 24 | HOST_TID << 12 | tid,
                       &msg->u.head[1]);
                sg_flags = 0x10000000;
                switch (dev->rcache) {
                case CACHE_NULL:
                        writel(0, &msg->body[0]);
                        break;
                case CACHE_PREFETCH:
                        writel(0x201F0008, &msg->body[0]);
                        break;
                case CACHE_SMARTFETCH:
                        if (req->nr_sectors > 16)
                                writel(0x201F0008, &msg->body[0]);
                        else
                                writel(0x001F0000, &msg->body[0]);
                        break;
                }
        } else {
                writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid,
                       &msg->u.head[1]);
                sg_flags = 0x14000000;
                switch (dev->wcache) {
                case CACHE_NULL:
                        writel(0, &msg->body[0]);
                        break;
                case CACHE_WRITETHROUGH:
                        writel(0x001F0008, &msg->body[0]);
                        break;
                case CACHE_WRITEBACK:
                        writel(0x001F0010, &msg->body[0]);
                        break;
                case CACHE_SMARTBACK:
                        if (req->nr_sectors > 16)
                                writel(0x001F0004, &msg->body[0]);
                        else
                                writel(0x001F0010, &msg->body[0]);
                        break;
                case CACHE_SMARTTHROUGH:
                        if (req->nr_sectors > 16)
                                writel(0x001F0004, &msg->body[0]);
                        else
                                writel(0x001F0010, &msg->body[0]);
                }
        }

        for (i = sgnum; i > 0; i--) {
                if (i == 1)
                        sg_flags |= 0x80000000;
                writel(sg_flags | sg_dma_len(sg), mptr);
                writel(sg_dma_address(sg), mptr + 4);
                mptr += 8;
                sg++;
        }

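        /*
         * head[0] is filled in last: I2O_MESSAGE_SIZE converts the number of
         * 32-bit words actually written (mptr minus the frame start, in
         * bytes, divided by four) into the size field, and SGL_OFFSET_8 says
         * the SG list starts at word 8 of the frame.
         */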
        writel(I2O_MESSAGE_SIZE
               (((unsigned long)mptr -
                 (unsigned long)&msg->u.head[0]) >> 2) | SGL_OFFSET_8,
               &msg->u.head[0]);

        i2o_msg_post(c, m);

        list_add_tail(&ireq->queue, &dev->open_queue);
        dev->open_queue_depth++;

        return 0;

      context_remove:
        i2o_cntxt_list_remove(c, req);

      nop_msg:
        i2o_msg_nop(c, m);

      exit:
        return rc;
}

/**
 *      i2o_block_request_fn - request queue handling function
 *      @q: request queue from which the request could be fetched
 *
 *      Takes the next request from the queue, transfers it and if no error
 *      occurs dequeues it from the queue. On arrival of the reply the message
 *      will be processed further. If an error occurs the request is requeued.
 */
static void i2o_block_request_fn(struct request_queue *q)
{
        struct request *req;

        while (!blk_queue_plugged(q)) {
                req = elv_next_request(q);
                if (!req)
                        break;

                if (blk_fs_request(req)) {
                        struct i2o_block_delayed_request *dreq;
                        struct i2o_block_request *ireq = req->special;
                        unsigned int queue_depth;

                        queue_depth = ireq->i2o_blk_dev->open_queue_depth;

                        if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS)
                                if (!i2o_block_transfer(req)) {
                                        blkdev_dequeue_request(req);
                                        continue;
                                }

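                        /*
                         * If other requests are still in flight, their
                         * replies will restart the queue, so just stop here.
                         * Only with an empty open queue do we need the
                         * delayed restart scheduled below.
                         */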
                        if (queue_depth)
                                break;

                        /* stop the queue and retry later */
                        dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
                        if (!dreq)
                                continue;

                        dreq->queue = q;
                        INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
                                  dreq);

                        printk(KERN_INFO "block-osm: transfer error\n");
                        if (!queue_delayed_work(i2o_block_driver.event_queue,
                                                &dreq->work,
                                                I2O_BLOCK_RETRY_TIME))
                                kfree(dreq);
                        else {
                                blk_stop_queue(q);
                                break;
                        }
                } else
                        end_request(req, 0);
        }
}

/* I2O Block device operations definition */
static struct block_device_operations i2o_block_fops = {
        .owner = THIS_MODULE,
        .open = i2o_block_open,
        .release = i2o_block_release,
        .ioctl = i2o_block_ioctl,
        .media_changed = i2o_block_media_changed
};

/**
 *      i2o_block_device_alloc - Allocate memory for an I2O Block device
 *
 *      Allocate memory for the i2o_block_device struct, gendisk and request
 *      queue and initialize them as far as possible without additional device
 *      information.
 *
 *      Returns a pointer to the allocated I2O Block device on success or a
 *      negative error code on failure.
 */
static struct i2o_block_device *i2o_block_device_alloc(void)
{
        struct i2o_block_device *dev;
        struct gendisk *gd;
        struct request_queue *queue;
        int rc;

        dev = kmalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev) {
                printk(KERN_ERR "block-osm: Insufficient memory to allocate "
                       "I2O Block disk.\n");
                rc = -ENOMEM;
                goto exit;
        }
        memset(dev, 0, sizeof(*dev));

        INIT_LIST_HEAD(&dev->open_queue);
        spin_lock_init(&dev->lock);
        dev->rcache = CACHE_PREFETCH;
        dev->wcache = CACHE_WRITEBACK;

        /* allocate a gendisk with 16 partitions */
        gd = alloc_disk(16);
        if (!gd) {
                printk(KERN_ERR "block-osm: Insufficient memory to allocate "
                       "gendisk.\n");
                rc = -ENOMEM;
                goto cleanup_dev;
        }

        /* initialize the request queue */
        queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
        if (!queue) {
                printk(KERN_ERR "block-osm: Insufficient memory to allocate "
                       "request queue.\n");
                rc = -ENOMEM;
                goto cleanup_queue;
        }

        blk_queue_prep_rq(queue, i2o_block_prep_req_fn);

        gd->major = I2O_MAJOR;
        gd->queue = queue;
        gd->fops = &i2o_block_fops;
        gd->private_data = dev;

        dev->gd = gd;

        return dev;

      cleanup_queue:
        put_disk(gd);

      cleanup_dev:
        kfree(dev);

      exit:
        return ERR_PTR(rc);
}

/**
 *      i2o_block_probe - verify if dev is an I2O Block device and install it
 *      @dev: device to verify if it is an I2O Block device
 *
 *      We only verify that the user_tid of the device is 0xfff and then
 *      install the device. Otherwise it is used by some other device
 *      (e.g. RAID).
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int i2o_block_probe(struct device *dev)
{
        struct i2o_device *i2o_dev = to_i2o_device(dev);
        struct i2o_block_device *i2o_blk_dev;
        struct i2o_controller *c = i2o_dev->iop;
        struct gendisk *gd;
        struct request_queue *queue;
        static int unit = 0;
        int rc;
        u64 size;
        u32 blocksize;
        u16 power;
        u32 flags, status;
        int segments;

        /* skip devices which are used by IOP */
        if (i2o_dev->lct_data.user_tid != 0xfff) {
                pr_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
                return -ENODEV;
        }

        printk(KERN_INFO "block-osm: New device detected (TID: %03x)\n",
               i2o_dev->lct_data.tid);

        if (i2o_device_claim(i2o_dev)) {
                printk(KERN_WARNING "block-osm: Unable to claim device. "
                       "Installation aborted\n");
                rc = -EFAULT;
                goto exit;
        }

        i2o_blk_dev = i2o_block_device_alloc();
        if (IS_ERR(i2o_blk_dev)) {
                printk(KERN_ERR "block-osm: could not alloc a new I2O block "
                       "device\n");
                rc = PTR_ERR(i2o_blk_dev);
                goto claim_release;
        }

        i2o_blk_dev->i2o_dev = i2o_dev;
        dev_set_drvdata(dev, i2o_blk_dev);

        /* setup gendisk */
        gd = i2o_blk_dev->gd;
        gd->first_minor = unit << 4;
        sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
        sprintf(gd->devfs_name, "i2o/hd%c", 'a' + unit);
        gd->driverfs_dev = &i2o_dev->device;

        /* setup request queue */
        queue = gd->queue;
        queue->queuedata = i2o_blk_dev;

        blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS);
        blk_queue_max_sectors(queue, I2O_MAX_SECTORS);

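        /*
         * Each simple SG element occupies two 32-bit words, and the inbound
         * frame must also hold the message header (sizeof(struct i2o_message)
         * / 4 words) plus four body words of command payload; controllers
         * limited to short requests get a fixed eight elements.
         */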
        if (c->short_req)
                segments = 8;
        else {
                i2o_status_block *sb;

                sb = c->status_block.virt;

                segments = (sb->inbound_frame_size -
                            sizeof(struct i2o_message) / 4 - 4) / 2;
        }

        blk_queue_max_hw_segments(queue, segments);

        pr_debug("max sectors:   %d\n", I2O_MAX_SECTORS);
        pr_debug("phys segments: %d\n", I2O_MAX_SEGMENTS);
        pr_debug("hw segments:   %d\n", segments);

        /*
         *      Ask for the current media data. If that isn't supported
         *      then we ask for the device capacity data
         */
        if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) != 0
            || i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) != 0) {
                i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4);
                i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8);
        }
        pr_debug("blocksize:     %d\n", blocksize);

        if (i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
                power = 0;
        i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
        i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);

        set_capacity(gd, size >> 9);

        i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);

        add_disk(gd);

        unit++;

        return 0;

      claim_release:
        i2o_device_claim_release(i2o_dev);

      exit:
        return rc;
}

/* Block OSM driver struct */
static struct i2o_driver i2o_block_driver = {
        .name = "block-osm",
        .event = i2o_block_event,
        .reply = i2o_block_reply,
        .classes = i2o_block_class_id,
        .driver = {
                   .probe = i2o_block_probe,
                   .remove = i2o_block_remove,
                   },
};

/**
 *      i2o_block_init - Block OSM initialization function
 *
 *      Allocates the slab and mempool for request structs, registers the
 *      i2o_block block device and finally registers the Block OSM in the
 *      I2O core.
 *
 *      Returns 0 on success or negative error code on failure.
 */
static int __init i2o_block_init(void)
{
        int rc;
        int size;

        printk(KERN_INFO "I2O Block Storage OSM v0.9\n");
        printk(KERN_INFO "   (c) Copyright 1999-2001 Red Hat Software.\n");

        /* Allocate request mempool and slab */
        size = sizeof(struct i2o_block_request);
        i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
                                                  SLAB_HWCACHE_ALIGN, NULL,
                                                  NULL);
        if (!i2o_blk_req_pool.slab) {
                printk(KERN_ERR "block-osm: can't init request slab\n");
                rc = -ENOMEM;
                goto exit;
        }

        i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE,
                                               mempool_alloc_slab,
                                               mempool_free_slab,
                                               i2o_blk_req_pool.slab);
        if (!i2o_blk_req_pool.pool) {
                printk(KERN_ERR "block-osm: can't init request mempool\n");
                rc = -ENOMEM;
                goto free_slab;
        }

        /* Register the block device interfaces */
        rc = register_blkdev(I2O_MAJOR, "i2o_block");
        if (rc) {
                printk(KERN_ERR "block-osm: unable to register block device\n");
                goto free_mempool;
        }
#ifdef MODULE
        printk(KERN_INFO "block-osm: registered device at major %d\n",
               I2O_MAJOR);
#endif

        /* Register Block OSM into I2O core */
        rc = i2o_driver_register(&i2o_block_driver);
        if (rc) {
                printk(KERN_ERR "block-osm: Could not register Block driver\n");
                goto unregister_blkdev;
        }

        return 0;

      unregister_blkdev:
        unregister_blkdev(I2O_MAJOR, "i2o_block");

      free_mempool:
        mempool_destroy(i2o_blk_req_pool.pool);

      free_slab:
        kmem_cache_destroy(i2o_blk_req_pool.slab);

      exit:
        return rc;
}

/**
 *      i2o_block_exit - Block OSM exit function
 *
 *      Unregisters Block OSM from I2O core, unregisters i2o_block block device
 *      and frees the mempool and slab.
 */
static void __exit i2o_block_exit(void)
{
        /* Unregister I2O Block OSM from I2O core */
        i2o_driver_unregister(&i2o_block_driver);

        /* Unregister block device */
        unregister_blkdev(I2O_MAJOR, "i2o_block");

        /* Free request mempool and slab */
        mempool_destroy(i2o_blk_req_pool.pool);
        kmem_cache_destroy(i2o_blk_req_pool.slab);
}

MODULE_AUTHOR("Red Hat");
MODULE_DESCRIPTION("I2O Block Device OSM");
MODULE_LICENSE("GPL");

module_init(i2o_block_init);
module_exit(i2o_block_exit);