2 * Implements the dump driver interface for saving a dump to
3 * a block device through the kernel's generic low level block i/o
6 * Started: June 2002 - Mohamed Abbas <mohamed.abbas@intel.com>
7 * Moved original lkcd kiobuf dump i/o code from dump_base.c
8 * to use generic dump device interfaces
10 * Sept 2002 - Bharata B. Rao <bharata@in.ibm.com>
11 * Convert dump i/o to directly use bio instead of kiobuf for 2.5
13 * Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
14 * Rework to new dumpdev.h structures, implement open/close/
15 * silence, misc fixes (blocknr removal, bio_add_page usage)
17 * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
18 * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
19 * Copyright (C) 2002 International Business Machines Corp.
21 * This code is released under version 2 of the GNU GPL.
24 #include <linux/types.h>
25 #include <linux/proc_fs.h>
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include <linux/blkdev.h>
29 #include <linux/bio.h>
30 #include <asm/hardirq.h>
31 #include <linux/dump.h>
32 #include "dump_methods.h"
34 extern void *dump_page_buf;
36 /* The end_io callback for dump i/o completion */
/*
 * Installed as bio->bi_end_io by dump_block_map().  While bytes remain
 * outstanding it returns 1 ("not complete"); once the whole transfer is
 * done it stashes any i/o error into the owning dump_blockdev's ->err
 * field, which dump_block_ready() polls (no waitqueue wakeup is needed).
 *
 * NOTE(review): the `static int`, braces and the bytes-remaining /
 * error-check conditionals were lost in extraction -- restore from the
 * upstream LKCD source before building.
 */
38 dump_bio_end_io(struct bio *bio, unsigned int bytes_done, int error)
40 struct dump_blockdev *dump_bdev;
43 /* some bytes still left to transfer */
44 return 1; /* not complete */
/* bi_private was pointed at the dump_blockdev when the bio was mapped */
47 dump_bdev = (struct dump_blockdev *)bio->bi_private;
49 printk("IO error while writing the dump, aborting\n");
/* record the final status; the caller polls dump_bdev->err */
52 dump_bdev->err = error;
54 /* no wakeup needed, since caller polls for completion */
58 /* Check if the dump bio is already mapped to the specified buffer */
60 dump_block_map_valid(struct dump_blockdev *dev, struct page *page,
63 struct bio *bio = dev->bio;
64 unsigned long bsize = 0;
67 return 0; /* first time, not mapped */
70 if ((bio_page(bio) != page) || (len > bio->bi_vcnt << PAGE_SHIFT))
71 return 0; /* buffer not mapped */
73 bsize = bdev_hardsect_size(bio->bi_bdev);
74 if ((len & (PAGE_SIZE - 1)) || (len & bsize))
75 return 0; /* alignment checks needed */
77 /* quick check to decide if we need to redo bio_add_page */
78 if (bdev_get_queue(bio->bi_bdev)->merge_bvec_fn)
79 return 0; /* device may have other restrictions */
81 return 1; /* already mapped */
85 * Set up the dump bio for i/o from the specified buffer
86 * Return value indicates whether the full buffer could be mapped or not
89 dump_block_map(struct dump_blockdev *dev, void *buf, int len)
91 struct page *page = virt_to_page(buf);
92 struct bio *bio = dev->bio;
93 unsigned long bsize = 0;
95 bio->bi_bdev = dev->bdev;
96 bio->bi_sector = (dev->start_offset + dev->ddev.curr_offset) >> 9;
97 bio->bi_idx = 0; /* reset index to the beginning */
99 if (dump_block_map_valid(dev, page, len)) {
100 /* already mapped and usable rightaway */
101 bio->bi_size = len; /* reset size to the whole bio */
103 /* need to map the bio */
106 bsize = bdev_hardsect_size(bio->bi_bdev);
108 /* first a few sanity checks */
110 printk("map: len less than hardsect size \n");
114 if ((unsigned long)buf & bsize) {
115 printk("map: not aligned \n");
119 /* assume contig. page aligned low mem buffer( no vmalloc) */
120 if ((page_address(page) != buf) || (len & (PAGE_SIZE - 1))) {
121 printk("map: invalid buffer alignment!\n");
124 /* finally we can go ahead and map it */
125 while (bio->bi_size < len)
126 if (bio_add_page(bio, page++, PAGE_SIZE, 0) == 0) {
130 bio->bi_end_io = dump_bio_end_io;
131 bio->bi_private = dev;
134 if (bio->bi_size != len) {
135 printk("map: bio size = %d not enough for len = %d!\n",
/*
 * Release the bio used for dump i/o: free the bio_vec array that
 * dump_block_open() kmalloc'ed.
 * NOTE(review): the surrounding braces and the matching kfree(bio)
 * are not visible in this chunk -- TODO confirm against upstream.
 */
143 dump_free_bio(struct bio *bio)
146 kfree(bio->bi_io_vec);
151 * Prepares the dump device so we can take a dump later.
152 * The caller is expected to have filled up the dev_id field in the
153 * block dump dev structure.
155 * At dump time when dump_block_write() is invoked it will be too
156 * late to recover, so as far as possible make sure obvious errors
157 * get caught right here and reported back to the caller.
/*
 * Returns 0 on success or a negative errno; on failure the err1/err2/err3
 * labels unwind in reverse order of acquisition (bio -> blkdev -> bdev ref).
 * NOTE(review): the validity check on `arg`, the NULL checks after
 * bdget()/kmalloc(), the `retval` declaration and several goto/return
 * lines were dropped in extraction -- restore from upstream before use.
 */
160 dump_block_open(struct dump_dev *dev, unsigned long arg)
162 struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
163 struct block_device *bdev;
165 struct bio_vec *bvec;
167 /* make sure this is a valid block device */
173 /* Convert it to the new dev_t format */
174 arg = MKDEV((arg >> OLDMINORBITS), (arg & OLDMINORMASK));
176 /* get a corresponding block_dev struct for this */
177 bdev = bdget((dev_t)arg);
183 /* get the block device opened */
184 if ((retval = blkdev_get(bdev, O_RDWR | O_LARGEFILE, 0))) {
/* allocate the bio used for every dump write from here on */
188 if ((dump_bdev->bio = kmalloc(sizeof(struct bio), GFP_KERNEL))
190 printk("Cannot allocate bio\n");
195 bio_init(dump_bdev->bio);
/* one bio_vec per page of the dump buffer */
197 if ((bvec = kmalloc(sizeof(struct bio_vec) *
198 (DUMP_BUFFER_SIZE >> PAGE_SHIFT), GFP_KERNEL)) == NULL) {
203 /* assign the new dump dev structure */
204 dump_bdev->dev_id = (dev_t)arg;
205 dump_bdev->bdev = bdev;
207 /* make a note of the limit */
/* device size caps how far dump_block_seek/write may go */
208 dump_bdev->limit = bdev->bd_inode->i_size;
210 /* now make sure we can map the dump buffer */
211 dump_bdev->bio->bi_io_vec = bvec;
212 dump_bdev->bio->bi_max_vecs = DUMP_BUFFER_SIZE >> PAGE_SHIFT;
/* dry-run the mapping now, while failure is still recoverable */
214 retval = dump_block_map(dump_bdev, dump_config.dumper->dump_buf,
218 printk("open: dump_block_map failed, ret %d\n", retval);
222 printk("Block device (%d,%d) successfully configured for dumping\n",
223 MAJOR(dump_bdev->dev_id),
224 MINOR(dump_bdev->dev_id));
227 /* after opening the block device, return */
/* error unwind: reverse order of acquisition */
230 err3: dump_free_bio(dump_bdev->bio);
231 dump_bdev->bio = NULL;
232 err2: if (bdev) blkdev_put(bdev);
234 err1: if (bdev) bdput(bdev);
235 dump_bdev->bdev = NULL;
240 * Close the dump device and release associated resources
241 * Invoked when unconfiguring the dump device.
/*
 * Undoes dump_block_open(): drops the opened block device and frees the
 * bio + bio_vec array.  Both pointers are NULLed so a repeated release
 * (or a release after a failed open) is harmless.
 */
244 dump_block_release(struct dump_dev *dev)
246 struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
248 /* release earlier bdev if present */
249 if (dump_bdev->bdev) {
250 blkdev_put(dump_bdev->bdev);
251 dump_bdev->bdev = NULL;
254 dump_free_bio(dump_bdev->bio);
255 dump_bdev->bio = NULL;
262 * Prepare the dump device for use (silence any ongoing activity
263 * and quiesce state) when the system crashes.
/*
 * Called at crash time.  Uses spin_trylock (never a blocking lock -- we
 * may be crashing with the queue lock already held) to peek at the
 * request queue; a non-empty queue is only warned about, not treated as
 * fatal, since the dump proceeds on the assumption that the device is
 * exclusively ours.  Then relaxes silencing to DUMP_SOFT_SPIN_CPUS and
 * re-enables interrupts locally via the arch hook __dump_irq_enable().
 */
266 dump_block_silence(struct dump_dev *dev)
268 struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
269 struct request_queue *q = bdev_get_queue(dump_bdev->bdev);
272 /* If we can't get request queue lock, refuse to take the dump */
273 if (!spin_trylock(q->queue_lock))
276 ret = elv_queue_empty(q);
277 spin_unlock(q->queue_lock);
279 /* For now we assume we have the device to ourselves */
280 /* Just a quick sanity check */
282 /* Warn the user and move on */
283 printk(KERN_ALERT "Warning: Non-empty request queue\n");
284 printk(KERN_ALERT "I/O requests in flight at dump time\n");
288 * Move to a softer level of silencing where no spin_lock_irqs
289 * are held on other cpus
291 dump_silence_level = DUMP_SOFT_SPIN_CPUS;
293 ret = __dump_irq_enable();
298 printk("Dumping to block device (%d,%d) on CPU %d ...\n",
299 MAJOR(dump_bdev->dev_id), MINOR(dump_bdev->dev_id),
306 * Invoked when dumping is done. This is the time to put things back
307 * (i.e. undo the effects of dump_block_silence) so the device is
308 * available for normal use.
/* Arch hook restoring the irq state changed by __dump_irq_enable(). */
311 dump_block_resume(struct dump_dev *dev)
313 __dump_irq_restore();
319 * Seek to the specified offset in the dump device.
320 * Makes sure this is a valid offset, otherwise returns an error.
/*
 * `off` is relative to the dump area; the device-absolute position is
 * off + start_offset (which skips the swap-header region).  The offset
 * must be page aligned, hardsect aligned, and within the device limit
 * recorded at open time.  On success only dev->curr_offset is updated;
 * the actual bio positioning happens later in dump_block_map().
 */
323 dump_block_seek(struct dump_dev *dev, loff_t off)
325 struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
326 loff_t offset = off + dump_bdev->start_offset;
328 if (offset & ( PAGE_SIZE - 1)) {
329 printk("seek: non-page aligned\n");
333 if (offset & (bdev_hardsect_size(dump_bdev->bdev) - 1)) {
334 printk("seek: not sector aligned \n");
338 if (offset > dump_bdev->limit) {
339 printk("seek: not enough space left on device!\n");
342 dev->curr_offset = off;
347 * Write out a buffer after checking the device limitations,
348 * sector sizes, etc. Assumes the buffer is in directly mapped
349 * kernel address space (not vmalloc'ed).
351 * Returns: number of bytes written or -ERRNO.
/*
 * The write is asynchronous: err is pre-set to -EAGAIN before
 * submit_bio(), and dump_block_ready() later polls for completion
 * (dump_bio_end_io clears/overwrites err when the bio finishes).
 * Writes that would run past the device limit are truncated, and
 * curr_offset is advanced by the (possibly truncated) length.
 */
354 dump_block_write(struct dump_dev *dev, void *buf,
357 struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
358 loff_t offset = dev->curr_offset + dump_bdev->start_offset;
359 int retval = -ENOSPC;
361 if (offset >= dump_bdev->limit) {
362 printk("write: not enough space left on device!\n");
366 /* don't write more blocks than our max limit */
367 if (offset + len > dump_bdev->limit)
368 len = dump_bdev->limit - offset;
371 retval = dump_block_map(dump_bdev, buf, len);
373 printk("write: dump_block_map failed! err %d\n", retval);
378 * Write out the data to disk.
379 * Assumes the entire buffer mapped to a single bio, which we can
380 * submit and wait for io completion. In the future, may consider
381 * increasing the dump buffer size and submitting multiple bio s
382 * for better throughput.
/* -EAGAIN marks the i/o as in flight until dump_bio_end_io runs */
384 dump_bdev->err = -EAGAIN;
385 submit_bio(WRITE, dump_bdev->bio);
387 dump_bdev->ddev.curr_offset += len;
394 * Name: dump_block_ready()
395 * Func: check if the last dump i/o is over and ready for next request
/*
 * Polls dump_bdev->err: -EAGAIN means the bio submitted by
 * dump_block_write() is still in flight (the missing lines presumably
 * kick the request queue and return -EAGAIN -- TODO confirm against
 * upstream); any other non-zero value is the final i/o error.
 */
398 dump_block_ready(struct dump_dev *dev, void *buf)
400 struct dump_blockdev *dump_bdev = DUMP_BDEV(dev);
401 request_queue_t *q = bdev_get_queue(dump_bdev->bio->bi_bdev);
403 /* check for io completion */
404 if (dump_bdev->err == -EAGAIN) {
409 if (dump_bdev->err) {
410 printk("dump i/o err\n");
411 return dump_bdev->err;
/*
 * Dump-device operations vector registered with the LKCD core.
 * .read is intentionally absent; all other hooks are implemented above.
 */
418 struct dump_dev_ops dump_blockdev_ops = {
419 .open = dump_block_open,
420 .release = dump_block_release,
421 .silence = dump_block_silence,
422 .resume = dump_block_resume,
423 .seek = dump_block_seek,
424 .write = dump_block_write,
425 /* .read not implemented */
426 .ready = dump_block_ready
/*
 * Default (and only) block dump device instance; exported via the
 * dump_blockdev pointer so the core can reach its embedded dump_dev.
 */
429 static struct dump_blockdev default_dump_blockdev = {
430 .ddev = {.type_name = "blockdev", .ops = &dump_blockdev_ops,
433 * leave enough room for the longest swap header possibly written
434 * written by mkswap (likely the largest page size supported by
437 .start_offset = DUMP_HEADER_OFFSET,
439 /* assume the rest of the fields are zeroed by default */
442 struct dump_blockdev *dump_blockdev = &default_dump_blockdev;
/*
 * Module init: register the block dump device with the LKCD core.
 * NOTE(review): the return statements and braces were lost in
 * extraction; presumably returns a negative errno on registration
 * failure and 0 on success -- confirm against upstream.
 */
445 dump_blockdev_init(void)
447 if (dump_register_device(&dump_blockdev->ddev) < 0) {
448 printk("block device driver registration failed\n");
452 printk("block device driver for LKCD registered\n");
/* Module exit: unregister the block dump device from the LKCD core. */
457 dump_blockdev_cleanup(void)
459 dump_unregister_device(&dump_blockdev->ddev);
460 printk("block device driver for LKCD unregistered\n");
/* Standard module metadata and entry/exit point registration. */
463 MODULE_AUTHOR("LKCD Development Team <lkcd-devel@lists.sourceforge.net>");
464 MODULE_DESCRIPTION("Block Dump Driver for Linux Kernel Crash Dump (LKCD)");
465 MODULE_LICENSE("GPL");
467 module_init(dump_blockdev_init);
468 module_exit(dump_blockdev_cleanup);