2 * Two-stage soft-boot based dump scheme methods (memory overlay
3 * with post soft-boot writeout)
5 * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
7 * This approach of saving the dump in memory and writing it
8 * out after a softboot without clearing memory is derived from the
9 * Mission Critical Linux dump implementation. Credits and a big
10 * thanks for letting the lkcd project make use of the excellent
11 * piece of work and also for helping with clarifications and
12 * tips along the way are due to:
13 * Dave Winchell <winchell@mclx.com> (primary author of mcore)
15 * Jeff Moyer <moyer@mclx.com>
16 * Josh Huber <huber@mclx.com>
18 * For those familiar with the mcore implementation, the key
19 * differences/extensions here are in allowing entire memory to be
20 * saved (in compressed form) through a careful ordering scheme
21 * on both the way down as well on the way up after boot, the latter
22 * for supporting the LKCD notion of passes in which most critical
23 * data is the first to be saved to the dump device. Also the post
24 * boot writeout happens from within the kernel rather than driven
27 * The sequence is orchestrated through the abstraction of "dumpers",
28 * one for the first stage which then sets up the dumper for the next
 * stage, providing for a smooth and flexible reuse of the single-stage
30 * dump scheme methods and a handle to pass dump device configuration
31 * information across the soft boot.
33 * Copyright (C) 2002 International Business Machines Corp.
35 * This code is released under version 2 of the GNU GPL.
39 * Disruptive dumping using the second kernel soft-boot option
40 * for issuing dump i/o operates in 2 stages:
42 * (1) - Saves the (compressed & formatted) dump in memory using a
43 * carefully ordered overlay scheme designed to capture the
44 * entire physical memory or selective portions depending on
45 * dump config settings,
46 * - Registers the stage 2 dumper and
47 * - Issues a soft reboot w/o clearing memory.
49 * The overlay scheme starts with a small bootstrap free area
50 * and follows a reverse ordering of passes wherein it
51 * compresses and saves data starting with the least critical
52 * areas first, thus freeing up the corresponding pages to
53 * serve as destination for subsequent data to be saved, and
54 * so on. With a good compression ratio, this makes it feasible
55 * to capture an entire physical memory dump without significantly
56 * reducing memory available during regular operation.
58 * (2) Post soft-reboot, runs through the saved memory dump and
59 * writes it out to disk, this time around, taking care to
60 * save the more critical data first (i.e. pages which figure
61 * in early passes for a regular dump). Finally issues a
64 * Since the data was saved in memory after selection/filtering
65 * and formatted as per the chosen output dump format, at this
66 * stage the filter and format actions are just dummy (or
67 * passthrough) actions, except for influence on ordering of
71 #include <linux/types.h>
72 #include <linux/kernel.h>
73 #include <linux/highmem.h>
74 #include <linux/bootmem.h>
75 #include <linux/dump.h>
77 #include <linux/delay.h>
78 #include <linux/reboot.h>
79 #include <linux/kexec.h>
81 #include "dump_methods.h"
83 extern struct list_head dumper_list_head;
84 extern struct dump_memdev *dump_memdev;
85 extern struct dumper dumper_stage2;
86 struct dump_config_block *dump_saved_config = NULL;
87 extern struct dump_blockdev *dump_blockdev;
88 static struct dump_memdev *saved_dump_memdev = NULL;
89 static struct dumper *saved_dumper = NULL;
92 extern int panic_timeout;
96 extern void dump_display_map(struct dump_memdev *);
/*
 * Look up a registered dumper by name (bounded, 32-char compare).
 * NOTE(review): this excerpt is truncated -- the list_for_each_entry
 * match body and the final return (presumably NULL on no match) are
 * not visible here; confirm against the full source.
 */
struct dumper *dumper_by_name(char *name)
	struct dumper *dumper;
	/* scan all dumpers registered on the global list */
	list_for_each_entry(dumper, &dumper_list_head, dumper_list)
		if (!strncmp(dumper->name, name, 32))

	/* Temporary proof of concept: also match the static stage 2 dumper */
	if (!strncmp(dumper_stage2.name, name, 32))
		return &dumper_stage2;
117 #ifdef CONFIG_CRASH_DUMP_SOFTBOOT
118 extern void dump_early_reserve_map(struct dump_memdev *);
/*
 * Early-boot hook: if a previous kernel left a saved dump in memory,
 * validate its config block and reserve those pages so the bootmem
 * allocator does not hand them out.
 * NOTE(review): truncated excerpt -- early-return bodies and closing
 * braces are not visible here.
 */
void crashdump_reserve(void)
	extern unsigned long crashdump_addr;

	/* 0xdeadbeef acts as the "no crashdump address passed" sentinel */
	if (crashdump_addr == 0xdeadbeef)

	/* reserve dump config and saved dump pages */
	dump_saved_config = (struct dump_config_block *)crashdump_addr;
	/* magic verification */
	if (dump_saved_config->magic != DUMP_MAGIC_LIVE) {
		printk("Invalid dump magic. Ignoring dump\n");
		dump_saved_config = NULL;

	printk("Dump may be available from previous boot\n");

	/* keep the config block itself out of the bootmem allocator */
	reserve_bootmem(virt_to_phys((void *)crashdump_addr),
	PAGE_ALIGN(sizeof(struct dump_config_block)));
	dump_early_reserve_map(&dump_saved_config->memdev);
/*
 * Loads the dump configuration from a memory block saved across soft-boot
 * The ops vectors need fixing up as the corresp. routines may have
 * relocated in the new soft-booted kernel.
 *
 * Verifies each saved component (dumper, scheme, filters, format, device)
 * against this kernel's registered counterparts by name, then patches the
 * saved config's function-pointer tables to point at the current kernel's
 * routines. Returns 0 on success, -ENOENT on any mismatch.
 * NOTE(review): truncated excerpt -- the declaration of loop index 'i',
 * several closing braces and the final return are not visible here.
 */
int dump_load_config(struct dump_config_block *config)
	struct dumper *dumper;
	struct dump_data_filter *filter_table, *filter;
	struct dump_dev *dev;

	if (config->magic != DUMP_MAGIC_LIVE)
		return -ENOENT; /* not a valid config */

	/* initialize generic config data */
	memcpy(&dump_config, &config->config, sizeof(dump_config));

	/* initialize dumper state */
	if (!(dumper = dumper_by_name(config->dumper.name))) {
		printk("dumper name mismatch\n");
		return -ENOENT; /* dumper mismatch */

	/* verify and fixup schema */
	if (strncmp(dumper->scheme->name, config->scheme.name, 32)) {
		printk("dumper scheme mismatch\n");
		return -ENOENT; /* mismatch */

	/* re-point scheme ops at this kernel's (possibly relocated) routines */
	config->scheme.ops = dumper->scheme->ops;
	config->dumper.scheme = &config->scheme;

	/* verify and fixup filter operations */
	filter_table = dumper->filter;
	for (i = 0, filter = config->filter_table;
	((i < MAX_PASSES) && filter_table[i].selector);
		if (strncmp(filter_table[i].name, filter->name, 32)) {
			printk("dump filter mismatch\n");
			return -ENOENT; /* filter name mismatch */
		/* selector function pointer must come from the running kernel */
		filter->selector = filter_table[i].selector;

	config->dumper.filter = config->filter_table;

	if (strncmp(dumper->fmt->name, config->fmt.name, 32)) {
		printk("dump format mismatch\n");
		return -ENOENT; /* mismatch */

	config->fmt.ops = dumper->fmt->ops;
	config->dumper.fmt = &config->fmt;

	/* fixup target device */
	dev = (struct dump_dev *)(&config->dev[0]);
	if (dumper->dev == NULL) {
		pr_debug("Vanilla dumper - assume default\n");
		if (dump_dev == NULL)
		dumper->dev = dump_dev;

	if (strncmp(dumper->dev->type_name, dev->type_name, 32)) {
		printk("dump dev type mismatch %s instead of %s\n",
		dev->type_name, dumper->dev->type_name);
		return -ENOENT; /* mismatch */

	dev->ops = dumper->dev->ops;
	config->dumper.dev = dev;

	/* fixup memory device containing saved dump pages */
	/* assume statically init'ed dump_memdev */
	config->memdev.ddev.ops = dump_memdev->ddev.ops;
	/* switch to memdev from prev boot */
	saved_dump_memdev = dump_memdev; /* remember current */
	dump_memdev = &config->memdev;

	/* Make this the current primary dumper */
	dump_config.dumper = &config->dumper;
/* Saves the dump configuration in a memory block for use across a soft-boot */
/*
 * Snapshots the live dump_config, the active dumper and all of its
 * components (scheme, format, device, filter table) plus the memdev
 * handle into 'config', then stamps the magic so the next kernel can
 * recognize it (see dump_load_config / crashdump_reserve).
 * NOTE(review): truncated excerpt -- the final return is not visible.
 */
int dump_save_config(struct dump_config_block *config)
	printk("saving dump config settings\n");

	/* dump config settings */
	memcpy(&config->config, &dump_config, sizeof(dump_config));

	/* dumper and each of its component structures, copied by value */
	memcpy(&config->dumper, dump_config.dumper, sizeof(struct dumper));
	memcpy(&config->scheme, dump_config.dumper->scheme,
	sizeof(struct dump_scheme));
	memcpy(&config->fmt, dump_config.dumper->fmt, sizeof(struct dump_fmt));
	memcpy(&config->dev[0], dump_config.dumper->dev,
	sizeof(struct dump_anydev));
	memcpy(&config->filter_table, dump_config.dumper->filter,
	sizeof(struct dump_data_filter)*MAX_PASSES);

	/* handle to saved mem pages */
	memcpy(&config->memdev, dump_memdev, sizeof(struct dump_memdev));

	/* magic written last: marks the block valid for the next boot */
	config->magic = DUMP_MAGIC_LIVE;
/*
 * Stage 2 entry: load the dumper configuration preserved from the
 * previous boot, configure the real dump device, and write out the
 * in-memory dump via dump_generic_execute().
 * NOTE(review): truncated excerpt -- the saved_config NULL/magic check,
 * error-path bodies, 'err' declaration and returns are not visible here.
 */
int dump_init_stage2(struct dump_config_block *saved_config)
	pr_debug("dump_init_stage2\n");
	/* Check if dump from previous boot exists */
	printk("loading dumper from previous boot \n");
	/* load and configure dumper from previous boot */
	if ((err = dump_load_config(saved_config)))

	if ((err = dump_configure(dump_config.dump_device))) {
		printk("Stage 2 dump configure failed\n");

	dump_dev = dump_config.dumper->dev;
	/* write out the dump */
	err = dump_generic_execute(NULL, NULL);

	/* config block consumed; forget it so we don't replay the dump */
	dump_saved_config = NULL;

	/* no dump to write out */
	printk("no dumper from previous boot \n");
293 extern void dump_mem_markpages(struct dump_memdev *);
/*
 * Run stage 2 writeout immediately (without an intervening soft-boot),
 * then restore the stage 1 memdev/dumper state so normal operation can
 * continue. Used for the non-disruptive path and for flushing a dump
 * pending from a previous boot.
 * NOTE(review): truncated excerpt -- 'ret' declaration and the return
 * statement are not visible here.
 */
int dump_switchover_stage(void)
	/* trigger stage 2 rightaway - in real life would be after soft-boot */
	/* dump_saved_config would be a boot param */
	saved_dump_memdev = dump_memdev;
	saved_dumper = dump_config.dumper;
	ret = dump_init_stage2(dump_saved_config);
	/* restore the pre-switchover view regardless of outcome */
	dump_memdev = saved_dump_memdev;
	dump_config.dumper = saved_dumper;
/*
 * Disruptive path: quiesce all other CPUs, then soft-boot into the
 * pre-loaded kexec image so stage 2 can write the dump out post-reboot.
 * Does not return if machine_kexec() succeeds.
 * NOTE(review): truncated excerpt -- 'err' declaration, conditional
 * structure and returns are not visible here.
 */
int dump_activate_softboot(void)
	int num_cpus_online = 0;
	struct kimage *image;

	/* temporary - switchover to writeout previously saved dump */
	err = dump_switchover_stage(); /* non-disruptive case */
	dump_config.dumper = &dumper_stage1; /* set things back */

	dump_silence_level = DUMP_HALT_CPUS;
	/* wait till we become the only cpu */
	/* maybe by checking for online cpus ? */
	/* busy-wait spin until every other CPU has gone offline */
	while((num_cpus_online = num_online_cpus()) > 1);

	/* now call into kexec */
	/* atomically take ownership of the kexec image */
	image = xchg(&kexec_image, 0);
	/* grace delay before the reboot; panic_timeout is in seconds */
	mdelay(panic_timeout*1000);
	machine_kexec(image);
342 * * should we call reboot notifiers ? inappropriate for panic ?
343 * * what about device_shutdown() ?
344 * * is explicit bus master disabling needed or can we do that
345 * * through driverfs ?
351 /* --- DUMP SCHEME ROUTINES --- */
353 static inline int dump_buf_pending(struct dumper *dumper)
355 return (dumper->curr_buf - dumper->dump_buf);
/* Invoked during stage 1 of soft-reboot based dumping */
/*
 * Runs the dump passes in REVERSE order so that the least critical pages
 * are compressed and saved first, freeing them up as scratch space for
 * later (more critical) data. Records each pass's start/end offsets into
 * the stage 2 filter table so the post-boot writeout can replay them in
 * normal (most-critical-first) order.
 * NOTE(review): truncated excerpt -- error-path bodies, some closing
 * braces and the final return are not visible here.
 */
int dump_overlay_sequencer(void)
	struct dump_data_filter *filter = dump_config.dumper->filter;
	struct dump_data_filter *filter2 = dumper_stage2.filter;
	int pass = 0, err = 0, save = 0;
	int (*action)(unsigned long, unsigned long);

	/* Make sure gzip compression is being used */
	if (dump_config.dumper->compress->compress_type != DUMP_COMPRESS_GZIP) {
		printk(" Please set GZIP compression \n");

	/* start filling in dump data right after the header */
	dump_config.dumper->curr_offset =
	PAGE_ALIGN(dump_config.dumper->header_len);

	/* Locate the last pass */
	for (;filter->selector; filter++, pass++);

	/*
	 * Start from the end backwards: overlay involves a reverse
	 * ordering of passes, since less critical pages are more
	 * likely to be reusable as scratch space once we are through
	 */
	for (--pass, --filter; pass >= 0; pass--, filter--)

		/* Assumes passes are exclusive (even across dumpers) */
		/* Requires care when coding the selection functions */
		if ((save = filter->level_mask & dump_config.level))
			action = dump_save_data;
			action = dump_skip_data;

		/* Remember the offset where this pass started */
		/* The second stage dumper would use this */
		if (dump_buf_pending(dump_config.dumper) & (PAGE_SIZE - 1)) {
			pr_debug("Starting pass %d with pending data\n", pass);
			pr_debug("filling dummy data to page-align it\n");
			dump_config.dumper->curr_buf = (void *)PAGE_ALIGN(
			(unsigned long)dump_config.dumper->curr_buf);

		/* stage 2 replays this pass from this recorded offset */
		filter2[pass].start[0] = dump_config.dumper->curr_offset
		+ dump_buf_pending(dump_config.dumper);

		err = dump_iterator(pass, action, filter);

		filter2[pass].end[0] = dump_config.dumper->curr_offset
		+ dump_buf_pending(dump_config.dumper);
		filter2[pass].num_mbanks = 1;

		printk("dump_overlay_seq: failure %d in pass %d\n",

		printk("\n %d overlay pages %s of %d each in pass %d\n",
		err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass);
424 /* from dump_memdev.c */
425 extern struct page *dump_mem_lookup(struct dump_memdev *dev, unsigned long loc);
426 extern struct page *dump_mem_next_page(struct dump_memdev *dev);
428 static inline struct page *dump_get_saved_page(loff_t loc)
430 return (dump_mem_lookup(dump_memdev, loc >> PAGE_SHIFT));
433 static inline struct page *dump_next_saved_page(void)
435 return (dump_mem_next_page(dump_memdev));
/*
 * Iterates over list of saved dump pages. Invoked during second stage of
 * soft boot based dumping.
 *
 * Observation: If additional selection is desired at this stage then
 * a different iterator could be written which would advance
 * to the next page header everytime instead of blindly picking up
 * the data. In such a case loc would be interpreted differently.
 * At this moment however a blind pass seems sufficient, cleaner and
 * faster.
 *
 * Walks each memory bank range recorded by the stage 1 sequencer,
 * applies 'action' to every selected saved page, and clears pages after
 * use. Returns the error code on failure, else the count processed.
 * NOTE(review): truncated excerpt -- declarations of i/loc/end/sz/err/
 * page, some loop bodies and closing braces are not visible here.
 */
int dump_saved_data_iterator(int pass, int (*action)(unsigned long,
	unsigned long), struct dump_data_filter *filter)
	unsigned long count = 0;

	for (i = 0; i < filter->num_mbanks; i++) {
		loc = filter->start[i];
		end = filter->end[i];
		printk("pass %d, start off 0x%llx end offset 0x%llx\n", pass,

		/* loc will get treated as logical offset into stage 1 */
		page = dump_get_saved_page(loc);

		for (; loc < end; loc += PAGE_SIZE) {
			dump_config.dumper->curr_loc = loc;
				printk("no more saved data for pass %d\n",
			/* last chunk of a bank may be shorter than a page */
			sz = (loc + PAGE_SIZE > end) ? end - loc : PAGE_SIZE;

			if (page && filter->selector(pass, (unsigned long)page,
				pr_debug("mem offset 0x%llx\n", loc);
				if ((err = action((unsigned long)page, sz)))

			/* clear the contents of page */
			/* fixme: consider using KM_DUMP instead */
			clear_highpage(page);

			page = dump_next_saved_page();

	return err ? err : count;
/*
 * Release 'nr' consecutive saved pages back to the memory dump device
 * once their data has been consumed.
 * NOTE(review): truncated excerpt -- the accumulator/return (presumably
 * counting freed pages) is not visible here.
 */
static inline int dump_overlay_pages_done(struct page *page, int nr)
	for (; nr ; page++, nr--) {
		if (dump_check_and_free_page(dump_memdev, page))
/*
 * Stage 1 save_data action: save the page through the generic path,
 * then try to release the source pages so they can serve as overlay
 * scratch space for subsequent passes.
 * NOTE(review): truncated excerpt -- 'err' declaration, error-path
 * bodies and the return are not visible here.
 */
int dump_overlay_save_data(unsigned long loc, unsigned long len)
	/* loc carries a struct page pointer, not an address */
	struct page *page = (struct page *)loc;
	static unsigned long cnt = 0;

	if ((err = dump_generic_save_data(loc, len)))

	if (dump_overlay_pages_done(page, len >> PAGE_SHIFT)) {
		pr_debug("released page 0x%lx\n", page_to_pfn(page));
/*
 * Stage 1 skip_data action: nothing to save, but still release the
 * pages so they become reusable overlay scratch space.
 * NOTE(review): truncated excerpt -- the return is not visible here.
 */
int dump_overlay_skip_data(unsigned long loc, unsigned long len)
	/* loc carries a struct page pointer, not an address */
	struct page *page = (struct page *)loc;

	dump_overlay_pages_done(page, len >> PAGE_SHIFT);
/*
 * End of stage 1: hand over to the stage 2 dumper, persist the config
 * block across the reboot, then either soft-boot (disruptive dump) or
 * run the stage 2 writeout in place (non-disruptive).
 * NOTE(review): truncated excerpt -- 'err' declaration, intermediate
 * control flow and the return are not visible here.
 */
int dump_overlay_resume(void)
	/*
	 * switch to stage 2 dumper, save dump_config_block
	 * and then trigger a soft-boot
	 */
	dumper_stage2.header_len = dump_config.dumper->header_len;
	dump_config.dumper = &dumper_stage2;
	if ((err = dump_save_config(dump_saved_config)))

	dump_dev = dump_config.dumper->dev;

	/* If we are doing a disruptive dump, activate softboot now */
	if((panic_timeout > 0) && (!(dump_config.flags & DUMP_FLAGS_NONDISRUPT)))
		err = dump_activate_softboot();

	err = dump_switchover_stage(); /* plugs into soft boot mechanism */
	dump_config.dumper = &dumper_stage1; /* set things back */
/*
 * Stage 1 configure: flush any dump left over from a previous boot,
 * redirect the dumper at the in-memory dump device (the real target is
 * parked on dumper_stage2), sanity-check the real device, and allocate
 * the config block that will be preserved across the soft-boot.
 * NOTE(review): truncated excerpt -- 'err' declaration, several braces,
 * error returns and the final return are not visible here.
 */
int dump_overlay_configure(unsigned long devid)
	struct dump_dev *dev;
	struct dump_config_block *saved_config = dump_saved_config;

	/* If there is a previously saved dump, write it out first */
	printk("Processing old dump pending writeout\n");
	err = dump_switchover_stage();
	printk("failed to writeout saved dump\n");
	dump_free_mem(saved_config); /* testing only: not after boot */

	/* park the real target device on the stage 2 dumper */
	dev = dumper_stage2.dev = dump_config.dumper->dev;
	/* From here on the intermediate dump target is memory-only */
	dump_dev = dump_config.dumper->dev = &dump_memdev->ddev;
	if ((err = dump_generic_configure(0))) {
		printk("dump generic configure failed: err %d\n", err);

	/* stage 2 shares stage 1's staging buffer */
	dumper_stage2.dump_buf = dump_config.dumper->dump_buf;

	/* Sanity check on the actual target dump device */
	if (!dev || (err = dev->ops->open(dev, devid))) {

	/* TBD: should we release the target if this is soft-boot only ? */

	/* alloc a dump config block area to save across reboot */
	if (!(dump_saved_config = dump_alloc_mem(sizeof(struct
	dump_config_block)))) {
		printk("dump config block alloc failed\n");
		dump_generic_unconfigure();

	dump_config.dump_addr = (unsigned long)dump_saved_config;
	/* NOTE(review): %d with a sizeof (size_t) argument -- should be %zu */
	printk("Dump config block of size %d set up at 0x%lx\n",
	sizeof(*dump_saved_config), (unsigned long)dump_saved_config);
/*
 * Stage 1 unconfigure: release the real (secondary) target device,
 * tear down the generic dumper state, free the cross-boot config block,
 * and restore dump_dev to the real device.
 * NOTE(review): truncated excerpt -- 'err' declaration and the return
 * are not visible here.
 */
int dump_overlay_unconfigure(void)
	struct dump_dev *dev = dumper_stage2.dev;

	pr_debug("dump_overlay_unconfigure\n");
	/* Close the secondary device */
	dev->ops->release(dev);
	pr_debug("released secondary device\n");

	err = dump_generic_unconfigure();
	pr_debug("Unconfigured generic portions\n");
	dump_free_mem(dump_saved_config);
	dump_saved_config = NULL;
	pr_debug("Freed saved config block\n");
	/* point the dumper back at the real target device */
	dump_dev = dump_config.dumper->dev = dumper_stage2.dev;

	printk("Unconfigured overlay dumper\n");
/*
 * Stage 2 unconfigure: tear down generic state, flush any dump still
 * pending from the previous boot, free the saved config block, and
 * release the in-memory dump device.
 * NOTE(review): truncated excerpt -- 'err' declaration, some braces and
 * the return are not visible here.
 */
int dump_staged_unconfigure(void)
	struct dump_config_block *saved_config = dump_saved_config;
	struct dump_dev *dev;

	pr_debug("dump_staged_unconfigure\n");
	err = dump_generic_unconfigure();

	/* now check if there is a saved dump waiting to be written out */
	printk("Processing saved dump pending writeout\n");
	if ((err = dump_switchover_stage())) {
		printk("Error in commiting saved dump at 0x%lx\n",
		(unsigned long)saved_config);
		printk("Old dump may hog memory\n");
	dump_free_mem(saved_config);
	pr_debug("Freed saved config block\n");

	dump_saved_config = NULL;

	dev = &dump_memdev->ddev;
	dev->ops->release(dev);

	printk("Unconfigured second stage dumper\n");
658 /* ----- PASSTHRU FILTER ROUTINE --------- */
660 /* transparent - passes everything through */
/*
 * Transparent filter: accepts every saved page unconditionally so stage 2
 * passes the stage 1 data through unchanged (selection already happened
 * before the soft-boot). Body restored per the "passes everything
 * through" contract; the original return was lost to source truncation.
 */
int dump_passthru_filter(int pass, unsigned long loc, unsigned long sz)
{
	return 1;
}
/*
 * Passthru format configure_header: the header was fully formatted in
 * stage 1, so just mark it dirty to force a (re)writeout by
 * dump_passthru_update_header().
 * NOTE(review): truncated excerpt -- the return is not visible here.
 */
int dump_passthru_configure_header(const char *panic_str, const struct pt_regs *regs)
	dump_config.dumper->header_dirty++;
/* Copies bytes of data from page(s) to the specified buffer */
/*
 * Copies 'sz' bytes starting at 'page' into 'buf', mapping each page
 * with kmap_atomic and clamping the final chunk to the remaining size.
 * NOTE(review): truncated excerpt -- the loop construct, 'addr'
 * declaration, buf/page/len advancement and the return are not visible
 * here; presumably this iterates until len >= sz.
 */
int dump_copy_pages(void *buf, struct page *page, unsigned long sz)
	unsigned long len = 0, bytes;
		addr = kmap_atomic(page, KM_DUMP);
		/* copy a full page, or just the tail if sz ends mid-page */
		bytes = (sz > len + PAGE_SIZE) ? PAGE_SIZE : sz - len;
		memcpy(buf, addr, bytes);
		kunmap_atomic(addr, KM_DUMP);
	/* memset(dump_config.dumper->curr_buf, 0x57, len); temporary */
/*
 * Stage 2 header writeout: copy the dump header (saved at offset 0 of
 * the in-memory dump) into the staging buffer and write it to the start
 * of the real dump device. Clears header_dirty on success.
 * NOTE(review): truncated excerpt -- declarations of err/page, early
 * returns, some braces and the final return are not visible here.
 */
int dump_passthru_update_header(void)
	long len = dump_config.dumper->header_len;
	void *buf = dump_config.dumper->dump_buf;

	/* nothing to do if the header has not been marked for rewrite */
	if (!dump_config.dumper->header_dirty)

	pr_debug("Copying header of size %ld bytes from memory\n", len);
	if (len > DUMP_BUFFER_SIZE)

	/* header lives at logical offset 0 of the saved memory dump */
	page = dump_mem_lookup(dump_memdev, 0);
	for (; (len > 0) && page; buf += PAGE_SIZE, len -= PAGE_SIZE) {
		if ((err = dump_copy_pages(buf, page, PAGE_SIZE)))
		page = dump_mem_next_page(dump_memdev);

	printk("Incomplete header saved in mem\n");

	if ((err = dump_dev_seek(0))) {
		printk("Unable to seek to dump header offset\n");

	err = dump_ll_write(dump_config.dumper->dump_buf,
	buf - dump_config.dumper->dump_buf);
	/* short write: treat as out-of-space unless a real error code */
	if (err < dump_config.dumper->header_len)
		return (err < 0) ? err : -ENOSPC;

	dump_config.dumper->header_dirty = 0;
/* Running offset of the next expected page header across calls to
 * dump_verify_lcrash_data() (carried over between buffers). */
static loff_t next_dph_offset = 0;

/*
 * Sanity-check one lcrash page header: address must be page-aligned,
 * flags within the known set and non-zero, size at most a page.
 * NOTE(review): truncated excerpt -- the return statements (presumably
 * 0 on failure after the diagnostic, 1 on success) are not visible here.
 */
static int dph_valid(struct __dump_page *dph)
	if ((dph->dp_address & (PAGE_SIZE - 1)) || (dph->dp_flags
		> DUMP_DH_COMPRESSED) || (!dph->dp_flags) ||
		(dph->dp_size > PAGE_SIZE)) {
	printk("dp->address = 0x%llx, dp->size = 0x%x, dp->flag = 0x%x\n",
		dph->dp_address, dph->dp_size, dph->dp_flags);
/*
 * Walk the lcrash page headers embedded in 'buf' (resuming from the
 * offset left by the previous buffer) and validate each via dph_valid().
 * On exit the running offset is adjusted so the next call continues
 * where this buffer ended.
 * NOTE(review): truncated excerpt -- the error return, loop close and
 * final return are not visible here.
 */
int dump_verify_lcrash_data(void *buf, unsigned long sz)
	struct __dump_page *dph;

	/* sanity check for page headers */
	while (next_dph_offset + sizeof(*dph) < sz) {
		dph = (struct __dump_page *)(buf + next_dph_offset);
		if (!dph_valid(dph)) {
			printk("Invalid page hdr at offset 0x%llx\n",
		/* skip past this header and its payload */
		next_dph_offset += dph->dp_size + sizeof(*dph);

	/* rebase the offset relative to the start of the next buffer */
	next_dph_offset -= sz;
/*
 * TBD/Later: Consider avoiding the copy by using a scatter/gather
 * vector representation for the dump buffer
 *
 * Passthru format add_data: copy the already-formatted saved page(s)
 * into the dump buffer verbatim, verify the embedded lcrash page
 * headers, and advance the buffer fill pointer.
 * NOTE(review): truncated excerpt -- 'err' declaration, error returns
 * and the final return are not visible here.
 */
int dump_passthru_add_data(unsigned long loc, unsigned long sz)
	/* loc carries a struct page pointer, not an address */
	struct page *page = (struct page *)loc;
	void *buf = dump_config.dumper->curr_buf;

	if ((err = dump_copy_pages(buf, page, sz))) {
		printk("dump_copy_pages failed");

	if ((err = dump_verify_lcrash_data(buf, sz))) {
		printk("dump_verify_lcrash_data failed\n");
		printk("Invalid data for pfn 0x%lx\n", page_to_pfn(page));
		printk("Page flags 0x%lx\n", page->flags);
		printk("Page count 0x%x\n", atomic_read(&page->count));

	dump_config.dumper->curr_buf = buf + sz;
/* Stage 1 dumper: Saves compressed dump in memory and soft-boots system */

/* Scheme to overlay saved data in memory for writeout after a soft-boot */
/* NOTE(review): closing brace/terminator not visible in this excerpt. */
struct dump_scheme_ops dump_scheme_overlay_ops = {
	.configure = dump_overlay_configure,
	.unconfigure = dump_overlay_unconfigure,
	.sequencer = dump_overlay_sequencer,
	.iterator = dump_page_iterator,
	.save_data = dump_overlay_save_data,
	.skip_data = dump_overlay_skip_data,
	.write_buffer = dump_generic_write_buffer
/* Stage 1 scheme descriptor binding the overlay ops table.
 * NOTE(review): the .name initializer and closing brace are not visible
 * in this excerpt. */
struct dump_scheme dump_scheme_overlay = {
	.ops = &dump_scheme_overlay_ops
/* Stage 1 must use a good compression scheme - default to gzip */
extern struct __dump_compress dump_gzip_compression;

/* Stage 1 dumper: overlay scheme + lcrash format; compression is checked
 * at runtime by dump_overlay_sequencer(), which requires gzip.
 * NOTE(review): remaining initializers and closing brace not visible. */
struct dumper dumper_stage1 = {
	.scheme = &dump_scheme_overlay,
	.fmt = &dump_fmt_lcrash,
	.compress = &dump_none_compression, /* needs to be gzip */
	.filter = dump_filter_table,
/* Stage 2 dumper: Activated after softboot to write out saved dump to device */

/* Formatter that transfers data as is (transparent) w/o further conversion */
/* NOTE(review): closing brace/terminator not visible in this excerpt. */
struct dump_fmt_ops dump_fmt_passthru_ops = {
	.configure_header = dump_passthru_configure_header,
	.update_header = dump_passthru_update_header,
	.save_context = NULL, /* unused */
	.add_data = dump_passthru_add_data,
	.update_end_marker = dump_lcrash_update_end_marker
/* Stage 2 format descriptor binding the passthru ops table.
 * NOTE(review): the .name initializer and closing brace are not visible
 * in this excerpt. */
struct dump_fmt dump_fmt_passthru = {
	.ops = &dump_fmt_passthru_ops
/* Filter that simply passes along any data within the range (transparent)*/
/* Note: The start and end ranges in the table are filled in at run-time */

extern int dump_filter_none(int pass, unsigned long loc, unsigned long sz);

/* Stage 2 pass table mirroring the stage 1 passes; start[]/end[] offsets
 * are recorded by dump_overlay_sequencer() before the soft-boot.
 * NOTE(review): closing brace/terminator not visible in this excerpt. */
struct dump_data_filter dump_passthru_filtertable[MAX_PASSES] = {
{.name = "passkern", .selector = dump_passthru_filter,
	.level_mask = DUMP_MASK_KERN },
{.name = "passuser", .selector = dump_passthru_filter,
	.level_mask = DUMP_MASK_USED },
{.name = "passunused", .selector = dump_passthru_filter,
	.level_mask = DUMP_MASK_UNUSED },
{.name = "none", .selector = dump_filter_none,
	.level_mask = DUMP_MASK_REST }
/* Scheme to handle data staged / preserved across a soft-boot */
/* Stage 2 reuses the generic scheme except for unconfigure (which must
 * release the memdev) and the iterator (which walks saved pages).
 * NOTE(review): closing brace/terminator not visible in this excerpt. */
struct dump_scheme_ops dump_scheme_staged_ops = {
	.configure = dump_generic_configure,
	.unconfigure = dump_staged_unconfigure,
	.sequencer = dump_generic_sequencer,
	.iterator = dump_saved_data_iterator,
	.save_data = dump_generic_save_data,
	.skip_data = dump_generic_skip_data,
	.write_buffer = dump_generic_write_buffer
/* Stage 2 scheme descriptor binding the staged ops table.
 * NOTE(review): the .name initializer and closing brace are not visible
 * in this excerpt. */
struct dump_scheme dump_scheme_staged = {
	.ops = &dump_scheme_staged_ops
875 /* The stage 2 dumper comprising all these */
876 struct dumper dumper_stage2 = {
878 .scheme = &dump_scheme_staged,
879 .fmt = &dump_fmt_passthru,
880 .compress = &dump_none_compression,
881 .filter = dump_passthru_filtertable,