2 * Default single stage dump scheme methods
4 * Previously a part of dump_base.c
6 * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
7 * Split and rewrote LKCD dump scheme to generic dump method
9 * Derived from original code created by
10 * Matt Robinson <yakker@sourceforge.net>
12 * Contributions from SGI, IBM, HP, MCL, and others.
14 * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
15 * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
16 * Copyright (C) 2002 International Business Machines Corp.
18 * This code is released under version 2 of the GNU GPL.
22 * Implements the default dump scheme, i.e. single-stage gathering and
23 * saving of dump data directly to the target device, which operates in
24 * a push mode, where the dumping system decides what data it saves
25 * taking into account pre-specified dump config options.
27 * Aside: The 2-stage dump scheme, where there is a soft-reset between
28 * the gathering and saving phases, also reuses some of these
29 * default routines (see dump_overlay.c)
31 #include <linux/types.h>
32 #include <linux/kernel.h>
34 #include <linux/slab.h>
35 #include <linux/delay.h>
36 #include <linux/reboot.h>
37 #include <linux/nmi.h>
38 #include <linux/dump.h>
39 #include "dump_methods.h"
41 extern int panic_timeout; /* time before reboot */
43 extern void dump_speedo(int);
45 /* Default sequencer used during single stage dumping */
46 /* Also invoked during stage 2 of soft-boot based dumping */
/*
 * dump_generic_sequencer - default sequencer for single-stage dumping.
 *
 * Walks the dumper's filter table once per entry ("pass"). For each pass
 * the data is either saved or skipped depending on whether the filter's
 * level_mask intersects the configured dump level; the chosen action is
 * then applied to every page in the pass via dump_iterator().
 *
 * Returns: 0 on success, or the negative error propagated from the
 * iterator of the pass that failed.
 *
 * NOTE(review): several structural lines (braces, the else branch, the
 * break on iterator error) were lost in extraction; surviving lines are
 * kept verbatim below — reconcile against the original dump_scheme.c.
 */
47 int dump_generic_sequencer(void)
49 struct dump_data_filter *filter = dump_config.dumper->filter;
50 int pass = 0, err = 0, save = 0;
/* action is dump_save_data or dump_skip_data, chosen per pass */
51 int (*action)(unsigned long, unsigned long);
54 * We want to save the more critical data areas first in
55 * case we run out of space, encounter i/o failures, or get
56 * interrupted otherwise and have to give up midway
57 * So, run through the passes in increasing order
/* filter table is terminated by an entry with a NULL selector */
59 for (;filter->selector; filter++, pass++)
61 /* Assumes passes are exclusive (even across dumpers) */
62 /* Requires care when coding the selection functions */
/* save doubles as a flag: nonzero level overlap => save this pass */
63 if ((save = filter->level_mask & dump_config.level))
64 action = dump_save_data;
66 action = dump_skip_data;
/* on success the iterator returns a page count, not 0 */
68 if ((err = dump_iterator(pass, action, filter)) < 0)
71 printk("\n %d dump pages %s of %d each in pass %d\n",
72 err, save ? "saved" : "skipped", DUMP_PAGE_SIZE, pass);
76 return (err < 0) ? err : 0;
/*
 * dump_get_page - translate a dump offset into a struct page pointer.
 * @loc: byte offset into the physical memory range being dumped.
 *
 * Shifts @loc down to a pfn, applies the platform physical-memory base
 * offset (PHYS_PFN_OFFSET, see ARM/XScale note below), and returns the
 * page only if __dump_page_valid() accepts the pfn.
 *
 * NOTE(review): the invalid-pfn return path (presumably `return NULL;`)
 * was lost in extraction; surviving lines kept verbatim.
 */
79 static inline struct page *dump_get_page(loff_t loc)
82 unsigned long page_index = loc >> PAGE_SHIFT;
84 /* todo: complete this to account for ia64/discontig mem */
85 /* todo: and to check for validity, ram page, no i/o mem etc */
86 /* need to use pfn/physaddr equiv of kern_addr_valid */
89 * On ARM/XScale system, the physical address starts from
90 * PHYS_OFFSET, and it maybe the situation that PHYS_OFFSET != 0.
91 * For example on Intel's PXA250, PHYS_OFFSET = 0xa0000000. And the
92 * page index starts from PHYS_PFN_OFFSET. When configuring
93 * filter, filter->start is assigned to 0 in dump_generic_configure.
94 * Here we want to adjust it by adding PHYS_PFN_OFFSET to it!
97 page_index += PHYS_PFN_OFFSET;
/* only hand back pages the dump layer has validated as dumpable RAM */
99 if (__dump_page_valid(page_index))
100 return pfn_to_page(page_index);
106 /* Default iterator: for singlestage and stage 1 of soft-boot dumping */
107 /* Iterates over range of physical memory pages in DUMP_PAGE_SIZE increments */
/*
 * dump_page_iterator - default iterator over physical memory pages.
 * @pass:   current filter pass number (for selection and diagnostics).
 * @action: applied to each selected page (dump_save_data / dump_skip_data).
 * @filter: supplies the memory-bank ranges and the selection predicate.
 *
 * Iterates each configured memory bank [start[i], end[i]) in
 * DUMP_PAGE_SIZE steps, records the current location in the dumper
 * (curr_loc, so failure reports can identify where we were), and invokes
 * @action on every page the filter's selector accepts.
 *
 * Returns: the count of pages acted on, or a negative error from @action.
 *
 * NOTE(review): the `struct page *page` declaration, the tail of the
 * action() call, the printk arguments, and several closing braces were
 * lost in extraction; surviving lines kept verbatim.
 */
108 int dump_page_iterator(int pass, int (*action)(unsigned long, unsigned long),
109 struct dump_data_filter *filter)
111 /* Todo : fix unit, type */
112 loff_t loc, start, end;
113 int i, count = 0, err = 0;
116 /* Todo: Add membanks code */
117 /* TBD: Check if we need to address DUMP_PAGE_SIZE < PAGE_SIZE */
119 for (i = 0; i < filter->num_mbanks; i++) {
120 start = filter->start[i];
121 end = filter->end[i];
122 for (loc = start; loc < end; loc += DUMP_PAGE_SIZE) {
/* remember where we are: used by save_data and error reporting */
123 dump_config.dumper->curr_loc = loc;
124 page = dump_get_page(loc);
/* holes / invalid pfns yield NULL pages and are silently stepped over */
125 if (page && filter->selector(pass,
126 (unsigned long) page, DUMP_PAGE_SIZE)) {
127 if ((err = action((unsigned long)page,
129 printk("dump_page_iterator: err %d for "
130 "loc 0x%llx, in pass %d\n",
/* abort on first action failure, reporting pages done so far */
132 return err ? err : count;
139 return err ? err : count;
143 * Base function that saves the selected block of data in the dump
144 * Action taken when iterator decides that data needs to be saved
/*
 * dump_generic_save_data - save one block of data into the dump stream.
 * @loc: address of the data block (a struct page pointer cast to ulong,
 *       per dump_page_iterator).
 * @sz:  size of the block (DUMP_PAGE_SIZE from the default iterator).
 *
 * Action callback invoked when the iterator selects data for saving.
 * Appends the block via dump_add_data(); whenever the staging buffer
 * fills to DUMP_BUFFER_SIZE it is flushed with dump_write_buffer().
 * After each flush a few periodic chores run, keyed off the running
 * block count: every 64th flush the dump header is rewritten and the
 * NMI watchdog touched; every 8th, progress is shown via dump_speedo().
 * Leftover bytes beyond the flushed buffer are copied back to the start
 * of the staging area so accumulation can continue.
 *
 * Returns: 0 on success (inferred), -ENOSPC on a short device write, or
 * the error from dump_add_data()/dump_update_header().
 *
 * NOTE(review): declarations of `buf` and `ret` handling, multiple
 * closing braces, and the early-error return bodies were lost in
 * extraction; surviving lines kept verbatim.
 */
146 int dump_generic_save_data(unsigned long loc, unsigned long sz)
149 void *dump_buf = dump_config.dumper->dump_buf;
150 int left, bytes, ret;
/* stage the new data; nonzero return means the add failed */
152 if ((ret = dump_add_data(loc, sz))) {
155 buf = dump_config.dumper->curr_buf;
157 /* If we've filled up the buffer write it out */
158 if ((left = buf - dump_buf) >= DUMP_BUFFER_SIZE) {
159 bytes = dump_write_buffer(dump_buf, DUMP_BUFFER_SIZE);
/* a short write is treated as out-of-space; 0 passes through as-is */
160 if (bytes < DUMP_BUFFER_SIZE) {
161 printk("dump_write_buffer failed %d\n", bytes);
162 return bytes ? -ENOSPC : bytes;
167 /* -- A few chores to do from time to time -- */
168 dump_config.dumper->count++;
/* every 64th buffer: refresh the on-disk header and pet the watchdog */
170 if (!(dump_config.dumper->count & 0x3f)) {
171 /* Update the header every one in a while */
/* poison the just-flushed buffer so stale data is detectable */
172 memset((void *)dump_buf, 'b', DUMP_BUFFER_SIZE);
173 if ((ret = dump_update_header()) < 0) {
179 touch_nmi_watchdog();
180 } else if (!(dump_config.dumper->count & 0x7)) {
181 /* Show progress so the user knows we aren't hung */
182 dump_speedo(dump_config.dumper->count >> 3);
184 /* Todo: Touch/Refresh watchdog */
186 /* --- Done with periodic chores -- */
/* keep a copy of the last page just below the buffer — the soft-boot
 * (two-stage) scheme uses it to verify what was written */
189 * extra bit of copying to simplify verification
190 * in the second kernel boot based scheme
192 memcpy(dump_buf - DUMP_PAGE_SIZE, dump_buf +
193 DUMP_BUFFER_SIZE - DUMP_PAGE_SIZE, DUMP_PAGE_SIZE);
195 /* now adjust the leftover bits back to the top of the page */
196 /* this case would not arise during stage 2 (passthru) */
/* 'z' poison, then slide the overflow bytes back to the buffer start */
197 memset(dump_buf, 'z', DUMP_BUFFER_SIZE);
199 memcpy(dump_buf, dump_buf + DUMP_BUFFER_SIZE, left);
201 buf -= DUMP_BUFFER_SIZE;
202 dump_config.dumper->curr_buf = buf;
/*
 * dump_generic_skip_data - no-op action for data excluded by dump level.
 * @loc: address of the block (unused).
 * @sz:  size of the block (unused).
 *
 * Counterpart to dump_generic_save_data for passes whose level_mask does
 * not match the configured dump level (see dump_generic_sequencer).
 * NOTE(review): the body (presumably `return 0;`) was lost in
 * extraction; surviving lines kept verbatim.
 */
208 int dump_generic_skip_data(unsigned long loc, unsigned long sz)
210 /* dummy by default */
215 * Common low level routine to write a buffer to current dump device
216 * Expects checks for space etc to have been taken care of by the caller
217 * Operates serially at the moment for simplicity.
218 * TBD/Todo: Consider batching for improved throughput
/*
 * dump_ll_write - low-level serial write of a buffer to the dump device.
 * @buf: data to write.
 * @len: number of bytes to write.
 *
 * Polls dump_dev_ready() until the device leaves -EAGAIN, then writes in
 * chunks via dump_dev_write(), waiting for each chunk to complete before
 * advancing buf/len by the amount transferred. Purely synchronous by
 * design (see file header: batching is a TBD).
 *
 * Returns: total bytes transferred, or the negative error from the
 * device ready/write path.
 *
 * NOTE(review): the `int ret` declaration, the enclosing while(len)
 * loop header, and error-path braces were lost in extraction; surviving
 * lines kept verbatim.
 */
220 int dump_ll_write(void *buf, unsigned long len)
222 long transferred = 0, last_transfer = 0;
225 /* make sure device is ready */
/* busy-wait: -EAGAIN means "not ready yet", anything else is final */
226 while ((ret = dump_dev_ready(NULL)) == -EAGAIN);
228 printk("dump_dev_ready failed !err %d\n", ret);
/* <= 0 from the device write is a hard failure, not a retry */
233 if ((last_transfer = dump_dev_write(buf, len)) <= 0) {
235 printk("dump_dev_write failed !err %d\n",
239 /* wait till complete */
240 while ((ret = dump_dev_ready(buf)) == -EAGAIN)
244 printk("i/o failed !err %d\n", ret);
/* advance past the chunk that just completed */
248 len -= last_transfer;
249 buf += last_transfer;
250 transferred += last_transfer;
252 return (ret < 0) ? ret : transferred;
255 /* default writeout routine for single dump device */
256 /* writes out the dump data ensuring enough space is left for the end marker */
/*
 * dump_generic_write_buffer - default writeout for a single dump device.
 * @buf: staged dump data to write.
 * @len: number of bytes to write.
 *
 * First seeks past the intended end of this write plus two spare
 * DUMP_BUFFER_SIZEs to prove enough space remains for the data and the
 * end marker; then seeks back to curr_offset, writes via dump_ll_write(),
 * and advances curr_offset by @len.
 *
 * Returns: bytes written on success; a seek/write error, with a zero
 * write mapped to -ENOSPC (inferred from line 278).
 *
 * NOTE(review): declarations of `written`/`err`, the error returns and
 * closing braces were lost in extraction; surviving lines kept verbatim.
 */
257 int dump_generic_write_buffer(void *buf, unsigned long len)
262 /* check for space */
/* probe seek: reserve room for this write + 2 buffers for the trailer */
263 if ((err = dump_dev_seek(dump_config.dumper->curr_offset + len +
264 2*DUMP_BUFFER_SIZE)) < 0) {
265 printk("dump_write_buffer: insuff space after offset 0x%llx\n",
266 dump_config.dumper->curr_offset);
269 /* alignment check would happen as a side effect of this */
270 if ((err = dump_dev_seek(dump_config.dumper->curr_offset)) < 0)
273 written = dump_ll_write(buf, len);
/* short/zero write: normalize 0 to -ENOSPC, keep negative errors */
278 written = written ? -ENOSPC : written;
280 dump_config.dumper->curr_offset += len;
/*
 * dump_generic_configure - set up the dumper for single-stage dumping.
 * @devid: identifier passed through to the dump device's open() op.
 *
 * Allocates the staging area (one DUMP_BUFFER_SIZE plus 3 guard/work
 * pages), verifies page alignment, points dump_buf one page into the
 * allocation (the page below it is used by the save path's verification
 * copy), opens the dump device, and seeds each filter's memory-bank
 * ranges from the pgdat list when they were left at 0/0.
 *
 * Returns: 0 on success (inferred); -ENOMEM on allocation/alignment
 * failure; the device open() error otherwise.
 *
 * NOTE(review): declarations of `buf`/`ret`/`pgdat`/`i`, the range
 * assignments inside the for_each_pgdat body, and closing braces were
 * lost in extraction; surviving lines kept verbatim.
 */
285 int dump_generic_configure(unsigned long devid)
287 struct dump_dev *dev = dump_config.dumper->dev;
288 struct dump_data_filter *filter;
292 /* Allocate the dump buffer and initialize dumper state */
293 /* Assume that we get aligned addresses */
294 if (!(buf = dump_alloc_mem(DUMP_BUFFER_SIZE + 3 * DUMP_PAGE_SIZE)))
297 if ((unsigned long)buf & (PAGE_SIZE - 1)) {
298 /* sanity check for page aligned address */
300 return -ENOMEM; /* fixme: better error code */
303 /* Initialize the rest of the fields */
/* first page of the allocation is reserved; dump_buf starts after it */
304 dump_config.dumper->dump_buf = buf + DUMP_PAGE_SIZE;
307 /* Open the dump device */
311 if ((ret = dev->ops->open(dev, devid))) {
315 /* Initialise the memory ranges in the dump filter */
/* only fill in ranges the caller left unset (both endpoints zero) */
316 for (filter = dump_config.dumper->filter ;filter->selector; filter++) {
317 if (!filter->start[0] && !filter->end[0]) {
/* one memory bank per NUMA node, spanning its pfn range */
320 for_each_pgdat(pgdat) {
322 (loff_t)pgdat->node_start_pfn << PAGE_SHIFT;
324 (loff_t)(pgdat->node_start_pfn + pgdat->node_spanned_pages) << PAGE_SHIFT;
327 filter->num_mbanks = i;
/*
 * dump_generic_unconfigure - tear down state set up by
 * dump_generic_configure(): close the dump device and release the
 * staging buffer (freeing from one page below dump_buf, mirroring the
 * +DUMP_PAGE_SIZE offset applied at allocation time), then clear the
 * buffer pointers so a stale dumper cannot reuse freed memory.
 *
 * Returns: 0 on success (inferred); NOTE(review): the `int ret`
 * declaration, the release() error return, the `if (buf)` guard around
 * the free, and the final return were lost in extraction; surviving
 * lines kept verbatim.
 */
334 int dump_generic_unconfigure(void)
336 struct dump_dev *dev = dump_config.dumper->dev;
337 void *buf = dump_config.dumper->dump_buf;
340 pr_debug("Generic unconfigure\n");
341 /* Close the dump device */
342 if (dev && (ret = dev->ops->release(dev)))
345 printk("Closed dump device\n");
/* free from the true allocation base, one page below dump_buf */
348 dump_free_mem((buf - DUMP_PAGE_SIZE));
350 dump_config.dumper->curr_buf = dump_config.dumper->dump_buf = NULL;
351 pr_debug("Released dump buffer\n");
357 /* Set up the default dump scheme */
/*
 * Operations vector binding the generic routines above into the
 * single-stage dump scheme. dump_overlay.c reuses several of these for
 * the two-stage (soft-boot) scheme, per the file header.
 * NOTE(review): the closing `};` was lost in extraction.
 */
359 struct dump_scheme_ops dump_scheme_singlestage_ops = {
360 .configure = dump_generic_configure,
361 .unconfigure = dump_generic_unconfigure,
362 .sequencer = dump_generic_sequencer,
363 .iterator = dump_page_iterator,
364 .save_data = dump_generic_save_data,
365 .skip_data = dump_generic_skip_data,
366 .write_buffer = dump_generic_write_buffer,
/*
 * The single-stage scheme descriptor: just a name plus the ops vector
 * above. Referenced by dumper definitions (e.g. dumper_singlestage).
 * NOTE(review): the closing `};` was lost in extraction.
 */
369 struct dump_scheme dump_scheme_singlestage = {
370 .name = "single-stage",
371 .ops = &dump_scheme_singlestage_ops
374 /* The single stage dumper comprising all these */
375 struct dumper dumper_singlestage = {
376 .name = "single-stage",
377 .scheme = &dump_scheme_singlestage,
378 .fmt = &dump_fmt_lcrash,
379 .compress = &dump_none_compression,
380 .filter = dump_filter_table,