2 * Implements the routines which handle the format specific
3 * aspects of dump for the default dump format.
5 * Used in single stage dumping and stage 1 of soft-boot based dumping
6 * Saves data in LKCD (lcrash) format
8 * Previously a part of dump_base.c
10 * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
11 * Split off and reshuffled LKCD dump format code around generic
12 * dump method interfaces.
14 * Derived from original code created by
 * Matt Robinson <yakker@sourceforge.net>
17 * Contributions from SGI, IBM, HP, MCL, and others.
19 * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
20 * Copyright (C) 2000 - 2002 TurboLinux, Inc. All rights reserved.
21 * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
22 * Copyright (C) 2002 International Business Machines Corp.
24 * This code is released under version 2 of the GNU GPL.
27 #include <linux/types.h>
28 #include <linux/kernel.h>
29 #include <linux/time.h>
30 #include <linux/sched.h>
31 #include <linux/ptrace.h>
32 #include <linux/utsname.h>
34 #include <linux/dump.h>
35 #include "dump_methods.h"
40 * System dumps are currently the combination of a dump header and a set
41 * of data pages which contain the system memory. The layout of the dump
42 * (for full dumps) is as follows:
44 * +-----------------------------+
45 * | generic dump header |
46 * +-----------------------------+
47 * | architecture dump header |
48 * +-----------------------------+
50 * +-----------------------------+
52 * +-----------------------------+
54 * +-----------------------------+
56 * +-----------------------------+
62 * +-----------------------------+
64 * +-----------------------------+
66 * There are two dump headers, the first which is architecture
67 * independent, and the other which is architecture dependent. This
68 * allows different architectures to dump different data structures
69 * which are specific to their chipset, CPU, etc.
71 * After the dump headers come a succession of dump page headers along
72 * with dump pages. The page header contains information about the page
73 * size, any flags associated with the page (whether it's compressed or
74 * not), and the address of the page. After the page header is the page
75 * data, which is either compressed (or not). Each page of data is
76 * dumped in succession, until the final dump header (PAGE_END) is
77 * placed at the end of the dump, assuming the dump device isn't out
80 * This mechanism allows for multiple compression types, different
81 * types of data structures, different page ordering, etc., etc., etc.
82 * It's a very straightforward mechanism for dumping system memory.
/*
 * Global LKCD dump headers, written at the front of every dump image.
 * dump_header is architecture-independent; dump_header_asm carries the
 * per-architecture register/stack state (defined per-arch elsewhere).
 */
struct __dump_header dump_header; /* the primary dump header */
struct __dump_header_asm dump_header_asm; /* the arch-specific dump header */
/*
 * Set up common header fields (mainly the arch indep section).
 * Per-cpu register/stack state is handled separately by
 * lcrash_save_context().
 * Returns the combined size of both headers in bytes (the amount the
 * caller must reserve at the front of the dump).
 */
static int lcrash_init_dump_header(const char *panic_str)
	struct timeval dh_time;
	unsigned long temp_dha_stack[DUMP_MAX_NUM_CPUS];
	u64 temp_memsz = dump_header.dh_memory_size;

	/* make sure the dump header isn't TOO big */
	if ((sizeof(struct __dump_header) +
		sizeof(struct __dump_header_asm)) > DUMP_BUFFER_SIZE) {
			printk("lcrash_init_header(): combined "
				"headers larger than DUMP_BUFFER_SIZE!\n");

	/* initialize the dump headers to zero */
	/* preserve dha_stack[] across the memset: it may already hold
	 * pointers to pre-allocated per-cpu stack snapshot buffers */
	memcpy(&(temp_dha_stack[0]), &(dump_header_asm.dha_stack[0]),
		DUMP_MAX_NUM_CPUS * sizeof(unsigned long));
	memset(&dump_header, 0, sizeof(dump_header));
	memset(&dump_header_asm, 0, sizeof(dump_header_asm));
	/* dh_memory_size was configured earlier; restore it after the wipe */
	dump_header.dh_memory_size = temp_memsz;
	memcpy(&(dump_header_asm.dha_stack[0]), &(temp_dha_stack[0]),
		DUMP_MAX_NUM_CPUS * sizeof(unsigned long));

	/* configure dump header values */
	dump_header.dh_magic_number = DUMP_MAGIC_NUMBER;
	dump_header.dh_version = DUMP_VERSION_NUMBER;
	dump_header.dh_memory_start = PAGE_OFFSET;
	/* NOTE(review): dh_memory_end is set to the magic number rather
	 * than an end address -- presumably a sentinel filled in later;
	 * confirm against the lcrash reader. */
	dump_header.dh_memory_end = DUMP_MAGIC_NUMBER;
	dump_header.dh_header_size = sizeof(struct __dump_header);
	dump_header.dh_page_size = PAGE_SIZE;
	dump_header.dh_dump_level = dump_config.level;
	dump_header.dh_current_task = (unsigned long) current;
	dump_header.dh_dump_compress = dump_config.dumper->compress->
	dump_header.dh_dump_flags = dump_config.flags;
	dump_header.dh_dump_device = dump_config.dumper->dev->device_id;
	/* byte/page counters start at zero and grow as pages are added */
	dump_header.dh_num_bytes = 0;
	dump_header.dh_num_dump_pages = 0;
	/* timestamp the dump */
	do_gettimeofday(&dh_time);
	dump_header.dh_time.tv_sec = dh_time.tv_sec;
	dump_header.dh_time.tv_usec = dh_time.tv_usec;

	/* record the running kernel's identity (uname fields) */
	memcpy((void *)&(dump_header.dh_utsname_sysname),
		(const void *)&(system_utsname.sysname), __NEW_UTS_LEN + 1);
	memcpy((void *)&(dump_header.dh_utsname_nodename),
		(const void *)&(system_utsname.nodename), __NEW_UTS_LEN + 1);
	memcpy((void *)&(dump_header.dh_utsname_release),
		(const void *)&(system_utsname.release), __NEW_UTS_LEN + 1);
	memcpy((void *)&(dump_header.dh_utsname_version),
		(const void *)&(system_utsname.version), __NEW_UTS_LEN + 1);
	memcpy((void *)&(dump_header.dh_utsname_machine),
		(const void *)&(system_utsname.machine), __NEW_UTS_LEN + 1);
	memcpy((void *)&(dump_header.dh_utsname_domainname),
		(const void *)&(system_utsname.domainname), __NEW_UTS_LEN + 1);

	/* copies exactly DUMP_PANIC_LEN bytes -- assumes panic_str points
	 * at a buffer at least that large (TODO confirm at call sites) */
	memcpy((void *)&(dump_header.dh_panic_string),
		(const void *)panic_str, DUMP_PANIC_LEN);

	/* static portion of the arch-specific header */
	dump_header_asm.dha_magic_number = DUMP_ASM_MAGIC_NUMBER;
	dump_header_asm.dha_version = DUMP_ASM_VERSION_NUMBER;
	dump_header_asm.dha_header_size = sizeof(dump_header_asm);

	dump_header_asm.dha_physaddr_start = PHYS_OFFSET;

	dump_header_asm.dha_smp_num_cpus = num_online_cpus();
	pr_debug("smp_num_cpus in header %d\n",
		dump_header_asm.dha_smp_num_cpus);

	dump_header_asm.dha_dumping_cpu = smp_processor_id();

	return sizeof(dump_header) + sizeof(dump_header_asm);
/*
 * Build the complete dump header for this crash: initialize both headers
 * from current system state, snapshot register/stack context for every
 * CPU, then let the architecture layer fill in its specific fields.
 * Marks the header dirty so it is (re)written on the next update pass.
 */
int dump_lcrash_configure_header(const char *panic_str,
				const struct pt_regs *regs)
	dump_config.dumper->header_len = lcrash_init_dump_header(panic_str);

	/* capture register states for all processors */
	dump_save_this_cpu(regs);
	__dump_save_other_cpus(); /* side effect: silences the other cpus */

	/* configure architecture-specific dump header values */
	if ((retval = __dump_configure_header(regs)))

	/* force a (re)write of the headers on the next header update */
	dump_config.dumper->header_dirty++;
/*
 * Save register and task context for one cpu into the arch-specific
 * header: the current task pointer, its registers, and (if a snapshot
 * buffer was pre-allocated in dha_stack[cpu]) a copy of its kernel stack.
 */
void dump_lcrash_save_context(int cpu, const struct pt_regs *regs,
				struct task_struct *tsk)
	dump_header_asm.dha_smp_current_task[cpu] = (unsigned long)tsk;

	__dump_save_regs(&dump_header_asm.dha_smp_regs[cpu], regs);

	/* take a snapshot of the stack */
	/* doing this enables us to tolerate slight drifts on this cpu */
	if (dump_header_asm.dha_stack[cpu]) {
		memcpy((void *)dump_header_asm.dha_stack[cpu],
				tsk->thread_info, THREAD_SIZE);

	/* record where the live stack was, so lcrash can relocate frames */
	dump_header_asm.dha_stack_ptr[cpu] = (unsigned long)(tsk->thread_info);
/*
 * Write out the header: seek to the start of the dump device, copy both
 * headers into the staging buffer, pad to a page boundary, and write.
 * Returns 0 on success, an error otherwise.
 */
int dump_write_header(void)
	int retval = 0, size;
	void *buf = dump_config.dumper->dump_buf;

	/* accounts for DUMP_HEADER_OFFSET if applicable */
	if ((retval = dump_dev_seek(0))) {
		printk("Unable to seek to dump header offset: %d\n",

	/* stage both headers back-to-back, then page-align the write size */
	memcpy(buf, (void *)&dump_header, sizeof(dump_header));
	size = sizeof(dump_header);
	memcpy(buf + size, (void *)&dump_header_asm, sizeof(dump_header_asm));
	size += sizeof(dump_header_asm);
	size = PAGE_ALIGN(size);
	retval = dump_ll_write(buf , size);

	/* NOTE(review): returns a positive ENOSPC here, while
	 * dump_lcrash_update_end_marker() returns -ENOSPC -- the sign
	 * convention looks inconsistent; verify what callers expect. */
	return (retval >= 0) ? ENOSPC : retval;
/*
 * Rewrite the on-device headers if anything marked them dirty (page
 * counters, byte counts, ...), then clear the dirty flag.
 */
int dump_generic_update_header(void)
	if (dump_config.dumper->header_dirty) {
		if ((err = dump_write_header())) {
			printk("dump write header failed !err %d\n", err);
		/* header is in sync with the device again */
		dump_config.dumper->header_dirty = 0;
250 static inline int is_curr_stack_page(struct page *page, unsigned long size)
252 unsigned long thread_addr = (unsigned long)current_thread_info();
253 unsigned long addr = (unsigned long)page_address(page);
255 return !PageHighMem(page) && (addr < thread_addr + THREAD_SIZE)
256 && (addr + size > thread_addr);
259 static inline int is_dump_page(struct page *page, unsigned long size)
261 unsigned long addr = (unsigned long)page_address(page);
262 unsigned long dump_buf = (unsigned long)dump_config.dumper->dump_buf;
264 return !PageHighMem(page) && (addr < dump_buf + DUMP_BUFFER_SIZE)
265 && (addr + size > dump_buf);
/*
 * Decide whether a page may be run through the compressor.
 *
 * A page whose contents can change while compression is in progress
 * (the running task's stack, or the dump staging buffer itself) must be
 * dumped raw, otherwise the compressed stream would not match any
 * consistent snapshot of the page.
 */
int dump_allow_compress(struct page *page, unsigned long size)
{
	if (is_curr_stack_page(page, size))
		return 0;
	if (is_dump_page(page, size))
		return 0;
	return 1;
}
278 void lcrash_init_pageheader(struct __dump_page *dp, struct page *page,
281 memset(dp, sizeof(struct __dump_page), 0);
285 dp->dp_address = (loff_t)page_to_pfn(page) << PAGE_SHIFT;
288 dp->dp_page_index = dump_header.dh_num_dump_pages;
289 dp->dp_byte_offset = dump_header.dh_num_bytes + DUMP_BUFFER_SIZE
290 + DUMP_HEADER_OFFSET; /* ?? */
291 #endif /* DUMP_DEBUG */
/*
 * Add one region of memory to the dump at the current buffer position:
 * emit a page header, then the page data -- compressed when allowed and
 * profitable, raw otherwise -- and update the global dump accounting.
 *
 * @loc: actually a struct page * cast to unsigned long (see first line)
 * @len: number of bytes to dump starting at that page
 */
int dump_lcrash_add_data(unsigned long loc, unsigned long len)
	struct page *page = (struct page *)loc;
	void *addr, *buf = dump_config.dumper->curr_buf;
	struct __dump_page *dp = (struct __dump_page *)buf;

	/* refuse to write past the end of the staging buffer */
	if (buf > dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE)

	lcrash_init_pageheader(dp, page, len);
	buf += sizeof(struct __dump_page);

	/* map the page so we can read it regardless of highmem */
	addr = kmap_atomic(page, KM_CRASHDUMP);
	/* process at most one page per iteration of the (elided) loop */
	size = bytes = (len > PAGE_SIZE) ? PAGE_SIZE : len;
	/* check for compression */
	if (dump_allow_compress(page, bytes)) {
		size = dump_compress_data((char *)addr, bytes, (char *)buf);
	/* set the compressed flag if the page did compress */
	if (size && (size < bytes)) {
		dp->dp_flags |= DUMP_DH_COMPRESSED;
		/* compression failed -- default to raw mode */
		dp->dp_flags |= DUMP_DH_RAW;
		memcpy(buf, addr, bytes);
	/* memset(buf, 'A', size); temporary: testing only !! */
	kunmap_atomic(addr, KM_CRASHDUMP);

	/* now update the header */
	dump_header.dh_num_bytes += dp->dp_size + sizeof(*dp);
	dump_header.dh_num_dump_pages++;
	dump_config.dumper->header_dirty++;

	/* advance the staging-buffer cursor past header + data */
	dump_config.dumper->curr_buf = buf;
/*
 * Append the DUMP_DH_END marker page header and flush the staging buffer
 * to the dump device.  Writes are done in fixed DUMP_BUFFER_SIZE chunks;
 * the tail of the final chunk is padded with 'm' bytes, and any data
 * beyond one chunk is slid back to the start of the buffer.
 */
int dump_lcrash_update_end_marker(void)
	struct __dump_page *dp =
		(struct __dump_page *)dump_config.dumper->curr_buf;

	/* end marker is a bare page header: no page, zero length */
	lcrash_init_pageheader(dp, NULL, 0);
	dp->dp_flags |= DUMP_DH_END; /* tbd: truncation test ? */

	/* now update the header */
	dump_header.dh_num_bytes += sizeof(*dp);

	dump_config.dumper->curr_buf += sizeof(*dp);
	/* bytes currently staged and awaiting write-out */
	left = dump_config.dumper->curr_buf - dump_config.dumper->dump_buf;

	if ((ret = dump_dev_seek(dump_config.dumper->curr_offset))) {
		printk("Seek failed at offset 0x%llx\n",
			dump_config.dumper->curr_offset);

	/* pad the unused tail of the chunk so the write is full-sized */
	if (DUMP_BUFFER_SIZE > left)
		memset(dump_config.dumper->curr_buf, 'm',
			DUMP_BUFFER_SIZE - left);

	/* short write means the device ran out of space */
	if ((ret = dump_ll_write(dump_config.dumper->dump_buf,
		DUMP_BUFFER_SIZE)) < DUMP_BUFFER_SIZE) {
		return (ret < 0) ? ret : -ENOSPC;

	dump_config.dumper->curr_offset += DUMP_BUFFER_SIZE;

	/* slide any overflow back to the buffer start for the next write */
	if (left > DUMP_BUFFER_SIZE) {
		left -= DUMP_BUFFER_SIZE;
		memcpy(dump_config.dumper->dump_buf,
			dump_config.dumper->dump_buf + DUMP_BUFFER_SIZE, left);
		dump_config.dumper->curr_buf -= DUMP_BUFFER_SIZE;
/* Default Formatter (lcrash) */
/* Method table wiring this file's routines into the generic dump core. */
struct dump_fmt_ops dump_fmt_lcrash_ops = {
	.configure_header = dump_lcrash_configure_header,
	.update_header = dump_generic_update_header,
	.save_context = dump_lcrash_save_context,
	.add_data = dump_lcrash_add_data,
	.update_end_marker = dump_lcrash_update_end_marker
/* Format descriptor registered with the dump core for the lcrash format. */
struct dump_fmt dump_fmt_lcrash = {
	.ops = &dump_fmt_lcrash_ops