/*
 * Default filters to select data to dump for various passes.
 *
 * Started: Oct 2002 - Suparna Bhattacharya <suparna@in.ibm.com>
 *	Split and rewrote default dump selection logic to generic dump
 *
 * Derived from a portion of dump_base.c created by
 *	Matt Robinson <yakker@sourceforge.net>
 *
 * Copyright (C) 1999 - 2002 Silicon Graphics, Inc. All rights reserved.
 * Copyright (C) 2001 - 2002 Matt D. Robinson. All rights reserved.
 * Copyright (C) 2002 International Business Machines Corp.
 *
 * Used during single-stage dumping and during stage 1 of the 2-stage scheme
 * (Stage 2 of the 2-stage scheme uses the fully transparent filters
 * i.e. passthru filters in dump_overlay.c)
 *
 * Future: Custom selective dump may involve a different set of filters.
 *
 * This code is released under version 2 of the GNU GPL.
 */
23 #include <linux/kernel.h>
24 #include <linux/bootmem.h>
26 #include <linux/slab.h>
27 #include <linux/dump.h>
28 #include "dump_methods.h"
30 #define DUMP_PFN_SAFETY_MARGIN 1024 /* 4 MB */
31 static unsigned long bootmap_pages;
33 /* Copied from mm/bootmem.c - FIXME */
34 /* return the number of _pages_ that will be allocated for the boot bitmap */
35 void dump_calc_bootmap_pages (void)
37 unsigned long mapsize;
38 unsigned long pages = num_physpages;
40 mapsize = (pages+7)/8;
41 mapsize = (mapsize + ~PAGE_MASK) & PAGE_MASK;
42 mapsize >>= PAGE_SHIFT;
43 bootmap_pages = mapsize + DUMP_PFN_SAFETY_MARGIN + 1;
48 extern unsigned long min_low_pfn;
51 int dump_low_page(struct page *p)
53 return ((page_to_pfn(p) >= min_low_pfn) &&
54 (page_to_pfn(p) < (min_low_pfn + bootmap_pages)));
/*
 * A "kernel" page is either reserved-but-not-in-use, or in use without being
 * on an LRU list (i.e. not ordinary pageable user data).
 */
static inline int kernel_page(struct page *p)
{
	/* FIXME: Need to exclude hugetlb pages. Clue: reserved but inuse */
	return (PageReserved(p) && !PageInuse(p)) ||
	       (!PageLRU(p) && PageInuse(p));
}
/*
 * A "user" page is in use, not reserved, and sits on an LRU list
 * (ordinary pageable data).
 */
static inline int user_page(struct page *p)
{
	return PageInuse(p) && (!PageReserved(p) && PageLRU(p));
}
/* An unreferenced page is neither in use nor reserved — free memory. */
static inline int unreferenced_page(struct page *p)
{
	return !PageInuse(p) && !PageReserved(p);
}
74 /* loc marks the beginning of a range of pages */
75 int dump_filter_kernpages(int pass, unsigned long loc, unsigned long sz)
77 struct page *page = (struct page *)loc;
78 /* if any of the pages is a kernel page, select this set */
80 if (dump_low_page(page) || kernel_page(page))
89 /* loc marks the beginning of a range of pages */
90 int dump_filter_userpages(int pass, unsigned long loc, unsigned long sz)
92 struct page *page = (struct page *)loc;
94 /* select if the set has any user page, and no kernel pages */
96 if (user_page(page) && !dump_low_page(page)) {
98 } else if (kernel_page(page) || dump_low_page(page)) {
109 /* loc marks the beginning of a range of pages */
110 int dump_filter_unusedpages(int pass, unsigned long loc, unsigned long sz)
112 struct page *page = (struct page *)loc;
114 /* select if the set does not have any used pages */
116 if (!unreferenced_page(page) || dump_low_page(page)) {
/* dummy: last (non-existent) pass */
/*
 * Always selects nothing; terminates the pass sequence.  All parameters
 * are ignored.
 */
int dump_filter_none(int pass, unsigned long loc, unsigned long sz)
{
	return 0;
}
131 /* TBD: resolve level bitmask ? */
132 struct dump_data_filter dump_filter_table[] = {
133 { .name = "kern", .selector = dump_filter_kernpages,
134 .level_mask = DUMP_MASK_KERN},
135 { .name = "user", .selector = dump_filter_userpages,
136 .level_mask = DUMP_MASK_USED},
137 { .name = "unused", .selector = dump_filter_unusedpages,
138 .level_mask = DUMP_MASK_UNUSED},
139 { .name = "none", .selector = dump_filter_none,
140 .level_mask = DUMP_MASK_REST},
141 { .name = "", .selector = NULL, .level_mask = 0}