#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <asm/atomic.h>
/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif

struct free_area {
	struct list_head	free_list;
};
/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines. There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_maxaligned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif
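/*
 * For illustration: with CONFIG_SMP, a use such as ZONE_PADDING(_pad1_)
 * expands to "struct zone_padding _pad1_;", a zero-size member whose
 * cacheline alignment pushes the following fields onto a fresh cacheline;
 * without CONFIG_SMP it expands to nothing and costs no space at all.
 */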
struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int low;		/* low watermark, refill needed */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	struct list_head list;	/* the list of pages */
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
} ____cacheline_aligned_in_smp;
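/*
 * Sketch of how the hot/cold split is meant to be used (a hedged sketch
 * of the conventions in mm/page_alloc.c, not a verbatim excerpt):
 *
 *	struct per_cpu_pages *pcp;
 *
 *	pcp = &zone->pageset[get_cpu()].pcp[0];	// 0: cache-hot pages
 *	if (pcp->count <= pcp->low)
 *		// refill from the buddy lists, pcp->batch pages at a time
 *	if (pcp->count > pcp->high)
 *		// return pcp->batch pages to the buddy allocator
 *
 * Pages freed while likely still cache-warm go to pcp[0]; pages freed in
 * bulk (e.g. by page reclaim) go to the cold list pcp[1].
 */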
#define ZONE_DMA		0
#define ZONE_NORMAL		1
#define ZONE_HIGHMEM		2

#define MAX_NR_ZONES		3	/* Sync this with ZONES_SHIFT */
#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */

#define GFP_ZONEMASK	0x03
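/*
 * Illustrative use of GFP_ZONEMASK (a sketch of zonelist selection in the
 * allocator, assuming the low two gfp bits carry the zone modifier):
 *
 *	struct zonelist *zonelist;
 *
 *	zonelist = pgdat->node_zonelists + (gfp_mask & GFP_ZONEMASK);
 *
 * i.e. the low bits of the gfp mask index straight into node_zonelists[].
 */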
/*
 * On machines where it is needed (eg PCs) we divide physical memory
 * into multiple physical zones. On a PC we have 3 zones:
 *
 * ZONE_DMA	  < 16 MB	ISA DMA capable memory
 * ZONE_NORMAL	16-896 MB	direct mapped by the kernel
 * ZONE_HIGHMEM	 > 896 MB	only page cache and user processes
 */
struct zone {
	/*
	 * Commonly accessed fields:
	 */
	spinlock_t		lock;
	unsigned long		free_pages;
	unsigned long		pages_min, pages_low, pages_high;
	/*
	 * protection[] is a pre-calculated number of extra pages that must be
	 * available in a zone in order for __alloc_pages() to allocate memory
	 * from the zone. i.e., for a GFP_KERNEL alloc of "order" there must
	 * be "(1<<order) + protection[ZONE_NORMAL]" free pages in the zone
	 * for us to choose to allocate the page from that zone.
	 *
	 * It uses both min_free_kbytes and sysctl_lower_zone_protection.
	 * The protection values are recalculated if either of these values
	 * change. The array elements are in zonelist order:
	 * [0] == GFP_DMA, [1] == GFP_KERNEL, [2] == GFP_HIGHMEM.
	 */
	unsigned long		protection[MAX_NR_ZONES];
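	/*
	 * Worked example of the rule above (numbers invented for
	 * illustration): for an order-2 GFP_KERNEL allocation, the
	 * allocator only picks this zone if
	 *
	 *	zone->free_pages >= (1 << 2) + zone->protection[1]
	 *
	 * so with protection[1] == 120 the zone needs at least 124 free
	 * pages. Lower zones thereby keep a reserve that fallbacks from
	 * higher zones may not dip into.
	 */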
	ZONE_PADDING(_pad1_)

	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	atomic_t		nr_scan_active;
	atomic_t		nr_scan_inactive;
	unsigned long		nr_active;
	unsigned long		nr_inactive;
	int			all_unreclaimable; /* All pages pinned */
	unsigned long		pages_scanned;	   /* since last reclaim */
	ZONE_PADDING(_pad2_)

	/*
	 * prev_priority holds the scanning priority for this zone. It is
	 * defined as the scanning priority at which we achieved our reclaim
	 * target at the previous try_to_free_pages() or balance_pgdat()
	 * invocation.
	 *
	 * We use prev_priority as a measure of how much stress page reclaim is
	 * under - it drives the swappiness decision: whether to unmap mapped
	 * pages.
	 *
	 * temp_priority is used to remember the scanning priority at which
	 * this zone was successfully refilled to free_pages == pages_high.
	 *
	 * Access to both these fields is quite racy even on uniprocessor. But
	 * it is expected to average out OK.
	 */
	int temp_priority;
	int prev_priority;
	/*
	 * free areas of different sizes
	 */
	struct free_area	free_area[MAX_ORDER];
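	/*
	 * For illustration: free_area[k] holds free blocks of 2^k
	 * contiguous pages, so with MAX_ORDER == 11 the largest buddy
	 * block is 2^10 pages (4 MB with 4 KB pages). An order-k
	 * allocation that finds free_area[k] empty splits a block from
	 * free_area[k+1] and returns the unused half to free_area[k].
	 */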
	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_size	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_size;
	unsigned long		wait_table_bits;
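	/*
	 * Sketch of the hashed lookup (modelled on how mm/filemap.c maps a
	 * page to its bucket; treat this as illustrative, not authoritative):
	 *
	 *	wait_queue_head_t *wq;
	 *
	 *	wq = &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
	 *
	 * All pages whose addresses hash to the same bucket share one
	 * waitqueue, hence the wake-all / re-check discipline above.
	 */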
	ZONE_PADDING(_pad3_)

	struct per_cpu_pageset	pageset[NR_CPUS];
	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	struct page		*zone_mem_map;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * rarely used fields:
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */
} ____cacheline_maxaligned_in_smp;
/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12
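/*
 * Worked example (illustrative numbers): at DEF_PRIORITY, a zone with
 * nr_inactive == 1 << 20 pages has 1048576 >> 12 == 256 pages scanned per
 * round. Reclaim retries at successively lower priority values, doubling
 * the scan each round, down to priority 0 (scan everything).
 */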
/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * Right now a zonelist takes up less than a cacheline. We never
 * modify it apart from boot-up, and only a few indices are used,
 * so despite the zonelist table being relatively big, the cache
 * footprint of this construct is very small.
 */
struct zonelist {
	struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited
};
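/*
 * Illustrative walk over a zonelist (the NULL entry terminates it); this
 * is the shape of the fallback loop in __alloc_pages():
 *
 *	struct zone **z;
 *
 *	for (z = zonelist->zones; *z != NULL; z++)
 *		// try to satisfy the allocation from *z, best zone first
 */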
/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_NR_ZONES];
	struct page *node_mem_map;
	struct bootmem_data *bdata;
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	struct pglist_data *pgdat_next;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
} pg_data_t;
#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
extern struct pglist_data *pgdat_list;

void get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone);
/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)
/**
 * for_each_pgdat - helper macro to iterate over all nodes
 * @pgdat - pointer to a pg_data_t variable
 *
 * Meant to help with common loops of the form
 *
 *	pgdat = pgdat_list;
 *	while (pgdat) {
 *		...
 *		pgdat = pgdat->pgdat_next;
 *	}
 */
#define for_each_pgdat(pgdat) \
	for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next)
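/*
 * Example use (hypothetical snippet, not from this file): totalling the
 * physical pages present on every node in the system:
 *
 *	pg_data_t *pgdat;
 *	unsigned long total = 0;
 *
 *	for_each_pgdat(pgdat)
 *		total += pgdat->node_present_pages;
 */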
/*
 * next_zone - helper magic for for_each_zone()
 * Thanks to William Lee Irwin III for this piece of ingenuity.
 */
static inline struct zone *next_zone(struct zone *zone)
{
	pg_data_t *pgdat = zone->zone_pgdat;

	if (zone - pgdat->node_zones < MAX_NR_ZONES - 1)
		zone++;
	else if (pgdat->pgdat_next) {
		pgdat = pgdat->pgdat_next;
		zone = pgdat->node_zones;
	} else
		zone = NULL;
	return zone;
}
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in. This basically means for_each_zone() is an
 * easier to read version of this piece of code:
 *
 *	for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next)
 *		for (i = 0; i < MAX_NR_ZONES; ++i) {
 *			struct zone *z = pgdat->node_zones + i;
 *			...
 *		}
 */
#define for_each_zone(zone) \
	for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone))
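/*
 * Example use (hypothetical snippet): summing the free pages of every
 * zone on every node:
 *
 *	struct zone *zone;
 *	unsigned long free = 0;
 *
 *	for_each_zone(zone)
 *		free += zone->free_pages;
 */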
/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not. This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
	return (zone - zone->zone_pgdat->node_zones == ZONE_HIGHMEM);
}

static inline int is_normal(struct zone *zone)
{
	return (zone - zone->zone_pgdat->node_zones == ZONE_NORMAL);
}
/* These two functions are used to set up the per zone pages min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *);
int lower_zone_protection_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *);
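/*
 * These back the /proc/sys/vm/min_free_kbytes and
 * /proc/sys/vm/lower_zone_protection sysctls; e.g. (from a shell, value
 * illustrative only):
 *
 *	echo 1024 > /proc/sys/vm/min_free_kbytes
 *
 * which causes the watermarks, and hence protection[], to be recalculated
 * for every zone.
 */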
#include <linux/topology.h>
/* Returns the number of the current Node. */
#define numa_node_id()		(cpu_to_node(smp_processor_id()))
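/*
 * Example use (hypothetical snippet): finding the pg_data_t for the node
 * the calling CPU sits on, e.g. to prefer node-local memory:
 *
 *	pg_data_t *pgdat = NODE_DATA(numa_node_id());
 */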
#ifndef CONFIG_DISCONTIGMEM

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map
#define MAX_NODES_SHIFT		1
#define pfn_to_nid(pfn)		(0)
#else /* CONFIG_DISCONTIGMEM */

#include <asm/mmzone.h>
#if BITS_PER_LONG == 32 || defined(ARCH_HAS_ATOMIC_UNSIGNED)
/*
 * with 32 bit page->flags field, we reserve 8 bits for node/zone info.
 * there are 3 zones (2 bits) and this leaves 8-2=6 bits for nodes.
 */
#define MAX_NODES_SHIFT		6
#elif BITS_PER_LONG == 64
/*
 * with 64 bit flags field, there's plenty of room.
 */
#define MAX_NODES_SHIFT		10
#endif

#endif /* !CONFIG_DISCONTIGMEM */
#if NODES_SHIFT > MAX_NODES_SHIFT
#error NODES_SHIFT > MAX_NODES_SHIFT
#endif

/* There are currently 3 zones: DMA, Normal & Highmem, thus we need 2 bits */
#define MAX_ZONES_SHIFT		2

#if ZONES_SHIFT > MAX_ZONES_SHIFT
#error ZONES_SHIFT > MAX_ZONES_SHIFT
#endif
extern DECLARE_BITMAP(node_online_map, MAX_NUMNODES);

#if defined(CONFIG_DISCONTIGMEM) || defined(CONFIG_NUMA)

#define node_online(node)	test_bit(node, node_online_map)
#define node_set_online(node)	set_bit(node, node_online_map)
#define node_set_offline(node)	clear_bit(node, node_online_map)
static inline unsigned int num_online_nodes(void)
{
	int i, num = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (node_online(i))
			num++;
	}
	return num;
}
#else /* !CONFIG_DISCONTIGMEM && !CONFIG_NUMA */

#define node_online(node) \
	({ BUG_ON((node) != 0); test_bit(node, node_online_map); })
#define node_set_online(node) \
	({ BUG_ON((node) != 0); set_bit(node, node_online_map); })
#define node_set_offline(node) \
	({ BUG_ON((node) != 0); clear_bit(node, node_online_map); })
#define num_online_nodes()	1

#endif /* CONFIG_DISCONTIGMEM || CONFIG_NUMA */

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */