#ifdef __KERNEL__
#ifndef __ASSEMBLY__
-#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#define ZONE_PADDING(name)
#endif
+enum zone_stat_item {
+ NR_ANON_PAGES, /* Mapped anonymous pages */
+ NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
+ Only modified from process context */
+ NR_FILE_PAGES,
+ NR_SLAB, /* Pages used by slab allocator */
+ NR_PAGETABLE, /* used for pagetables */
+ NR_FILE_DIRTY,
+ NR_WRITEBACK,
+ NR_UNSTABLE_NFS, /* NFS unstable pages */
+ NR_BOUNCE,
+#ifdef CONFIG_NUMA
+ NUMA_HIT, /* allocated in intended node */
+ NUMA_MISS, /* allocated in non-intended node */
+ NUMA_FOREIGN, /* was intended here, hit elsewhere */
+ NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
+ NUMA_LOCAL, /* allocation from local node */
+ NUMA_OTHER, /* allocation from other node */
+#endif
+ NR_VM_ZONE_STAT_ITEMS };
+
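With the old ad-hoc per_cpu_pageset counters consolidated into this enum, one generic accessor can serve every statistic. A minimal sketch of a reader for this layout, assuming the zone_page_state() helper that the accompanying vmstat changes introduce (the clamp guards against transiently negative sums while per-cpu deltas are still in flight):

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	/* Per-cpu deltas not yet folded in can drive the sum below zero */
	if (x < 0)
		x = 0;
#endif
	return x;
}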
struct per_cpu_pages {
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
struct per_cpu_pageset {
struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */
-#ifdef CONFIG_NUMA
- unsigned long numa_hit; /* allocated in intended node */
- unsigned long numa_miss; /* allocated in non intended node */
- unsigned long numa_foreign; /* was intended here, hit elsewhere */
- unsigned long interleave_hit; /* interleaver prefered this zone */
- unsigned long local_node; /* allocation from local node */
- unsigned long other_node; /* allocation from other node */
+#ifdef CONFIG_SMP
+ s8 stat_threshold;
+ s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
} ____cacheline_aligned_in_smp;
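The s8 vm_stat_diff[] deltas exist so that hot allocation paths touch only a per-cpu byte; the shared atomic in struct zone is updated only when a delta crosses stat_threshold. A sketch of that fold logic, assuming the zone_pcp() accessor defined elsewhere in this header (the real update path in mm/vmstat.c is more careful about overstepping the threshold):

static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)++;
	if (*p > pcp->stat_threshold) {
		/* Fold the accumulated per-cpu delta into the zone counter */
		atomic_long_add(*p, &zone->vm_stat[item]);
		*p = 0;
	}
}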
unsigned long lowmem_reserve[MAX_NR_ZONES];
#ifdef CONFIG_NUMA
+ /*
+ * Zone reclaim becomes active if more unmapped pagecache pages than
+ * min_unmapped_ratio exist in the zone, or if slab usage exceeds
+ * min_slab_pages.
+ */
+ unsigned long min_unmapped_ratio;
+ unsigned long min_slab_pages;
struct per_cpu_pageset *pageset[NR_CPUS];
#else
struct per_cpu_pageset pageset[NR_CPUS];
/* A count of how many reclaimers are scanning this zone */
atomic_t reclaim_in_progress;
- /*
- * timestamp (in jiffies) of the last zone reclaim that did not
- * result in freeing of pages. This is used to avoid repeated scans
- * if all memory in the zone is in use.
- */
- unsigned long last_unsuccessful_zone_reclaim;
+ /* Zone statistics */
+ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
/*
 * prev_priority holds the scanning priority for this zone. It is a
 * measure of how much stress page reclaim is under - it drives the
 * swappiness decision: whether to unmap mapped pages.
*
- * temp_priority is used to remember the scanning priority at which
- * this zone was successfully refilled to free_pages == pages_high.
- *
- * Access to both these fields is quite racy even on uniprocessor. But
+ * Access to this field is quite racy even on uniprocessor. But
* it is expected to average out OK.
*/
- int temp_priority;
int prev_priority;
/*
* wait_table -- the array holding the hash table
- * wait_table_size -- the size of the hash table array
+ * wait_table_hash_nr_entries -- the size of the hash table array
- * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
+ * wait_table_bits -- wait_table_hash_nr_entries == (1 << wait_table_bits)
*
 * The purpose of all these is to keep track of the people
 * waiting for a page to become available and make them
 * runnable again when possible.
 *
 * free_area_init_core() performs the initialization of them.
*/
wait_queue_head_t * wait_table;
- unsigned long wait_table_size;
+ unsigned long wait_table_hash_nr_entries;
unsigned long wait_table_bits;
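Because wait_table_hash_nr_entries is a power of two, wait_table_bits is all a lookup needs to hash a page to its waitqueue bucket. The consumer in mm/filemap.c is essentially the following (sketch, assuming hash_ptr() from <linux/hash.h>):

static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	/* Hash the page pointer into one of 1 << wait_table_bits buckets */
	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}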
/*
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags);
+extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
+ unsigned long size);
+
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
+int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
+ struct file *, void __user *, size_t *, loff_t *);
+int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
+ struct file *, void __user *, size_t *, loff_t *);
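Each of these handlers converts a percentage knob into a per-zone page count, so min_unmapped_ratio above ends up holding pages, not a percentage. A hedged sketch of the shape such a handler takes (sysctl_min_unmapped_ratio names the global the sysctl writes; error handling beyond proc_dointvec_minmax() is omitted):

int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (rc)
		return rc;

	/* Recompute the per-zone page threshold from the new percentage */
	for_each_zone(zone)
		zone->min_unmapped_ratio = (zone->present_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}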
#include <linux/topology.h>
/* Returns the number of the current Node. */
* pages. However, it is stored with some other magic.
* (see sparse.c::sparse_init_one_section())
*
+ * Additionally during early boot we encode node id of
+ * the location of the section here to guide allocation.
+ * (see sparse.c::memory_present())
+ *
* Making it a UL at least makes someone do a cast
* before using it wrong.
*/
#define SECTION_HAS_MEM_MAP (1UL<<1)
#define SECTION_MAP_LAST_BIT (1UL<<2)
#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
+#define SECTION_NID_SHIFT 2
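The encode/decode pair this shift enables looks roughly like the following (mirroring what sparse.c::memory_present() stores before a real mem_map exists; a sketch, not the final helpers):

static inline unsigned long sparse_encode_early_nid(int nid)
{
	/* Stash the node id above the SECTION_* flag bits */
	return (unsigned long)nid << SECTION_NID_SHIFT;
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (int)(section->section_mem_map >> SECTION_NID_SHIFT);
}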
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
#define sparse_index_init(_sec, _nid) do {} while (0)
#endif /* CONFIG_SPARSEMEM */
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+#define early_pfn_in_nid(pfn, nid) (early_pfn_to_nid(pfn) == (nid))
+#else
+#define early_pfn_in_nid(pfn, nid) (1)
+#endif
+
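This predicate lets the memmap initializer skip pfns that fall inside another node's range when node memory is interleaved; the loop it serves has roughly this shape (sketch of memmap_init_zone(), with the per-page setup elided):

void memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + size; pfn++) {
		if (!early_pfn_valid(pfn))
			continue;
		if (!early_pfn_in_nid(pfn, nid))
			continue;	/* pfn belongs to another node */
		/* ... initialize the struct page for this pfn ... */
	}
}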
#ifndef early_pfn_valid
#define early_pfn_valid(pfn) (1)
#endif