#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

#define FOR_ALL_ZONES(x) x##_DMA, x##_DMA32, x##_NORMAL, x##_HIGH
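
/*
 * For illustration: FOR_ALL_ZONES(PGALLOC) expands to
 * PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH,
 * i.e. one variant of the event per zone type.
 */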

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
		NR_VM_EVENT_ITEMS
};

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
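
/*
 * Illustrative usage, not part of the interface proper: code that
 * already runs with preemption disabled can use the cheaper __ variant;
 * everything else uses the plain variant, which pins the CPU itself via
 * get_cpu_var()/put_cpu().  nr_pages below stands in for whatever count
 * the caller has at hand:
 *
 *	__count_vm_event(PGACTIVATE);
 *	count_vm_events(PGPGIN, nr_pages);
 */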

extern void all_vm_events(unsigned long *);
extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
#define get_cpu_vm_events(e)	0L
#define count_vm_event(e)	do { } while (0)
#define count_vm_events(e, d)	do { } while (0)
#define __count_vm_event(e)	do { } while (0)
#define __count_vm_events(e, d)	do { } while (0)
#define vm_events_fold_cpu(x)	do { } while (0)

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_DMA + zone_idx(zone), delta)
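
/*
 * For illustration: __count_zone_vm_events(PGALLOC, z, 1) becomes
 * __count_vm_events(PGALLOC_DMA + zone_idx(z), 1); the zone index
 * selects the per-zone flavor of the event, matching the FOR_ALL_ZONES
 * ordering above.
 */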

/*
 * Zone-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
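
/*
 * Illustrative reads, assuming a stat item such as NR_FILE_PAGES from
 * <linux/mmzone.h>.  Both return page counts; on SMP a transiently
 * negative sum of differentials is clamped to zero:
 *
 *	unsigned long total = global_page_state(NR_FILE_PAGES);
 *	unsigned long here  = zone_page_state(zone, NR_FILE_PAGES);
 */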

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifndef CONFIG_DMA_IS_NORMAL
#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_DMA], item);
}
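
/*
 * Illustrative use, e.g. from a node-aware path; numa_node_id() is
 * assumed to be available via <linux/topology.h>:
 *
 *	unsigned long node_file = node_page_state(numa_node_id(),
 *						  NR_FILE_PAGES);
 */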

extern void zone_statistics(struct zonelist *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z) do { } while (0)

#endif /* CONFIG_NUMA */

#define __add_zone_page_state(__z, __i, __d) \
		__mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d) \
		__mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	atomic_long_dec(&page_zone(page)->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state
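
/*
 * On UP the interrupt-safe names simply alias the __ variants above;
 * e.g. inc_zone_page_state(page, NR_FILE_MAPPED) compiles to the same
 * atomic_long_inc() pair as __inc_zone_page_state(page, NR_FILE_MAPPED).
 */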

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }

#endif /* CONFIG_SMP */

#endif /* _LINUX_VMSTAT_H */