#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
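
/*
 * Illustrative usage (not part of the original header): PGFAULT is one
 * of the items declared in vm_event_item.h. The double-underscore
 * variant uses raw percpu ops and is typically called where preemption
 * is already off; count_vm_event() is safe from any context:
 *
 *	__count_vm_event(PGFAULT);
 *	count_vm_event(PGFAULT);
 */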

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
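/*
 * Note: (void)(y) below still evaluates y, so a delta expression with
 * side effects behaves the same whether or not the counters are
 * configured in; the cast only discards the value and silences
 * unused-result warnings.
 */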
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
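
/*
 * Illustrative expansion (not in the original source): the per-zone
 * event items are declared in zone order in vm_event_item.h, so
 * offsetting the _NORMAL item by (zid - ZONE_NORMAL) selects the item
 * for the right zone. For example, on a config with a DMA zone,
 *
 *	__count_zid_vm_events(PGALLOC, ZONE_DMA, 1)
 *
 * becomes __count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + ZONE_DMA, 1),
 * i.e. a PGALLOC_DMA increment.
 */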

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);

	return x;
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

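/*
 * Explanatory note (not in the original header): the readers below
 * clamp negative sums to zero because per-cpu deltas that have not yet
 * been folded back can leave the global atomic transiently below the
 * true value, even below zero.
 */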
static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
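
/*
 * Explanatory note (not in the original header): the snapshot variants
 * cost a walk over all online CPUs, so they are meant for slow paths
 * where an unfolded per-cpu delta would matter; the plain readers above
 * are a single atomic load and are the right choice for frequent reads.
 */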
203
Mel Gorman599d0c92016-07-28 15:45:31 -0700204static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
205 enum node_stat_item item)
206{
207 long x = atomic_long_read(&pgdat->vm_stat[item]);
208
209#ifdef CONFIG_SMP
210 int cpu;
211 for_each_online_cpu(cpu)
212 x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item];
213
214 if (x < 0)
215 x = 0;
216#endif
217 return x;
218}
219
220
Christoph Lameter2244b952006-06-30 01:55:33 -0700221#ifdef CONFIG_NUMA
Kemi Wang3a321d22017-09-08 16:12:48 -0700222extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
Mel Gorman75ef7182016-07-28 15:45:24 -0700223extern unsigned long sum_zone_node_page_state(int node,
Kemi Wang3a321d22017-09-08 16:12:48 -0700224 enum zone_stat_item item);
225extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
Mel Gorman75ef7182016-07-28 15:45:24 -0700226extern unsigned long node_page_state(struct pglist_data *pgdat,
227 enum node_stat_item item);
Christoph Lameter2244b952006-06-30 01:55:33 -0700228#else
Michal Hockoc41f0122017-09-06 16:23:36 -0700229#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
Mel Gorman75ef7182016-07-28 15:45:24 -0700230#define node_page_state(node, item) global_node_page_state(item)
Christoph Lameterca889e62006-06-30 01:55:44 -0700231#endif /* CONFIG_NUMA */
Christoph Lameter2244b952006-06-30 01:55:33 -0700232
Christoph Lameter2244b952006-06-30 01:55:33 -0700233#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
234#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
Mel Gorman75ef7182016-07-28 15:45:24 -0700235#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
236#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))
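
/*
 * Illustrative expansion (not in the original source):
 *
 *	sub_zone_page_state(zone, NR_FREE_PAGES, 1 << order)
 *
 * becomes mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 * the parentheses around __d keep the negation correct for compound
 * expressions.
 */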

#ifdef CONFIG_SMP
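/*
 * Explanatory note (not in the original header): by kernel convention
 * the double-underscore variants below assume the caller already
 * serializes against itself (e.g. interrupts or preemption disabled),
 * while the plain variants are safe to call from any context.
 */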
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
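
/*
 * Illustrative note (not in the original source): freeing pages from a
 * CMA pageblock moves NR_FREE_PAGES and NR_FREE_CMA_PAGES by the same
 * amount, and nr_pages is negative when pages are being allocated, so
 * the two counters stay in step in both directions.
 */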

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */