/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT	1
#define DISABLE_NUMA_STAT	0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

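/*
 * Usage sketch (the event names below are only examples): most callers
 * simply do
 *
 *	count_vm_event(PGFAULT);
 *	count_vm_events(PGACTIVATE, nr_pages);
 *
 * The double-underscore variants use raw_cpu ops and are reserved for
 * paths that already run with interrupts or preemption disabled (or that
 * can tolerate a rare lost increment), matching the comments above.
 */
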
#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)

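/*
 * Expansion sketch: the per-zone event names are generated as
 * <item>_<ZONE>, so with zid == ZONE_DMA (assuming CONFIG_ZONE_DMA and
 * events declared via FOR_ALL_ZONES()) a line such as
 *
 *	__count_zid_vm_events(PGALLOC, ZONE_DMA, 1 << order);
 *
 * resolves to __count_vm_events(PGALLOC_DMA, 1 << order).
 */
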
/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_event[item]);
	atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
					enum numa_stat_item item)
{
	return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
	return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

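/*
 * Reader sketch: zone_page_state() is cheap but can lag by whatever is
 * still sitting in the per-cpu diffs, so code that decides something
 * close to a threshold typically falls back to the snapshot, e.g.
 * (the watermark comparison is only an illustration of the pattern):
 *
 *	unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
 *
 *	if (free <= watermark)
 *		free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 */
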
#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_add(pzstats->vm_numa_event[item], delta);
}

extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
					   enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif /* CONFIG_NUMA */

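/*
 * Allocation-path sketch (local variable names are illustrative): after a
 * batch of pages is taken from a zone's free lists, the NUMA placement
 * outcome is recorded against the zone it came from, roughly
 *
 *	if (zone_to_nid(zone) == zone_to_nid(preferred_zone))
 *		__count_numa_events(zone, NUMA_HIT, nr_pages);
 *	else
 *		__count_numa_events(zone, NUMA_MISS, nr_pages);
 *
 * fold_vm_numa_events() later pushes the per-cpu values into the atomic
 * counters read by zone_numa_event_state()/global_numa_event_state().
 */
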
#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}

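/*
 * Sketch for the byte-accounted items (the *_B node stats such as
 * NR_SLAB_RECLAIMABLE_B): at the node/global level the delta must be a
 * whole number of pages expressed in bytes, e.g.
 *
 *	__mod_node_page_state(pgdat, NR_SLAB_RECLAIMABLE_B, PAGE_SIZE);
 *
 * accounts exactly one page, while a sub-page delta trips the
 * VM_WARN_ON_ONCE() above. Only the memcg/lruvec layer tracks
 * finer-grained byte deltas.
 */
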
static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_zonestat *pzstats) { }
#endif /* CONFIG_SMP */

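/*
 * Locking sketch for the modifier API above: the double-underscore forms
 * expect the caller to have interrupts disabled (or to know the counter
 * cannot also be updated from interrupt context), while the plain forms
 * are safe from any context, e.g. (the item choice is illustrative):
 *
 *	mod_zone_page_state(zone, NR_MLOCK, nr_pages);     // irqs may be on
 *	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);  // irqs already off
 *
 * Under !CONFIG_SMP both spellings collapse to the same atomic update.
 */
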
static inline void __zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	__mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void __zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void __zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void __node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	__mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void __node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void __node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

static inline void node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

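/*
 * Folio wrapper sketch: the *_add_folio()/*_sub_folio() helpers account a
 * whole folio (folio_nr_pages() base pages) in one call, so a folio that
 * is added to and later removed from a file-backed cache keeps its
 * counter balanced with a pair like (item choice is illustrative):
 *
 *	node_stat_add_folio(folio, NR_FILE_PAGES);
 *	...
 *	node_stat_sub_folio(folio, NR_FILE_PAGES);
 */
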
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}

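/*
 * Free-list accounting sketch: nr_pages is signed, so the same helper
 * covers both adding to and removing from the buddy free lists; when the
 * migratetype is a CMA type, NR_FREE_CMA_PAGES tracks NR_FREE_PAGES in
 * lockstep, e.g. (order and migratetype are illustrative):
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 */
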
extern const char * const vmstat_text[];

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}

static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */

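/*
 * Name-lookup sketch: vmstat_text[] is laid out as zone stats, then NUMA
 * event counters, node stats, writeback items and finally VM events,
 * which is why each helper above adds the sizes of the preceding groups.
 * Since LRU node stats are named "nr_<list>", lru_list_name(LRU_ACTIVE_FILE)
 * yields "active_file" (assuming the usual "nr_active_file" entry).
 */
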
#ifdef CONFIG_MEMCG

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __mod_lruvec_page_state(struct page *page,
			     enum node_stat_item idx, int val);

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}

#else

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

#endif /* CONFIG_MEMCG */

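/*
 * Lruvec sketch: with CONFIG_MEMCG these helpers charge both the memory
 * cgroup and the underlying node; without it they fall through to plain
 * node accounting, so callers can use one spelling everywhere, e.g. a
 * shmem path charging newly added pages (illustrative):
 *
 *	__mod_lruvec_page_state(page, NR_SHMEM, nr_pages);
 *
 * The plain (non-__) forms wrap the update in local_irq_save()/restore()
 * and are the ones to use when interrupts may be enabled.
 */
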
static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

static inline void __lruvec_stat_mod_folio(struct folio *folio,
					   enum node_stat_item idx, int val)
{
	__mod_lruvec_page_state(&folio->page, idx, val);
}

static inline void __lruvec_stat_add_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void __lruvec_stat_sub_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

static inline void lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	mod_lruvec_page_state(&folio->page, idx, val);
}

static inline void lruvec_stat_add_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void lruvec_stat_sub_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
#endif /* _LINUX_VMSTAT_H */