/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT 1
#define DISABLE_NUMA_STAT 0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif

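/*
 * Reclaim outcome counters. Descriptive note: the reclaim path (mm/vmscan.c)
 * fills one of these in per reclaim pass and reports it, e.g. through the
 * vmscan tracepoints; the field meanings follow their names.
 */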
struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters are only ever incremented, and no critical kernel component
 * should rely on their exact values.
 *
 * Counters are handled completely inline. On many platforms the generated
 * code is simply an increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
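
/*
 * Illustrative usage (not a call site in this header): events are bumped
 * with the enum values from <linux/vm_event_item.h>, e.g. a fault path
 * might do
 *
 *	count_vm_event(PGFAULT);
 *
 * or, when several events of the same kind are accounted at once,
 *
 *	count_vm_events(PGFAULT, 1);
 *
 * The __count_vm_event()/__count_vm_events() variants use raw_cpu_*() and
 * rely on the fact that these counters are allowed to be racy (see the
 * comment above).
 */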

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x) count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x) count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
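
/*
 * The macro above relies on the per-zone event items being declared in zone
 * order (item##_DMA, item##_NORMAL, ..., one per zone), so that adding the
 * zone index to item##_NORMAL - ZONE_NORMAL selects the item for that zone.
 * For example, the page allocator can account an order-N allocation against
 * the zone the page came from with something like (illustrative):
 *
 *	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 */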

/*
 * Zone and node-based page accounting with per-CPU differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}
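
/*
 * Note: a few node stat items are accounted in bytes rather than pages
 * (vmstat_item_in_bytes() identifies them; the slab counters such as
 * NR_SLAB_RECLAIMABLE_B are the usual examples). global_node_page_state()
 * warns on those; callers that may be handed a byte-based item should use
 * global_node_page_state_pages(), which always reports whole pages.
 */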

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
224
Christoph Lameteraa454842010-09-09 16:38:17 -0700225/*
226 * More accurate version that also considers the currently pending
227 * deltas. For that we need to loop over all cpus to find the current
228 * deltas. There is no synchronization so the result cannot be
229 * exactly accurate either.
230 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
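
/*
 * Illustrative comparison (not a call site in this header):
 *
 *	free = zone_page_state(zone, NR_FREE_PAGES);
 *	free_now = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *
 * The first read is cheap but may lag behind by the pending per-CPU deltas;
 * the snapshot folds those deltas in and is worth its extra cost only where
 * a stale value could change a decision, e.g. watermark-style checks close
 * to the limit.
 */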

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
					enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
					enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);
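
/*
 * Convention (descriptive note): on SMP the double-underscore variants above
 * are implemented in mm/vmstat.c and expect the caller to have taken care of
 * interrupt or preemption safety, while the plain variants disable interrupts
 * around the update themselves and can be used from any context.
 */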

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
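
/*
 * Illustrative only: the page allocator updates the free-page accounting
 * with positive or negative page counts, e.g.
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 *
 * As a double-underscore helper it assumes the caller already provides the
 * required serialization (typically zone->lock with interrupts disabled).
 */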

extern const char * const vmstat_text[];
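
/*
 * vmstat_text[] is one flat array of names; as the helpers below encode, it
 * is laid out as the zone stat items, then (on NUMA) the NUMA stat items,
 * then the node stat items, then the writeback stat items, and finally the
 * VM event names.
 */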

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}

static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}


#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */

#ifdef CONFIG_MEMCG

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __mod_lruvec_page_state(struct page *page,
			     enum node_stat_item idx, int val);

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}
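
/*
 * As the wrappers above show, the plain mod_lruvec_*() helpers simply add
 * local_irq_save()/local_irq_restore() around the __mod_lruvec_*() versions,
 * so the double-underscore forms are intended for callers that already run
 * with interrupts disabled.
 */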

#else

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}
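
/*
 * Illustrative only: page state transitions are typically accounted through
 * these wrappers so that both the node and (with CONFIG_MEMCG) the owning
 * memory cgroup are updated, e.g. a writeback path might do
 *
 *	inc_lruvec_page_state(page, NR_WRITEBACK);
 *	...
 *	dec_lruvec_page_state(page, NR_WRITEBACK);
 */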

#endif /* _LINUX_VMSTAT_H */