/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>
#include <linux/mmdebug.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT 1
#define DISABLE_NUMA_STAT 0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
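
/*
 * Illustrative usage (the item names come from <linux/vm_event_item.h>;
 * nr_pages is just a caller-side count):
 *
 *	count_vm_event(PGFAULT);
 *	__count_vm_events(PGDEACTIVATE, nr_pages);
 *
 * The __ variants use raw_cpu ops and may occasionally lose an update
 * when the caller is preemptible; that is acceptable for these
 * statistics-only counters, as the comment above notes.
 */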

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	   count_vm_event(x)
#define count_vm_tlb_events(x, y)  count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
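
/*
 * For example (illustrative; zone and order are caller-side values, and
 * this relies on the per-zone PGALLOC_* events in <linux/vm_event_item.h>
 * being laid out in zone order):
 *
 *	__count_zid_vm_events(PGALLOC, zone_idx(zone), 1 << order);
 *
 * bumps PGALLOC_DMA, PGALLOC_NORMAL, ... depending on the zone index.
 */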

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
extern atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_event_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_event[item]);
	atomic_long_add(x, &vm_numa_event[item]);
}

static inline unsigned long zone_numa_event_state(struct zone *zone,
					enum numa_stat_item item)
{
	return atomic_long_read(&zone->vm_numa_event[item]);
}

static inline unsigned long
global_numa_event_state(enum numa_stat_item item)
{
	return atomic_long_read(&vm_numa_event[item]);
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline
unsigned long global_node_page_state_pages(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * per-cpu deltas. To get those we need to loop over all online cpus.
 * There is no synchronization, so the result is still only
 * approximate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}

#ifdef CONFIG_NUMA
/* See __count_vm_event comment on why raw_cpu_inc is used. */
static inline void
__count_numa_event(struct zone *zone, enum numa_stat_item item)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_inc(pzstats->vm_numa_event[item]);
}

static inline void
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
{
	struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;

	raw_cpu_add(pzstats->vm_numa_event[item], delta);
}
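
/*
 * Illustrative use (NUMA_HIT comes from enum numa_stat_item; nr_pages is
 * a caller-side count): an allocator path that satisfied a request from
 * the preferred node might do
 *
 *	__count_numa_events(zone, NUMA_HIT, nr_pages);
 *
 * These only feed statistics, hence the tolerance for racy raw_cpu ops.
 */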

extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_event_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
					   enum node_stat_item item);
extern void fold_vm_numa_events(void);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#define node_page_state_pages(node, item) global_node_page_state_pages(item)
static inline void fold_vm_numa_events(void)
{
}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
		loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	if (vmstat_item_in_bytes(item)) {
		/*
		 * Only cgroups use subpage accounting right now; at
		 * the global level, these items still change in
		 * multiples of whole pages. Store them as pages
		 * internally to keep the per-cpu counters compact.
		 */
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	node_page_state_add(delta, pgdat, item);
}
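
/*
 * Concretely (illustrative): NR_SLAB_RECLAIMABLE_B is one of the
 * byte-counted items recognized by vmstat_item_in_bytes(), so a caller
 * passing delta = 2 * PAGE_SIZE ends up adding 2 to the page-based
 * node counter above.
 */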

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_zonestat *pzstats) { }
#endif /* CONFIG_SMP */

static inline void __zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	__mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void __zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void __zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	__mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void zone_stat_mod_folio(struct folio *folio,
		enum zone_stat_item item, long nr)
{
	mod_zone_page_state(folio_zone(folio), item, nr);
}

static inline void zone_stat_add_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio));
}

static inline void zone_stat_sub_folio(struct folio *folio,
		enum zone_stat_item item)
{
	mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio));
}

static inline void __node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	__mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void __node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void __node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	__mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

static inline void node_stat_mod_folio(struct folio *folio,
		enum node_stat_item item, long nr)
{
	mod_node_page_state(folio_pgdat(folio), item, nr);
}

static inline void node_stat_add_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, folio_nr_pages(folio));
}

static inline void node_stat_sub_folio(struct folio *folio,
		enum node_stat_item item)
{
	mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
}

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
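
/*
 * Illustrative: buddy free/allocation paths pass the page's order and
 * migratetype (caller-side values) so that NR_FREE_CMA_PAGES tracks the
 * CMA subset of NR_FREE_PAGES, e.g.
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 */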

extern const char * const vmstat_text[];

static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}

#ifdef CONFIG_NUMA
static inline const char *numa_stat_name(enum numa_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_NUMA */

static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   item];
}

static inline const char *lru_list_name(enum lru_list lru)
{
	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}
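
/*
 * For example (assuming the usual "nr_"-prefixed names in vmstat_text):
 * lru_list_name(LRU_INACTIVE_FILE) yields "inactive_file", i.e.
 * node_stat_name(NR_INACTIVE_FILE) with the leading "nr_" skipped.
 */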

static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}

#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_EVENT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */

#ifdef CONFIG_MEMCG

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __mod_lruvec_page_state(struct page *page,
			     enum node_stat_item idx, int val);

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}

#else

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

static inline void __lruvec_stat_mod_folio(struct folio *folio,
					   enum node_stat_item idx, int val)
{
	__mod_lruvec_page_state(&folio->page, idx, val);
}

static inline void __lruvec_stat_add_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void __lruvec_stat_sub_folio(struct folio *folio,
					   enum node_stat_item idx)
{
	__lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

static inline void lruvec_stat_mod_folio(struct folio *folio,
					 enum node_stat_item idx, int val)
{
	mod_lruvec_page_state(&folio->page, idx, val);
}

static inline void lruvec_stat_add_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, folio_nr_pages(folio));
}

static inline void lruvec_stat_sub_folio(struct folio *folio,
					 enum node_stat_item idx)
{
	lruvec_stat_mod_folio(folio, idx, -folio_nr_pages(folio));
}
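
/*
 * Illustrative: page-cache code accounting a folio it just inserted
 * might do (NR_FILE_PAGES is from enum node_stat_item)
 *
 *	__lruvec_stat_add_folio(folio, NR_FILE_PAGES);
 *
 * with a matching __lruvec_stat_sub_folio() on removal; the non-__
 * variants are safe to call without the caller disabling interrupts.
 */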
#endif /* _LINUX_VMSTAT_H */