/* SPDX-License-Identifier: GPL-2.0 */
#ifndef PAGE_FLAGS_LAYOUT_H
#define PAGE_FLAGS_LAYOUT_H

#include <linux/numa.h>
#include <generated/bounds.h>

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits. See gfp_zone() in include/linux/gfp.h
 */
#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#elif MAX_NR_ZONES <= 8
#define ZONES_SHIFT 3
#else
#error ZONES_SHIFT "Too many zones configured"
#endif

#define ZONES_WIDTH		ZONES_SHIFT
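
/*
 * Worked example (illustrative only, the values depend on the config):
 * a kernel built with ZONE_DMA, ZONE_DMA32, ZONE_NORMAL and ZONE_MOVABLE
 * has MAX_NR_ZONES == 4, so the chain above yields ZONES_SHIFT == 2 and
 * the zone index occupies two bits of page->flags.  gfp_zone() maps the
 * zone modifier bits of a gfp_mask to one of these zone indices.
 */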

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
#else
#define SECTIONS_SHIFT		0
#endif
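
/*
 * For example (illustrative values; both constants come from the per-arch
 * <asm/sparsemem.h>): with MAX_PHYSMEM_BITS == 46 and SECTION_SIZE_BITS == 27
 * (128 MiB sections), SECTIONS_SHIFT is 46 - 27 == 19, i.e. up to 2^19
 * memory sections can be encoded.
 */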

#ifndef BUILD_VDSO32_64
/*
 * page->flags layout:
 *
 * There are five possibilities for how page->flags get laid out. The first
 * pair is for the normal case without sparsemem. The second pair is for
 * sparsemem when there is plenty of space for node and section information.
 * The last is when there is insufficient space in page->flags and a separate
 * lookup is necessary.
 *
 * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE             | ... | FLAGS |
 *      " plus space for last_cpupid: |       NODE     | ZONE | LAST_CPUPID ... | FLAGS |
 * classic sparse with space for node:| SECTION | NODE | ZONE             | ... | FLAGS |
 *      " plus space for last_cpupid: | SECTION | NODE | ZONE | LAST_CPUPID ... | FLAGS |
 * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
 */
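
/*
 * Illustrative budget check (hypothetical 64-bit numbers, not taken from any
 * particular config): with NR_PAGEFLAGS == 24, ZONES_WIDTH == 2,
 * SECTIONS_WIDTH == 0 (vmemmap) and NODES_SHIFT == 6, the tests below have
 * 64 - 24 == 40 spare bits to hand out, so NODE, ZONE and LAST_CPUPID all
 * fit at the top of page->flags; fields that do not fit fall back to a
 * separate lookup instead.
 */
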
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH		SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH		0
#endif

#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_SHIFT \
	<= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH		NODES_SHIFT
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
#error "Vmemmap: No space for nodes field in page flags"
#else
#define NODES_WIDTH		0
#endif

/*
 * Note that this #define MUST have a value so that it can be tested with
 * the IS_ENABLED() macro.
 */
#if NODES_SHIFT != 0 && NODES_WIDTH == 0
#define NODE_NOT_IN_PAGE_FLAGS	1
#endif
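
/*
 * Hypothetical sketch of why the value matters (the helper names here are
 * invented purely for illustration and are not compiled): callers can branch
 * in plain C rather than behind #ifdef, e.g.
 *
 *	if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS))
 *		nid = node_from_separate_lookup(page);
 *	else
 *		nid = node_from_page_flags(page);
 */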

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
#define KASAN_TAG_WIDTH		8
#else
#define KASAN_TAG_WIDTH		0
#endif

#ifdef CONFIG_NUMA_BALANCING
#define LAST__PID_SHIFT		8
#define LAST__PID_MASK		((1 << LAST__PID_SHIFT)-1)

#define LAST__CPU_SHIFT		NR_CPUS_BITS
#define LAST__CPU_MASK		((1 << LAST__CPU_SHIFT)-1)

#define LAST_CPUPID_SHIFT	(LAST__PID_SHIFT+LAST__CPU_SHIFT)
#else
#define LAST_CPUPID_SHIFT	0
#endif
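
/*
 * Rough sketch of how these shifts and masks combine (mirrors the cpupid
 * helpers in include/linux/mm.h; shown for illustration only, not compiled):
 *
 *	cpupid = ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) |
 *		 (pid & LAST__PID_MASK);
 *
 * i.e. the low LAST__PID_SHIFT bits carry a truncated pid and the bits
 * above them carry the cpu number.
 */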

#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
	KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define LAST_CPUPID_WIDTH	LAST_CPUPID_SHIFT
#else
#define LAST_CPUPID_WIDTH	0
#endif

#if LAST_CPUPID_SHIFT != 0 && LAST_CPUPID_WIDTH == 0
#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif

#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
	KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error "Not enough bits in page flags"
#endif

/* see the comment on MAX_NR_TIERS */
#define LRU_REFS_WIDTH	min(__LRU_REFS_WIDTH, BITS_PER_LONG - NR_PAGEFLAGS - \
			    ZONES_WIDTH - LRU_GEN_WIDTH - SECTIONS_WIDTH - \
			    NODES_WIDTH - KASAN_TAG_WIDTH - LAST_CPUPID_WIDTH)
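
/*
 * Illustrative arithmetic (hypothetical widths): on 64-bit with
 * NR_PAGEFLAGS == 24, ZONES_WIDTH == 2, LRU_GEN_WIDTH == 3, NODES_WIDTH == 6
 * and the other widths 0, the remainder is 64 - 24 - 2 - 3 - 6 == 29 bits,
 * so LRU_REFS_WIDTH is simply __LRU_REFS_WIDTH; on a tightly packed 32-bit
 * configuration the min() clamps it, possibly all the way down to 0.
 */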

#endif /* !BUILD_VDSO32_64 */
#endif /* PAGE_FLAGS_LAYOUT_H */