/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

#ifdef CONFIG_SWAP
#include <linux/swapfile.h>
#endif	/* CONFIG_SWAP */

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the six
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
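
/*
 * Illustrative arithmetic, assuming a 64-bit host (exact values are
 * configuration dependent): BITS_PER_XA_VALUE is BITS_PER_LONG - 1 == 63
 * and MAX_SWAPFILES_SHIFT is 5, so SWP_TYPE_SHIFT is 58.  The type then
 * lands in bits 58..62 of the entry value and the offset in bits 0..57.
 */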

/*
 * Definitions only for PFN swap entries (see is_pfn_swap_entry()).  To
 * store PFN, we only need SWP_PFN_BITS bits.  Each of the pfn swap entries
 * can use the extra bits to store other information besides PFN.
 */
#ifdef MAX_PHYSMEM_BITS
#define SWP_PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#else  /* MAX_PHYSMEM_BITS */
#define SWP_PFN_BITS		min_t(int, \
				      sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
				      SWP_TYPE_SHIFT)
#endif	/* MAX_PHYSMEM_BITS */
#define SWP_PFN_MASK		(BIT(SWP_PFN_BITS) - 1)
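
/*
 * Illustrative sketch (example values only; they vary by architecture and
 * config): with PAGE_SHIFT == 12 and MAX_PHYSMEM_BITS == 46, SWP_PFN_BITS
 * is 34, so swp_offset_pfn() below reduces to
 *
 *	swp_offset(entry) & ((1UL << 34) - 1)
 *
 * i.e. the PFN lives in the low 34 bits of the offset, and any remaining
 * offset bits are free for extra per-entry information such as the
 * migration A/D bits defined next.
 */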

/*
 * Migration swap entry specific bitfield definitions.  Layout:
 *
 *   |----------+--------------------|
 *   | swp_type | swp_offset         |
 *   |----------+--------+-+-+-------|
 *   |          | resv   |D|A|  PFN  |
 *   |----------+--------+-+-+-------|
 *
 * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
 * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
 *
 * Note: A/D bits will be stored in migration entries iff there are enough
 * free bits in the arch-specific swp offset.  By default we'll ignore A/D
 * bits when migrating a page.  Please refer to migration_entry_supports_ad()
 * for more information.  If there are more bits besides PFN and A/D bits,
 * they should be reserved and always be zeros.
 */
#define SWP_MIG_YOUNG_BIT		(SWP_PFN_BITS)
#define SWP_MIG_DIRTY_BIT		(SWP_PFN_BITS + 1)
#define SWP_MIG_TOTAL_BITS		(SWP_PFN_BITS + 2)

#define SWP_MIG_YOUNG			BIT(SWP_MIG_YOUNG_BIT)
#define SWP_MIG_DIRTY			BIT(SWP_MIG_DIRTY_BIT)
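
/*
 * Illustrative example (bit positions are config dependent): with
 * SWP_PFN_BITS == 34, SWP_MIG_YOUNG is BIT(34) and SWP_MIG_DIRTY is
 * BIT(35) within the swp offset, and SWP_MIG_TOTAL_BITS == 36 must fit in
 * the offset bits the architecture actually provides;
 * migration_entry_supports_ad() below reports the result of that check.
 */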

static inline bool is_pfn_swap_entry(swp_entry_t entry);

/* Clear all flags but only keep swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
	if (pte_swp_exclusive(pte))
		pte = pte_swp_clear_exclusive(pte);
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	return pte;
}

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}
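
/*
 * Example round trip (illustrative; type 1 and offset 0x1234 are
 * arbitrary values):
 *
 *	swp_entry_t e = swp_entry(1, 0x1234);
 *
 * gives swp_type(e) == 1 and swp_offset(e) == 0x1234, since the type is
 * packed into the high-order bits of e.val and the offset into the
 * low-order bits.
 */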

/*
 * This should only be called upon a pfn swap entry to get the PFN stored
 * in the swap entry.  Please refer to is_pfn_swap_entry() for the
 * definition of a pfn swap entry.
 */
static inline unsigned long swp_offset_pfn(swp_entry_t entry)
{
	VM_BUG_ON(!is_pfn_swap_entry(entry));
	return swp_offset(entry) & SWP_PFN_MASK;
}

/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
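
/*
 * A pte is in one of three states: none (empty), present (maps a page),
 * or a swap pte holding a swp_entry_t (a true swap, migration, device
 * private/exclusive, hwpoison or marker entry), hence the check above.
 */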

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	pte = pte_swp_clear_flags(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}
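
/*
 * Example (illustrative): shmem stores swap entries in the page cache as
 * XArray values, and the conversion round-trips:
 *
 *	void *p = swp_to_radix_entry(e);
 *	swp_entry_t back = radix_to_swp_entry(p);	(back.val == e.val)
 *
 * xa_mk_value() shifts the value up by one bit to tag it, which is the
 * extra bit reserved by the layout comment at the top of this file.
 */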

static inline swp_entry_t make_swapin_error_entry(struct page *page)
{
	return swp_entry(SWP_SWAPIN_ERROR, page_to_pfn(page));
}

static inline int is_swapin_error_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_SWAPIN_ERROR;
}

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_READ, offset);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_WRITE, offset);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ ||
			swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE);
}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
}

static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
}

static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ, offset);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_WRITE, offset);
}

/*
 * Returns whether the host has a large enough swap offset field to support
 * carrying over the pgtable A/D bits across page migrations.  The result
 * is arch-specific.
 */
static inline bool migration_entry_supports_ad(void)
{
#ifdef CONFIG_SWAP
	return swap_migration_ad_supported;
#else /* CONFIG_SWAP */
	return false;
#endif /* CONFIG_SWAP */
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_YOUNG);
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_YOUNG;
	/* Keep the old behavior of aging page after migration */
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_DIRTY);
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_DIRTY;
	/* Keep the old behavior of clean page after migration */
	return false;
}
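
/*
 * Illustrative flow (a sketch, not a verbatim caller): rmap code that
 * replaces a mapped pte with a migration entry can preserve the hardware
 * A/D bits along these lines:
 *
 *	if (pte_young(pteval))
 *		entry = make_migration_entry_young(entry);
 *	if (pte_dirty(pteval))
 *		entry = make_migration_entry_dirty(entry);
 *
 * and the code removing the migration entry queries
 * is_migration_entry_young()/is_migration_entry_dirty() to rebuild the pte.
 */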

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				   spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				 unsigned long address);
#ifdef CONFIG_HUGETLB_PAGE
extern void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
#endif	/* CONFIG_HUGETLB_PAGE */
#else /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					  spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
#ifdef CONFIG_HUGETLB_PAGE
static inline void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { }
#endif	/* CONFIG_HUGETLB_PAGE */
static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return 0;
}
static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	return false;
}
#endif	/* CONFIG_MIGRATION */

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_sub(long i)
{
	atomic_long_sub(i, &num_poisoned_pages);
}

#else /* CONFIG_MEMORY_FAILURE */

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}

static inline void num_poisoned_pages_sub(long i)
{
}
#endif	/* CONFIG_MEMORY_FAILURE */

typedef unsigned long pte_marker;

#define  PTE_MARKER_UFFD_WP  BIT(0)
#define  PTE_MARKER_MASK     (PTE_MARKER_UFFD_WP)

#ifdef CONFIG_PTE_MARKER

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	return swp_entry(SWP_PTE_MARKER, marker);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_PTE_MARKER;
}

static inline pte_marker pte_marker_get(swp_entry_t entry)
{
	return swp_offset(entry) & PTE_MARKER_MASK;
}

static inline bool is_pte_marker(pte_t pte)
{
	return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte));
}

#else /* CONFIG_PTE_MARKER */

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	/* This should never be called if !CONFIG_PTE_MARKER */
	WARN_ON_ONCE(1);
	return swp_entry(0, 0);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
	return false;
}

static inline pte_marker pte_marker_get(swp_entry_t entry)
{
	return 0;
}

static inline bool is_pte_marker(pte_t pte)
{
	return false;
}

#endif /* CONFIG_PTE_MARKER */

static inline pte_t make_pte_marker(pte_marker marker)
{
	return swp_entry_to_pte(make_pte_marker_entry(marker));
}

/*
 * This is a special version of pte_none() that also covers the case when
 * the pte is a pte marker.  It exists because in many cases a pte marker
 * should be seen as a none pte; we have merely stored some information
 * onto the none pte, so it is no longer none.
 *
 * It should be used when the pte is file-backed, ram-based and backing
 * userspace pages, like shmem.  It is not needed upon pgtables that do not
 * support pte markers at all.  For example, it's not needed on anonymous
 * memory, kernel-only memory (including during boot), or non-ram based
 * generic filesystems.  It's fine to be used even there, but the extra pte
 * marker check will be pure overhead.
 *
 * For systems configured with !CONFIG_PTE_MARKER this will be automatically
 * optimized to pte_none().
 */
static inline int pte_none_mostly(pte_t pte)
{
	return pte_none(pte) || is_pte_marker(pte);
}
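
/*
 * Example (illustrative): a shmem pte that userfaultfd write-protected and
 * that was then zapped keeps a marker rather than becoming none:
 *
 *	pte_t pte = make_pte_marker(PTE_MARKER_UFFD_WP);
 *
 * Here pte_none(pte) is false while pte_none_mostly(pte) is true, so
 * callers that want to treat marker ptes as empty use pte_none_mostly().
 */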

static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset_pfn(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(is_migration_entry(entry) && !PageLocked(p));

	return p;
}

/*
 * A pfn swap entry is a special type of swap entry that always has a pfn
 * stored in the swap offset.  It can be used to represent unaddressable
 * device memory, to restrict access to a page undergoing migration, or to
 * represent a pfn that has been hwpoisoned and unmapped.
 */
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
	/* Make sure the swp offset can always store the needed fields */
	BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);

	return is_migration_entry(entry) || is_device_private_entry(entry) ||
	       is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
}

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	if (pmd_swp_uffd_wp(pmd))
		pmd = pmd_swp_clear_uffd_wp(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif	/* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}

#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */