/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

#ifdef CONFIG_SWAP
#include <linux/swapfile.h>
#endif /* CONFIG_SWAP */

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' occupies the six
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
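/*
 * Worked example (illustrative only, assuming a 64-bit kernel where
 * BITS_PER_XA_VALUE == 63 and MAX_SWAPFILES_SHIFT == 5):
 *
 *	SWP_TYPE_SHIFT == 58, so the offset lives in bits 0..57 and the
 *	type in bits 58..62; bit 63 stays clear so that swp_to_radix_entry()
 *	can shift the whole value up by one for the XArray value tag
 *	without overflowing an unsigned long.
 *
 *	swp_entry_t e = swp_entry(3, 0x1234);
 *	e.val == (3UL << 58) | 0x1234;
 *	swp_type(e) == 3 and swp_offset(e) == 0x1234.
 */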

/*
 * Definitions only for PFN swap entries (see is_pfn_swap_entry()).  To
 * store a PFN, we only need SWP_PFN_BITS bits.  Each pfn swap entry can
 * use the extra bits to store other information besides the PFN.
 */
#ifdef MAX_PHYSMEM_BITS
#define SWP_PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#else /* MAX_PHYSMEM_BITS */
#define SWP_PFN_BITS		min_t(int, \
				      sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
				      SWP_TYPE_SHIFT)
#endif /* MAX_PHYSMEM_BITS */
#define SWP_PFN_MASK		(BIT(SWP_PFN_BITS) - 1)

/**
 * Migration swap entry specific bitfield definitions.  Layout:
 *
 *   |----------+--------------------|
 *   | swp_type | swp_offset         |
 *   |----------+--------+-+-+-------|
 *   |          | resv   |D|A|  PFN  |
 *   |----------+--------+-+-+-------|
 *
 * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
 * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
 *
 * Note: A/D bits will be stored in migration entries iff there are enough
 * free bits in the arch specific swp offset.  By default we'll ignore A/D
 * bits when migrating a page.  Please refer to migration_entry_supports_ad()
 * for more information.  If there are more bits besides PFN and A/D bits,
 * they should be reserved and always be zeros.
 */
#define SWP_MIG_YOUNG_BIT		(SWP_PFN_BITS)
#define SWP_MIG_DIRTY_BIT		(SWP_PFN_BITS + 1)
#define SWP_MIG_TOTAL_BITS		(SWP_PFN_BITS + 2)

#define SWP_MIG_YOUNG			BIT(SWP_MIG_YOUNG_BIT)
#define SWP_MIG_DIRTY			BIT(SWP_MIG_DIRTY_BIT)
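/*
 * Illustrative example (hypothetical numbers, not from any particular
 * arch): with SWP_PFN_BITS == 40, the migration-specific swp_offset
 * layout would be bits 0..39 for the PFN, bit 40 for young (A) and
 * bit 41 for dirty (D), i.e. SWP_MIG_YOUNG == BIT(40),
 * SWP_MIG_DIRTY == BIT(41) and SWP_MIG_TOTAL_BITS == 42; anything
 * above bit 41 must stay zero.
 */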

static inline bool is_pfn_swap_entry(swp_entry_t entry);

/* Clear all flags but keep the swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
	if (pte_swp_exclusive(pte))
		pte = pte_swp_clear_exclusive(pte);
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	return pte;
}

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format.
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format.
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}
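/*
 * A minimal round-trip sketch (illustrative only): for any type below
 * MAX_SWAPFILES and any offset that fits in SWP_OFFSET_MASK,
 *
 *	swp_entry_t e = swp_entry(type, offset);
 *	VM_BUG_ON(swp_type(e) != type || swp_offset(e) != offset);
 *
 * Offsets wider than SWP_TYPE_SHIFT bits would be silently truncated by
 * the mask, so callers must encode only values that fit.
 */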

/*
 * This should only be called on a pfn swap entry to get the PFN stored
 * in the swap entry.  Please refer to is_pfn_swap_entry() for the
 * definition of a pfn swap entry.
 */
static inline unsigned long swp_offset_pfn(swp_entry_t entry)
{
	VM_BUG_ON(!is_pfn_swap_entry(entry));
	return swp_offset(entry) & SWP_PFN_MASK;
}

/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}
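/*
 * Illustrative classification (a sketch, not kernel code): every pte is
 * exactly one of none, present, or a swap-type entry, so a walker can do:
 *
 *	if (pte_none(pte))
 *		return;				// nothing mapped
 *	else if (pte_present(pte))
 *		page = pte_page(pte);		// a real page
 *	else
 *		entry = pte_to_swp_entry(pte);	// is_swap_pte(pte) holds
 */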

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	pte = pte_swp_clear_flags(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
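/*
 * The pte <-> swp_entry_t conversion funnels through the per-arch
 * __swp_type()/__swp_offset()/__swp_entry() macros, so the same logical
 * entry may have different pte bit layouts on different architectures.
 * A hedged sketch of the round trip:
 *
 *	pte_t pte = swp_entry_to_pte(entry);
 *	swp_entry_t back = pte_to_swp_entry(pte);
 *	VM_BUG_ON(back.val != entry.val);
 *
 * Software bits such as soft-dirty or uffd-wp are deliberately cleared by
 * pte_swp_clear_flags() before decoding.
 */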

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}
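/*
 * Round-trip sketch (illustrative only): shmem stores swap entries in the
 * page cache XArray as value entries, e.g.
 *
 *	void *xa_val = swp_to_radix_entry(entry);
 *	swp_entry_t back = radix_to_swp_entry(xa_val);
 *	VM_BUG_ON(back.val != entry.val);
 *
 * xa_mk_value() shifts the value up by one and sets the low tag bit, which
 * is why the packing comment above reserves one extra high bit.
 */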

static inline swp_entry_t make_swapin_error_entry(struct page *page)
{
	return swp_entry(SWP_SWAPIN_ERROR, page_to_pfn(page));
}

static inline int is_swapin_error_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_SWAPIN_ERROR;
}

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_READ, offset);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_WRITE, offset);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);

	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ ||
		swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE);
}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
}

static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
}

static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ, offset);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_WRITE, offset);
}

/*
 * Returns whether the host has a large enough swap offset field to support
 * carrying over the pgtable A/D bits for page migrations.  The result is
 * arch specific.
 */
static inline bool migration_entry_supports_ad(void)
{
#ifdef CONFIG_SWAP
	return swap_migration_ad_supported;
#else /* CONFIG_SWAP */
	return false;
#endif /* CONFIG_SWAP */
}
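/*
 * For reference, a sketch of how the runtime flag could be computed (this
 * mirrors, but is not quoted from, the CONFIG_SWAP setup code): the A/D
 * bits are only usable when the arch's maximum swap offset can hold all
 * of PFN + A + D, e.g.
 *
 *	if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS))
 *		swap_migration_ad_supported = true;
 */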

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_YOUNG);
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_YOUNG;
	/* Keep the old behavior of aging the page after migration */
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_DIRTY);
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_DIRTY;
	/* Keep the old behavior of a clean page after migration */
	return false;
}
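/*
 * A hedged usage sketch (modelled on, not copied from, the rmap unmap
 * path): when replacing a mapped pte with a migration entry, the current
 * A/D state is folded into the entry and recovered on the other side.
 *
 *	entry = make_readable_migration_entry(page_to_pfn(page));
 *	if (pte_young(pteval))
 *		entry = make_migration_entry_young(entry);
 *	if (pte_dirty(pteval))
 *		entry = make_migration_entry_dirty(entry);
 *	set_pte_at(mm, address, ptep, swp_entry_to_pte(entry));
 */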

extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				   spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				 unsigned long address);
#ifdef CONFIG_HUGETLB_PAGE
extern void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
#endif /* CONFIG_HUGETLB_PAGE */
#else /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					  spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
#ifdef CONFIG_HUGETLB_PAGE
static inline void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { }
#endif /* CONFIG_HUGETLB_PAGE */
static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return 0;
}
static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_MEMORY_FAILURE

extern atomic_long_t num_poisoned_pages __read_mostly;

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}

static inline void num_poisoned_pages_sub(long i)
{
	atomic_long_sub(i, &num_poisoned_pages);
}

#else /* CONFIG_MEMORY_FAILURE */

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}

static inline void num_poisoned_pages_inc(void)
{
}

static inline void num_poisoned_pages_sub(long i)
{
}
#endif /* CONFIG_MEMORY_FAILURE */

typedef unsigned long pte_marker;

#define PTE_MARKER_UFFD_WP	BIT(0)
#define PTE_MARKER_MASK		(PTE_MARKER_UFFD_WP)

#ifdef CONFIG_PTE_MARKER

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	return swp_entry(SWP_PTE_MARKER, marker);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_PTE_MARKER;
}

static inline pte_marker pte_marker_get(swp_entry_t entry)
{
	return swp_offset(entry) & PTE_MARKER_MASK;
}

static inline bool is_pte_marker(pte_t pte)
{
	return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte));
}

#else /* CONFIG_PTE_MARKER */

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	/* This should never be called if !CONFIG_PTE_MARKER */
	WARN_ON_ONCE(1);
	return swp_entry(0, 0);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
	return false;
}

static inline pte_marker pte_marker_get(swp_entry_t entry)
{
	return 0;
}

static inline bool is_pte_marker(pte_t pte)
{
	return false;
}

#endif /* CONFIG_PTE_MARKER */

static inline pte_t make_pte_marker(pte_marker marker)
{
	return swp_entry_to_pte(make_pte_marker_entry(marker));
}
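/*
 * Usage sketch (hedged; modelled on the userfaultfd write-protect code
 * rather than quoted from it): a uffd-wp marker can be installed into an
 * otherwise-none pte so that the write-protect state survives the page
 * being unmapped.
 *
 *	pte_t marker = make_pte_marker(PTE_MARKER_UFFD_WP);
 *	set_pte_at(mm, addr, ptep, marker);
 */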

/*
 * This is a special version of pte_none(), covering the case when the pte
 * is a pte marker.  It exists because in many cases a pte marker should be
 * seen as a none pte: we have merely stored some information in the none
 * pte, which makes it not-none any more.
 *
 * It should be used when the pte is file-backed, ram-based and backing
 * userspace pages, like shmem.  It is not needed upon pgtables that do not
 * support pte markers at all.  For example, it's not needed on anonymous
 * memory, kernel-only memory (including during boot), or non-ram based
 * generic filesystems.  It's fine to be used even there, but the extra
 * pte marker check will be pure overhead.
 *
 * For systems configured with !CONFIG_PTE_MARKER this will be automatically
 * optimized to pte_none().
 */
static inline int pte_none_mostly(pte_t pte)
{
	return pte_none(pte) || is_pte_marker(pte);
}
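/*
 * Illustrative fault-path sketch (hypothetical, not kernel code): a
 * shmem-style fault handler that must treat marker ptes as "no page
 * present" while still being able to read the marker bits afterwards.
 *
 *	if (pte_none_mostly(pte)) {
 *		pte_marker marker = 0;
 *
 *		if (is_pte_marker(pte))
 *			marker = pte_marker_get(pte_to_swp_entry(pte));
 *		// take the no-page path, honouring any marker bits
 *	}
 */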
539
Alistair Poppleaf5cdaf2021-06-30 18:54:06 -0700540static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
541{
Peter Xu0d206b52022-08-11 12:13:27 -0400542 struct page *p = pfn_to_page(swp_offset_pfn(entry));
Alistair Poppleaf5cdaf2021-06-30 18:54:06 -0700543
544 /*
545 * Any use of migration entries may only occur while the
546 * corresponding page is locked
547 */
548 BUG_ON(is_migration_entry(entry) && !PageLocked(p));
549
550 return p;
551}
552
553/*
554 * A pfn swap entry is a special type of swap entry that always has a pfn stored
Oscar Salvadorea92809e2024-04-07 15:05:37 +0200555 * in the swap offset. They can either be used to represent unaddressable device
556 * memory, to restrict access to a page undergoing migration or to represent a
557 * pfn which has been hwpoisoned and unmapped.
Alistair Poppleaf5cdaf2021-06-30 18:54:06 -0700558 */
559static inline bool is_pfn_swap_entry(swp_entry_t entry)
560{
Peter Xu0d206b52022-08-11 12:13:27 -0400561 /* Make sure the swp offset can always store the needed fields */
562 BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
563
Alistair Poppleb756a3b2021-06-30 18:54:25 -0700564 return is_migration_entry(entry) || is_device_private_entry(entry) ||
Oscar Salvadorea92809e2024-04-07 15:05:37 +0200565 is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
Alistair Poppleaf5cdaf2021-06-30 18:54:06 -0700566}
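/*
 * Defensive-access sketch (illustrative only): only pfn swap entries may
 * be decoded with swp_offset_pfn(); other swap entry types store a swap
 * device offset there instead.
 *
 *	if (is_pfn_swap_entry(entry))
 *		page = pfn_swap_entry_to_page(entry);
 *	else
 *		offset = swp_offset(entry);	// a real swap slot
 */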

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	if (pmd_swp_uffd_wp(pmd))
		pmd = pmd_swp_clear_uffd_wp(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
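/*
 * THP-walker sketch (hedged; patterned after the huge-page fault path
 * rather than quoted from it): a walker that encounters a PMD migration
 * entry must wait for migration to finish before retrying.
 *
 *	pmd_t pmdval = *pmdp;
 *
 *	if (is_pmd_migration_entry(pmdval))
 *		pmd_migration_entry_wait(mm, pmdp);
 */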
#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}

#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */