/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Macros for manipulating and testing flags related to a
 * pageblock_nr_pages number of pages.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Original author, Mel Gorman
 * Major cleanups and reduction of bit operations, Andy Whitcroft
 */
#ifndef PAGEBLOCK_FLAGS_H
#define PAGEBLOCK_FLAGS_H

#include <linux/types.h>

#define PB_migratetype_bits 3
/* Bit indices that affect a whole block of pages */
enum pageblock_bits {
	PB_migrate,
	PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
			/* 3 bits required for migrate types */
	PB_migrate_skip,	/* If set the block is skipped by compaction */

	/*
	 * Assume the bits will always align on a word. If this assumption
	 * changes then get/set pageblock needs updating.
	 */
	NR_PAGEBLOCK_BITS
};
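
/*
 * Worked example of the layout declared above (purely illustrative): with
 * PB_migratetype_bits == 3, each pageblock's bits pack as
 *
 *	bits 0-2: PB_migrate .. PB_migrate_end	(migrate type, values 0-7)
 *	bit  3:   PB_migrate_skip		(compaction skip hint)
 *
 * giving NR_PAGEBLOCK_BITS == 4. Per the comment above, the get/set
 * helpers assume a pageblock's bits never straddle a word of the bitmap.
 */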

#ifdef CONFIG_HUGETLB_PAGE

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Huge page sizes are variable */
extern unsigned int pageblock_order;

#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * Huge pages are a constant size, but don't exceed the maximum allocation
 * granularity.
 */
#define pageblock_order		min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER - 1)

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

#else /* CONFIG_HUGETLB_PAGE */

/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
#define pageblock_order		(MAX_ORDER - 1)

#endif /* CONFIG_HUGETLB_PAGE */
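
/*
 * Worked example (a common x86-64 configuration, for illustration only):
 * with 4KiB base pages, HUGETLB_PAGE_ORDER is 9 (2MiB huge pages) and
 * MAX_ORDER is 11. With CONFIG_HUGETLB_PAGE, pageblock_order resolves to
 * min(9, 10) == 9, so a pageblock covers exactly one 2MiB huge page;
 * without it, pageblock_order is 10 and a pageblock is one 4MiB
 * max-order allocation.
 */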

#define pageblock_nr_pages	(1UL << pageblock_order)
#define pageblock_align(pfn)	ALIGN((pfn), pageblock_nr_pages)
#define pageblock_aligned(pfn)	IS_ALIGNED((pfn), pageblock_nr_pages)
#define pageblock_start_pfn(pfn)	ALIGN_DOWN((pfn), pageblock_nr_pages)
#define pageblock_end_pfn(pfn)		ALIGN((pfn) + 1, pageblock_nr_pages)
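
/*
 * Worked example (illustrative numbers, assuming pageblock_order == 9,
 * i.e. pageblock_nr_pages == 512):
 *
 *	pageblock_start_pfn(1000) == 512	(round down to block start)
 *	pageblock_end_pfn(1000)   == 1024	(first pfn past the block)
 *	pageblock_align(1000)     == 1024	(round up to a boundary)
 *	pageblock_aligned(512)    is true	(512 is a boundary)
 *
 * Note that pageblock_end_pfn() aligns pfn + 1, so a pfn already on a
 * boundary still yields the end of its own block, not the pfn itself.
 */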

/* Forward declaration */
struct page;

unsigned long get_pfnblock_flags_mask(const struct page *page,
				unsigned long pfn,
				unsigned long mask);

void set_pfnblock_flags_mask(struct page *page,
				unsigned long flags,
				unsigned long pfn,
				unsigned long mask);

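/*
 * Usage sketch (illustrative, not a guaranteed API pattern): callers such
 * as mm/page_alloc.c read the migrate type by masking the low
 * PB_migratetype_bits, roughly:
 *
 *	unsigned long mt = get_pfnblock_flags_mask(page, page_to_pfn(page),
 *					(1UL << PB_migratetype_bits) - 1);
 *
 * The mask selects which pageblock bits to fetch; the pfn locates the
 * block's bits within the zone's pageblock bitmap.
 */
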
/* Declarations for getting and setting flags. See mm/page_alloc.c */
#ifdef CONFIG_COMPACTION
#define get_pageblock_skip(page) \
	get_pfnblock_flags_mask(page, page_to_pfn(page),	\
			(1 << PB_migrate_skip))
#define clear_pageblock_skip(page) \
	set_pfnblock_flags_mask(page, 0, page_to_pfn(page),	\
			(1 << PB_migrate_skip))
#define set_pageblock_skip(page) \
	set_pfnblock_flags_mask(page, (1 << PB_migrate_skip),	\
			page_to_pfn(page),			\
			(1 << PB_migrate_skip))
#else
static inline bool get_pageblock_skip(struct page *page)
{
	return false;
}
static inline void clear_pageblock_skip(struct page *page)
{
}
static inline void set_pageblock_skip(struct page *page)
{
}
#endif /* CONFIG_COMPACTION */
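
/*
 * Illustrative use of the skip hint (a sketch of the pattern in
 * mm/compaction.c, not its exact code; "nothing_isolated" is a stand-in
 * condition):
 *
 *	if (get_pageblock_skip(page))
 *		return;				// unprofitable last pass
 *	... scan the pageblock ...
 *	if (nothing_isolated)
 *		set_pageblock_skip(page);	// remember for next pass
 *
 * With CONFIG_COMPACTION disabled, the stubs above make the hint free:
 * get_pageblock_skip() is constant-false and the setters compile away.
 */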

#endif /* PAGEBLOCK_FLAGS_H */