/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_group;
struct resource;
struct vmem_altmap;
struct dev_pagemap;

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, we have to allocate a new pgdat.
 *
 * If an architecture has a generic-style NODE_DATA(),
 * node_data[nid] = kzalloc() works well, but whether it does depends on
 * the architecture. In general, generic_alloc_nodedata() is used.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)

#ifdef CONFIG_NUMA
/*
 * XXX: Node-aware allocation cannot be used to take memory from the new
 * node at this point, because the new node's pgdat is not itself
 * allocated/initialized yet. Using the new node's own memory will need
 * more consideration.
 */
#define generic_alloc_nodedata(nid)	\
({	\
	memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES);	\
})
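/*
 * Note: sizeof(*pgdat) deliberately refers to the caller's local
 * "pgdat" variable; sizeof() does not evaluate its operand, so this
 * only requires that a pg_data_t *pgdat be in scope at the call site.
 */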

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);
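/*
 * Example (illustrative): pfn walkers use pfn_to_online_page() to skip
 * holes and offline ranges whose memmap must not be touched:
 *
 *	struct page *page = pfn_to_online_page(pfn);
 *
 *	if (!page)
 *		continue;	// hole or offline section
 */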

/* Types to control the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))

/*
 * We want the memmap (struct page array) to be self-contained:
 * the beginning of the hot-added range is used to build the page
 * tables for the memmap array that describes the entire range.
 * Only selected architectures support this with SPARSE_VMEMMAP.
 */
#define MHP_MEMMAP_ON_MEMORY	((__force mhp_t)BIT(1))
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))
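/*
 * Example (illustrative): the flags form a bitmask, so callers may
 * combine them, e.g. when hot-adding RAM whose memmap should live on
 * the added range itself:
 *
 *	rc = add_memory(nid, start, size,
 *			MHP_MERGE_RESOURCE | MHP_MEMMAP_ON_MEMORY);
 */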

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	(required)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
	struct dev_pagemap *pgmap;
};
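/*
 * Example (illustrative): a minimal sketch of filling in mhp_params for
 * regular, cached kernel mappings; actual callers may pick a different
 * pgprot (e.g. for uncached device memory):
 *
 *	struct mhp_params params = {
 *		.pgprot = PAGE_KERNEL,
 *	};
 *
 *	rc = arch_add_memory(nid, start, size, &params);
 */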

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone must hold both pgdat_resize_lock()
 * and zone_span_writelock(). This ensures the size of a zone can't be
 * changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
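/*
 * Example (illustrative): readers sample the zone span under the
 * seqlock and retry if a concurrent resize raced with them:
 *
 *	unsigned int seq;
 *	unsigned long start_pfn, nr_pages;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */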
extern void adjust_present_page_count(struct page *page,
				      struct memory_group *group,
				      long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone, struct memory_group *group);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
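/*
 * Example (illustrative): a memory-ballooning driver can intercept
 * freshly onlined pages instead of releasing them to the page
 * allocator; my_online_page() is a hypothetical callback:
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		// hold the pages back until the hypervisor grants them,
 *		// otherwise hand them over:
 *		generic_online_page(page, order);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	rc = restore_online_page_callback(&my_online_page);
 */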

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* If movable_node boot option specified */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
			    unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

/* See kswapd_is_running() */
static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
	mutex_lock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
	mutex_unlock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
	mutex_init(&pgdat->kswapd_lock);
}

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}

static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG, as some
 * platforms might override arch_get_mappable_range() and use it
 * for internal, non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
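/*
 * Example (illustrative): writers take the IRQ-safe resize lock around
 * updates to the node span:
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	// update pgdat->node_start_pfn / pgdat->node_spanned_pages
 *	pgdat_resize_unlock(pgdat, &flags);
 */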
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
			 struct zone *zone, struct memory_group *group);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
				struct zone *zone, struct memory_group *group)
{
	return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
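/*
 * Example (illustrative): drivers exposing dedicated memory as System
 * RAM (e.g. dax/kmem) use the driver-managed variant so the resource is
 * clearly attributed; the resource name is expected to follow the
 * "System RAM ($DRIVER)" convention:
 *
 *	rc = add_memory_driver_managed(nid, start, size,
 *				       "System RAM (kmem)", MHP_NONE);
 */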
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern int sparse_add_section(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap);
extern void sparse_remove_section(struct mem_section *ms,
		unsigned long pfn, unsigned long nr_pages,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		struct memory_group *group, unsigned long start_pfn,
		unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */