/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_group;
struct resource;
struct vmem_altmap;
struct dev_pagemap;

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add, a new pgdat must be allocated.
 *
 * If the architecture implements NODE_DATA() in the generic style,
 * node_data[nid] = kzalloc() works well, but this depends on the
 * architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)

#ifdef CONFIG_NUMA
/*
 * XXX: node-aware allocation can't be used to obtain the new node's
 * memory at this point, because the pgdat for the new node has not been
 * allocated/initialized yet. Making use of the new node's memory will
 * require further work.
 */
/* Note: relies on a pg_data_t *pgdat being in scope at the expansion site. */
#define generic_alloc_nodedata(nid)					\
({									\
	memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES);		\
})

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);

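/*
 * Example (illustrative sketch only): callers that may race with memory
 * offlining should look the page up via pfn_to_online_page() rather than
 * calling pfn_to_page() directly:
 *
 *	struct page *page = pfn_to_online_page(pfn);
 *
 *	if (!page)
 *		return -ENXIO;	// hole, or section not online
 */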
/* Types to control the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. Zone depends, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 */
#define MHP_MEMMAP_ON_MEMORY	((__force mhp_t)BIT(1))
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	   (required)
 * pgmap:  device page map backing the memmap of the hot-added range,
 *	   if any (optional)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
	struct dev_pagemap *pgmap;
};

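/*
 * Example (a minimal sketch, not the canonical setup): hot-adding plain
 * System RAM typically uses default kernel page protections and leaves
 * the optional fields unset:
 *
 *	struct mhp_params params = { .pgprot = PAGE_KERNEL };
 *
 *	rc = arch_add_memory(nid, start, size, &params);
 */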
bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);

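/*
 * Example (illustrative): before hot-adding memory that requires a linear
 * mapping, the range can be validated against what the architecture is
 * able to map:
 *
 *	if (!mhp_range_allowed(start, size, true))
 *		return -E2BIG;
 */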
/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone must hold both pgdat_resize_lock()
 * and zone_span_writelock(). This ensures the size of a zone can't be
 * changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
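/*
 * Example (illustrative sketch): a lockless reader that needs a
 * consistent snapshot of a zone's span retries until no resize raced
 * with the reads:
 *
 *	unsigned long start_pfn, nr_pages;
 *	unsigned seq;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */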
extern void adjust_present_page_count(struct page *page,
				      struct memory_group *group,
				      long nr_pages);
/* VM interface that may be used by firmware interface */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone, struct memory_group *group);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);

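/*
 * Example (a sketch, loosely modelled on the balloon drivers): a driver
 * can intercept onlining of new pages instead of having them handed
 * straight to the page allocator, and must restore the default when done:
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		// decide whether to keep the page, else release it:
 *		generic_online_page(page, order);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */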
extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* Set when the movable_node boot option has been specified. */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

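/*
 * Example (illustrative): a sysfs-style store path maps the user-supplied
 * string ("online", "online_kernel", ...) to an MMOP_* value and rejects
 * anything unknown:
 *
 *	const int online_type = mhp_online_type_from_str(buf);
 *
 *	if (online_type < 0)
 *		return -EINVAL;
 */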
extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
			    unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);

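/*
 * Example (illustrative): a walker that must not race with memory
 * hot(un)plug brackets the walk with the reader-side helpers; the hotplug
 * paths themselves serialize via mem_hotplug_begin()/mem_hotplug_done():
 *
 *	get_online_mems();
 *	for_each_online_pgdat(pgdat)
 *		inspect(pgdat);		// hypothetical per-node work
 *	put_online_mems();
 */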
/* See kswapd_is_running() */
static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
	mutex_lock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
	mutex_unlock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
	mutex_init(&pgdat->kswapd_lock);
}

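/*
 * Example (a sketch; the check shown is illustrative only): readers of
 * pgdat->kswapd take the lock so hotplug cannot tear the thread down
 * underneath them:
 *
 *	pgdat_kswapd_lock(pgdat);
 *	running = pgdat->kswapd != NULL;
 *	pgdat_kswapd_unlock(pgdat);
 */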
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
 })

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}

static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG, as some platforms
 * might override it and use arch_get_mappable_range() for internal,
 * non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

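/*
 * Example (illustrative): a writer updating a node's span takes the
 * IRQ-safe resize lock around the update:
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	pgdat->node_start_pfn = new_start_pfn;	// hypothetical update
 *	pgdat_resize_unlock(pgdat, &flags);
 */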
#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
			 struct zone *zone, struct memory_group *group);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
				struct zone *zone, struct memory_group *group)
{
	return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
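/*
 * Example (a sketch loosely modelled on drivers such as dax/kmem; the
 * "mydriver" resource name is hypothetical): driver-managed memory is
 * tagged so it is not confused with firmware-provided System RAM, and
 * adjacent resources may be merged:
 *
 *	rc = add_memory_driver_managed(nid, start, size,
 *				       "System RAM (mydriver)",
 *				       MHP_MERGE_RESOURCE);
 */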
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern int sparse_add_section(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap);
extern void sparse_remove_section(struct mem_section *ms,
		unsigned long pfn, unsigned long nr_pages,
		unsigned long map_offset, struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		struct memory_group *group, unsigned long start_pfn,
		unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */