/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/bug.h>

struct page;
struct zone;
struct pglist_data;
struct mem_section;
struct memory_group;
struct resource;
struct vmem_altmap;
struct dev_pagemap;

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * To support node hot-add we have to allocate a new pgdat.
 *
 * If an architecture uses the generic style of NODE_DATA(),
 * node_data[nid] = kzalloc() would be enough, but this is
 * architecture dependent.
 *
 * In general, generic_alloc_nodedata() is used.
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)

#ifdef CONFIG_NUMA
/*
 * XXX: node-aware allocation cannot place this allocation on the new
 *	node's memory yet, because the pgdat for the new node is itself
 *	not allocated/initialized at this point. Using the new node's
 *	own memory will require more work.
 */
#define generic_alloc_nodedata(nid)				\
({								\
	memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES);	\
})

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}
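
/*
 * A minimal sketch of how the two hooks above pair up during node
 * hot-add (illustrative only, not the canonical in-tree path):
 *
 *	pg_data_t *pgdat = arch_alloc_nodedata(nid);
 *
 *	if (!pgdat)
 *		return -ENOMEM;
 *	...initialize the fresh pgdat...
 *	arch_refresh_nodedata(nid, pgdat);
 *
 * After arch_refresh_nodedata(), NODE_DATA(nid) resolves to the new
 * pgdat.
 */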

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#ifdef CONFIG_MEMORY_HOTPLUG
struct page *pfn_to_online_page(unsigned long pfn);

/* Types to control the zone type of onlined and offlined memory */
enum {
	/* Offline the memory. */
	MMOP_OFFLINE = 0,
	/* Online the memory. The zone is chosen automatically, see default_zone_for_pfn(). */
	MMOP_ONLINE,
	/* Online the memory to ZONE_NORMAL. */
	MMOP_ONLINE_KERNEL,
	/* Online the memory to ZONE_MOVABLE. */
	MMOP_ONLINE_MOVABLE,
};
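
/*
 * These values back the memory block "state"/"online_type" sysfs
 * attributes. For example (sketch), writing "online_movable" to
 * /sys/devices/system/memory/memoryN/state is parsed via
 * mhp_online_type_from_str() into MMOP_ONLINE_MOVABLE before the
 * block is onlined.
 */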

/* Flags for add_memory() and friends to specify memory hotplug details. */
typedef int __bitwise mhp_t;

/* No special request */
#define MHP_NONE		((__force mhp_t)0)
/*
 * Allow merging of the added System RAM resource with adjacent,
 * mergeable resources. After a successful call to add_memory_resource()
 * with this flag set, the resource pointer must no longer be used as it
 * might be stale, or the resource might have changed.
 */
#define MHP_MERGE_RESOURCE	((__force mhp_t)BIT(0))

/*
 * We want the memmap (struct page array) to be self-contained.
 * To do so, we will use the beginning of the hot-added range to build
 * the page tables for the memmap array that describes the entire range.
 * Only selected architectures support it with SPARSE_VMEMMAP.
 * This is only a hint; the core kernel can decide not to do this based
 * on alignment checks.
 */
#define MHP_MEMMAP_ON_MEMORY   ((__force mhp_t)BIT(1))
/*
 * The nid field specifies a memory group id (mgid) instead. The memory group
 * implies the node id (nid).
 */
#define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))
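
/*
 * Sketch of how a driver might combine these flags when hot-adding
 * driver-managed memory (resource name and error handling are
 * illustrative; see virtio-mem or dax/kmem for real users):
 *
 *	rc = add_memory_driver_managed(nid, start, size,
 *				       "System RAM (example)",
 *				       MHP_MERGE_RESOURCE | MHP_MEMMAP_ON_MEMORY);
 *	if (rc)
 *		...nothing was added, back off...
 */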

/*
 * Extended parameters for memory hotplug:
 * altmap: alternative allocator for memmap array (optional)
 * pgprot: page protection flags to apply to newly created page tables
 *	(required)
 * pgmap: struct dev_pagemap describing the range when hot-adding
 *	ZONE_DEVICE memory (optional)
 */
struct mhp_params {
	struct vmem_altmap *altmap;
	pgprot_t pgprot;
	struct dev_pagemap *pgmap;
};
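
/*
 * Typical construction (sketch): hot-adding ordinary System RAM only
 * needs pgprot,
 *
 *	struct mhp_params params = { .pgprot = PAGE_KERNEL };
 *
 *	rc = arch_add_memory(nid, start, size, &params);
 *
 * while ZONE_DEVICE users additionally fill in altmap and/or pgmap.
 */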

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping);
struct range mhp_get_pluggable_range(bool need_mapping);

/*
 * Zone resizing functions
 *
 * Note: any attempt to resize a zone must hold both pgdat_resize_lock()
 * and zone_span_writelock(). This ensures the span of a zone can't be
 * changed while pgdat_resize_lock() is held.
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
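
/*
 * Lockless readers of the zone span use the usual seqlock retry
 * pattern; a sketch:
 *
 *	unsigned seq;
 *	unsigned long start_pfn, nr_pages;
 *
 *	do {
 *		seq = zone_span_seqbegin(zone);
 *		start_pfn = zone->zone_start_pfn;
 *		nr_pages = zone->spanned_pages;
 *	} while (zone_span_seqretry(zone, seq));
 */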
extern void adjust_present_page_count(struct page *page,
				      struct memory_group *group,
				      long nr_pages);
/* VM interface that may be used by firmware interfaces */
extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
				     struct zone *zone);
extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
extern int online_pages(unsigned long pfn, unsigned long nr_pages,
			struct zone *zone, struct memory_group *group);
extern void __offline_isolated_pages(unsigned long start_pfn,
				     unsigned long end_pfn);

typedef void (*online_page_callback_t)(struct page *page, unsigned int order);

extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
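
/*
 * Balloon-style drivers (e.g. the Hyper-V or XEN balloon) intercept
 * page onlining through this callback. A sketch, where my_online_page()
 * and my_page_backed() are illustrative names, not part of this API:
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		if (my_page_backed(page))
 *			generic_online_page(page, order);
 *		else
 *			...defer until backing memory is available...
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	rc = restore_online_page_callback(&my_online_page);
 */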

extern int try_online_node(int nid);

extern int arch_add_memory(int nid, u64 start, u64 size,
			   struct mhp_params *params);
extern u64 max_mem_size;

extern int mhp_online_type_from_str(const char *str);

/* Default online_type (MMOP_*) when new memory blocks are added. */
extern int mhp_default_online_type;
/* Set if the movable_node boot option was specified. */
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}

extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap);
extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap);

/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		       struct mhp_params *params);

#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct mhp_params *params)
{
	return __add_pages(nid, start_pfn, nr_pages, params);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params);
#endif /* ARCH_HAS_ADD_PAGES */

void get_online_mems(void);
void put_online_mems(void);

void mem_hotplug_begin(void);
void mem_hotplug_done(void);
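
/*
 * Usage pattern (sketch): readers that must prevent memory from going
 * away (or coming online) under them bracket the access:
 *
 *	get_online_mems();
 *	...pages returned by pfn_to_online_page() stay online here...
 *	put_online_mems();
 *
 * The hotplug paths themselves serialize against those readers via
 * mem_hotplug_begin()/mem_hotplug_done().
 */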

/* See kswapd_is_running() */
static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
{
	mutex_lock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
{
	mutex_unlock(&pgdat->kswapd_lock);
}

static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
{
	mutex_init(&pgdat->kswapd_lock);
}

#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn)			\
({						\
	struct page *___page = NULL;		\
	if (pfn_valid(pfn))			\
		___page = pfn_to_page(pfn);	\
	___page;				\
})

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int try_online_node(int nid)
{
	return 0;
}

static inline void get_online_mems(void) {}
static inline void put_online_mems(void) {}

static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}

static inline bool movable_node_is_enabled(void)
{
	return false;
}

static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */

/*
 * Keep this declaration outside CONFIG_MEMORY_HOTPLUG as some
 * platforms might override and use arch_get_mappable_range()
 * for internal, non-memory-hotplug purposes.
 */
struct range arch_get_mappable_range(void);

#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
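
/*
 * Writer-side pattern for resizing a node (sketch; which fields are
 * updated depends on the caller):
 *
 *	unsigned long flags;
 *
 *	pgdat_resize_lock(pgdat, &flags);
 *	...update pgdat->node_start_pfn / pgdat->node_spanned_pages...
 *	pgdat_resize_unlock(pgdat, &flags);
 */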
#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */

#ifdef CONFIG_MEMORY_HOTREMOVE

extern void try_offline_node(int nid);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
			 struct zone *zone, struct memory_group *group);
extern int remove_memory(u64 start, u64 size);
extern void __remove_memory(u64 start, u64 size);
extern int offline_and_remove_memory(u64 start, u64 size);
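
/*
 * remove_memory() fails with -EBUSY unless the range was fully
 * offlined first; offline_and_remove_memory() combines both steps.
 * Sketch, as done by drivers like virtio-mem:
 *
 *	rc = offline_and_remove_memory(start, size);
 *	if (rc)
 *		...range is still (partially) online, retry later...
 */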

#else
static inline void try_offline_node(int nid) {}

static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
				struct zone *zone, struct memory_group *group)
{
	return -EINVAL;
}

static inline int remove_memory(u64 start, u64 size)
{
	return -EBUSY;
}

static inline void __remove_memory(u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat);
extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags);
extern int add_memory_resource(int nid, struct resource *resource,
			       mhp_t mhp_flags);
extern int add_memory_driver_managed(int nid, u64 start, u64 size,
				     const char *resource_name,
				     mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct vmem_altmap *altmap, int migratetype);
extern void remove_pfn_range_from_zone(struct zone *zone,
				       unsigned long start_pfn,
				       unsigned long nr_pages);
extern int sparse_add_section(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap);
extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
				  struct vmem_altmap *altmap);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
					  unsigned long pnum);
extern struct zone *zone_for_pfn_range(int online_type, int nid,
		struct memory_group *group, unsigned long start_pfn,
		unsigned long nr_pages);
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
				      struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
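
/*
 * End-to-end sketch of hot-adding System RAM with the interfaces above
 * (error handling elided; see mm/memory_hotplug.c for the
 * authoritative flow):
 *
 *	if (!mhp_range_allowed(start, size, true))
 *		return -ERANGE;
 *	rc = add_memory(nid, start, size, MHP_NONE);
 *
 * add_memory() creates the resource, calls arch_add_memory() under
 * mem_hotplug_begin(), and the resulting memory blocks are onlined
 * later according to mhp_default_online_type or by userspace.
 */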
#endif /* CONFIG_MEMORY_HOTPLUG */

#endif /* __LINUX_MEMORY_HOTPLUG_H */