// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

#define MEMORY_CLASS_NAME	"memory"

static const char *const online_type_to_str[] = {
	[MMOP_OFFLINE] = "offline",
	[MMOP_ONLINE] = "online",
	[MMOP_ONLINE_KERNEL] = "online_kernel",
	[MMOP_ONLINE_MOVABLE] = "online_movable",
};

int mhp_online_type_from_str(const char *str)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
		if (sysfs_streq(str, online_type_to_str[i]))
			return i;
	}
	return -EINVAL;
}
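
/*
 * Example (illustrative only): sysfs_streq() ignores a trailing newline, so
 * input written via sysfs matches directly, and the returned index equals
 * the MMOP_* value used to initialize online_type_to_str[]:
 *
 *	mhp_online_type_from_str("online_movable\n") == MMOP_ONLINE_MOVABLE
 *	mhp_online_type_from_str("bogus") == -EINVAL
 */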

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;

static inline unsigned long memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
	return memory_block_id(pfn_to_section_nr(pfn));
}

static inline unsigned long phys_to_block_id(unsigned long phys)
{
	return pfn_to_block_id(PFN_DOWN(phys));
}

static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};

/*
 * Memory blocks are cached in a local radix tree to avoid
 * a costly linear search for the corresponding device on
 * the subsystem bus.
 */
static DEFINE_XARRAY(memory_blocks);

/*
 * Memory groups, indexed by memory group id (mgid).
 */
static DEFINE_XARRAY_FLAGS(memory_groups, XA_FLAGS_ALLOC);
#define MEMORY_GROUP_MARK_DYNAMIC	XA_MARK_1

static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);
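
/*
 * Example (sketch, not from this file): a hypothetical driver reacting to
 * memory hotplug events. The callback receives a struct memory_notify via
 * the opaque pointer; foo_prepare()/foo_forget() are made-up driver hooks:
 *
 *	static int foo_memory_callback(struct notifier_block *nb,
 *				       unsigned long action, void *arg)
 *	{
 *		struct memory_notify *mn = arg;
 *
 *		switch (action) {
 *		case MEM_GOING_ONLINE:
 *			return notifier_from_errno(foo_prepare(mn->start_pfn,
 *							       mn->nr_pages));
 *		case MEM_OFFLINE:
 *			foo_forget(mn->start_pfn, mn->nr_pages);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_memory_nb = {
 *		.notifier_call = foo_memory_callback,
 *	};
 *
 *	...
 *	register_memory_notifier(&foo_memory_nb);
 */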

static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	/* Verify that the altmap is freed */
	WARN_ON(mem->altmap);
	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);

/* Show the memory block ID, relative to the memory block size */
static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);

	return sysfs_emit(buf, "%08lx\n", memory_block_id(mem->start_section_nr));
}

/*
 * Legacy interface that we cannot remove. Always indicate "removable"
 * with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
 */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	const char *output;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		output = "online";
		break;
	case MEM_OFFLINE:
		output = "offline";
		break;
	case MEM_GOING_OFFLINE:
		output = "going-offline";
		break;
	default:
		WARN_ON(1);
		return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
	}

	return sysfs_emit(buf, "%s\n", output);
}

int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
static unsigned long memblk_nr_poison(struct memory_block *mem);
#else
static inline unsigned long memblk_nr_poison(struct memory_block *mem)
{
	return 0;
}
#endif

static int memory_block_online(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = 0;
	struct zone *zone;
	int ret;

	if (memblk_nr_poison(mem))
		return -EHWPOISON;

	zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
				  start_pfn, nr_pages);

	/*
	 * Although vmemmap pages have a different lifecycle than the pages
	 * they describe (they remain until the memory is unplugged), doing
	 * their initialization and accounting at the memory onlining/offlining
	 * stage keeps the accounting easier to follow - e.g., vmemmap pages
	 * belong to the same zone as the memory they describe.
	 */
	if (mem->altmap)
		nr_vmemmap_pages = mem->altmap->free;

	if (nr_vmemmap_pages) {
		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
		if (ret)
			return ret;
	}

	ret = online_pages(start_pfn + nr_vmemmap_pages,
			   nr_pages - nr_vmemmap_pages, zone, mem->group);
	if (ret) {
		if (nr_vmemmap_pages)
			mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
		return ret;
	}

	/*
	 * Account once onlining succeeded. If the zone was unpopulated, it is
	 * now already properly populated.
	 */
	if (nr_vmemmap_pages)
		adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
					  nr_vmemmap_pages);

	mem->zone = zone;
	return ret;
}
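
/*
 * Worked example (illustrative, x86-64-like numbers): with a 128 MiB memory
 * block (32768 4 KiB pages) and "memmap_on_memory" carving out 512 pages for
 * the vmemmap (32768 * 64 bytes of struct page = 2 MiB), memory_block_online()
 * initializes the 512 vmemmap pages in the target zone first, then calls
 *
 *	online_pages(start_pfn + 512, 32768 - 512, zone, mem->group);
 *
 * and finally adds the 512 vmemmap pages to the zone's present pages via
 * adjust_present_page_count().
 */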

static int memory_block_offline(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = 0;
	int ret;

	if (!mem->zone)
		return -EINVAL;

	/*
	 * Unaccount before offlining, such that unpopulated zone and kthreads
	 * can properly be torn down in offline_pages().
	 */
	if (mem->altmap)
		nr_vmemmap_pages = mem->altmap->free;

	if (nr_vmemmap_pages)
		adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
					  -nr_vmemmap_pages);

	ret = offline_pages(start_pfn + nr_vmemmap_pages,
			    nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
	if (ret) {
		/* offline_pages() failed. Account back. */
		if (nr_vmemmap_pages)
			adjust_present_page_count(pfn_to_page(start_pfn),
						  mem->group, nr_vmemmap_pages);
		return ret;
	}

	if (nr_vmemmap_pages)
		mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);

	mem->zone = NULL;
	return ret;
}

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(struct memory_block *mem, unsigned long action)
{
	int ret;

	switch (action) {
	case MEM_ONLINE:
		ret = memory_block_online(mem);
		break;
	case MEM_OFFLINE:
		ret = memory_block_offline(mem);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: %ld\n",
		     __func__, mem->start_section_nr, action, action);
		ret = -EINVAL;
	}

	return ret;
}

static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem, to_state);
	mem->state = ret ? from_state_req : to_state;

	return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * When called via device_online() without configuring the online_type,
	 * we want to default to MMOP_ONLINE.
	 */
	if (mem->online_type == MMOP_OFFLINE)
		mem->online_type = MMOP_ONLINE;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
	mem->online_type = MMOP_OFFLINE;

	return ret;
}

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (online_type < 0)
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}
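
/*
 * Example (illustrative; the block id below is made up): from user space, a
 * block is onlined/offlined by writing one of the strings from
 * online_type_to_str[] to the per-block "state" file. Note that state_show()
 * reports plain "online" regardless of which online variant was requested:
 *
 *	# echo online_movable > /sys/devices/system/memory/memory32/state
 *	# cat /sys/devices/system/memory/memory32/state
 *	online
 *	# echo offline > /sys/devices/system/memory/memory32/state
 */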

/*
 * Legacy interface that we cannot remove: s390x exposes the storage increment
 * covered by a memory block, allowing for identifying which memory blocks
 * comprise a storage increment. Since a memory block spans complete
 * storage increments nowadays, this interface is basically unused. Other
 * architectures never exposed a value other than 0.
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);

	return sysfs_emit(buf, "%d\n",
			  arch_get_memory_phys_device(start_pfn));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
			      struct memory_group *group,
			      unsigned long start_pfn, unsigned long nr_pages,
			      int online_type, struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
	if (zone == default_zone)
		return 0;

	return sysfs_emit_at(buf, len, " %s", zone->name);
}

static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct memory_group *group = mem->group;
	struct zone *default_zone;
	int nid = mem->nid;
	int len = 0;

	/*
	 * Check the existing zone. Make sure that we do that only on the
	 * online nodes, otherwise the page_zone is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * If !mem->zone, the memory block spans multiple zones and
		 * cannot get offlined.
		 */
		default_zone = mem->zone;
		if (!default_zone)
			return sysfs_emit(buf, "%s\n", "none");
		len += sysfs_emit_at(buf, len, "%s", default_zone->name);
		goto out;
	}

	default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
					  start_pfn, nr_pages);

	len += sysfs_emit_at(buf, len, "%s", default_zone->name);
	len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
				  MMOP_ONLINE_KERNEL, default_zone);
	len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
				  MMOP_ONLINE_MOVABLE, default_zone);
out:
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR_RO(valid_zones);
#endif

static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);

/*
 * Show the memory block size (shared by all memory blocks).
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", memory_block_size_bytes());
}

static DEVICE_ATTR_RO(block_size_bytes);

/*
 * Memory auto online policy.
 */

static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  online_type_to_str[mhp_default_online_type]);
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);

	if (online_type < 0)
		return -EINVAL;

	mhp_default_online_type = online_type;
	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);

/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace.  The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block,
			   MHP_NONE);

	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}

static DEVICE_ATTR_WO(probe);
#endif
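
/*
 * Example (illustrative address): with CONFIG_ARCH_MEMORY_PROBE, memory that
 * the platform cannot discover itself is hot-added by writing its physical
 * start address to the global "probe" file:
 *
 *	# echo 0x40000000 > /sys/devices/system/memory/probe
 *
 * The address must be aligned to the memory block size, or probe_store()
 * returns -EINVAL.
 */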

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = soft_offline_page(pfn, 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, MF_SW_SIMULATED);
	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret ? ret : count;
}

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif
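
/*
 * Example (illustrative address): despite the variable name "pfn", both files
 * take a physical address; the stores shift it right by PAGE_SHIFT to obtain
 * the PFN of the containing page before simulating a memory error on it:
 *
 *	# echo 0x7f9a2000 > /sys/devices/system/memory/soft_offline_page
 *	# echo 0x7f9a2000 > /sys/devices/system/memory/hard_offline_page
 */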

/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/*
 * A reference for the returned memory block device is acquired.
 *
 * Called under device_hotplug_lock.
 */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
	struct memory_block *mem;

	mem = xa_load(&memory_blocks, block_id);
	if (mem)
		get_device(&mem->dev);
	return mem;
}

/*
 * Called under device_hotplug_lock.
 */
struct memory_block *find_memory_block(unsigned long section_nr)
{
	unsigned long block_id = memory_block_id(section_nr);

	return find_memory_block_by_id(block_id);
}
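
/*
 * Example (sketch): any caller of find_memory_block() owns a device
 * reference afterwards and must drop it when done:
 *
 *	struct memory_block *mem = find_memory_block(section_nr);
 *
 *	if (mem) {
 *		... use mem ...
 *		put_device(&mem->dev);
 *	}
 */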

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static const struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};

static int __add_memory_block(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret) {
		put_device(&memory->dev);
		return ret;
	}
	ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
			      GFP_KERNEL));
	if (ret)
		device_unregister(&memory->dev);

	return ret;
}

static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
						     int nid)
{
	const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct zone *zone, *matching_zone = NULL;
	pg_data_t *pgdat = NODE_DATA(nid);
	int i;

	/*
	 * This logic only works for early memory, when the applicable zones
	 * already span the memory block. We don't expect overlapping zones on
	 * a single node for early memory. So if we're told that some PFNs
	 * of a node fall into this memory block, we can assume that all node
	 * zones that intersect with the memory block are actually applicable.
	 * No need to look at the memmap.
	 */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		zone = pgdat->node_zones + i;
		if (!populated_zone(zone))
			continue;
		if (!zone_intersects(zone, start_pfn, nr_pages))
			continue;
		if (!matching_zone) {
			matching_zone = zone;
			continue;
		}
		/* Spans multiple zones ... */
		matching_zone = NULL;
		break;
	}
	return matching_zone;
}

#ifdef CONFIG_NUMA
/**
 * memory_block_add_nid() - Indicate that system RAM falling into this memory
 *			    block device (partially) belongs to the given node.
 * @mem: The memory block device.
 * @nid: The node id.
 * @context: The memory initialization context.
 *
 * Indicate that system RAM falling into this memory block (partially) belongs
 * to the given node. If the context indicates ("early") that we are adding the
 * node during node device subsystem initialization, this will also properly
 * set/adjust mem->zone based on the zone ranges of the given node.
 */
void memory_block_add_nid(struct memory_block *mem, int nid,
			  enum meminit_context context)
{
	if (context == MEMINIT_EARLY && mem->nid != nid) {
		/*
		 * For early memory we have to determine the zone when setting
		 * the node id and handle multiple nodes spanning a single
		 * memory block by indicating via zone == NULL that we're not
		 * dealing with a single zone. So if we're setting the node id
		 * the first time, determine if there is a single zone. If we're
		 * setting the node id a second time to a different node,
		 * invalidate the single detected zone.
		 */
		if (mem->nid == NUMA_NO_NODE)
			mem->zone = early_node_zone_for_memory_block(mem, nid);
		else
			mem->zone = NULL;
	}

	/*
	 * If this memory block spans multiple nodes, we only indicate
	 * the last processed node. If we span multiple nodes (not applicable
	 * to hotplugged memory), zone == NULL will prohibit memory offlining
	 * and consequently unplug.
	 */
	mem->nid = nid;
}
#endif

static int add_memory_block(unsigned long block_id, unsigned long state,
			    struct vmem_altmap *altmap,
			    struct memory_group *group)
{
	struct memory_block *mem;
	int ret = 0;

	mem = find_memory_block_by_id(block_id);
	if (mem) {
		put_device(&mem->dev);
		return -EEXIST;
	}
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->start_section_nr = block_id * sections_per_block;
	mem->state = state;
	mem->nid = NUMA_NO_NODE;
	mem->altmap = altmap;
	INIT_LIST_HEAD(&mem->group_next);

#ifndef CONFIG_NUMA
	if (state == MEM_ONLINE)
		/*
		 * MEM_ONLINE at this point implies early memory. With NUMA,
		 * we'll determine the zone when setting the node id via
		 * memory_block_add_nid(). Memory hotplug updates the zone
		 * manually when memory onlining/offlining succeeds.
		 */
		mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
#endif /* CONFIG_NUMA */

	ret = __add_memory_block(mem);
	if (ret)
		return ret;

	if (group) {
		mem->group = group;
		list_add(&mem->group_next, &group->memory_blocks);
	}

	return 0;
}

static int __init add_boot_memory_block(unsigned long base_section_nr)
{
	int section_count = 0;
	unsigned long nr;

	for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
	     nr++)
		if (present_section_nr(nr))
			section_count++;

	if (section_count == 0)
		return 0;
	return add_memory_block(memory_block_id(base_section_nr),
				MEM_ONLINE, NULL, NULL);
}

static int add_hotplug_memory_block(unsigned long block_id,
				    struct vmem_altmap *altmap,
				    struct memory_group *group)
{
	return add_memory_block(block_id, MEM_OFFLINE, altmap, group);
}

static void remove_memory_block(struct memory_block *memory)
{
	if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
		return;

	WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);

	if (memory->group) {
		list_del(&memory->group_next);
		memory->group = NULL;
	}

	/* drop the ref. we got via find_memory_block() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}

/*
 * Create memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * will be initialized as offline.
 *
 * Called under device_hotplug_lock.
 */
int create_memory_block_devices(unsigned long start, unsigned long size,
				struct vmem_altmap *altmap,
				struct memory_group *group)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return -EINVAL;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		ret = add_hotplug_memory_block(block_id, altmap, group);
		if (ret)
			break;
	}
	if (ret) {
		end_block_id = block_id;
		for (block_id = start_block_id; block_id != end_block_id;
		     block_id++) {
			mem = find_memory_block_by_id(block_id);
			if (WARN_ON_ONCE(!mem))
				continue;
			remove_memory_block(mem);
		}
	}
	return ret;
}

/*
 * Remove memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * have to be offline.
 *
 * Called under device_hotplug_lock.
 */
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (WARN_ON_ONCE(!mem))
			continue;
		num_poisoned_pages_sub(-1UL, memblk_nr_poison(mem));
		unregister_memory_block_under_nodes(mem);
		remove_memory_block(mem);
	}
}

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static const struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

/*
 * Initialize the sysfs support for memory devices. At the time this function
 * is called, we cannot have concurrent creation/deletion of memory block
 * devices, so the device_hotplug_lock is not needed.
 */
void __init memory_dev_init(void)
{
	int ret;
	unsigned long block_sz, nr;

	/* Validate the configured memory block size */
	block_sz = memory_block_size_bytes();
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
		panic("Memory block size not suitable: 0x%lx\n", block_sz);
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized.
	 */
	for (nr = 0; nr <= __highest_present_section_nr;
	     nr += sections_per_block) {
		ret = add_boot_memory_block(nr);
		if (ret)
			panic("%s() failed to add memory block: %d\n", __func__,
			      ret);
	}
}
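
/*
 * Worked example (illustrative numbers): MIN_MEMORY_BLOCK_SIZE is the
 * SPARSEMEM section size. With 128 MiB sections and an arch reporting a
 * 2 GiB memory_block_size_bytes(), memory_dev_init() computes
 *
 *	sections_per_block = 2 GiB / 128 MiB = 16
 *
 * so each /sys/devices/system/memory/memoryX device spans 16 sections.
 */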

/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 *
 * Called under device_hotplug_lock.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long start_block_id = phys_to_block_id(start);
	const unsigned long end_block_id = phys_to_block_id(start + size - 1);
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (!size)
		return 0;

	for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (!mem)
			continue;

		ret = func(mem, arg);
		put_device(&mem->dev);
		if (ret)
			break;
	}
	return ret;
}
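
/*
 * Example (sketch, hypothetical callback): count how many blocks in a range
 * are currently online. The walk stops early if the callback returns a
 * nonzero value:
 *
 *	static int count_online_cb(struct memory_block *mem, void *arg)
 *	{
 *		unsigned long *nr_online = arg;
 *
 *		if (mem->state == MEM_ONLINE)
 *			(*nr_online)++;
 *		return 0;
 *	}
 *
 *	unsigned long nr_online = 0;
 *
 *	walk_memory_blocks(start, size, &nr_online, count_online_cb);
 */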

struct for_each_memory_block_cb_data {
	walk_memory_blocks_func_t func;
	void *arg;
};

static int for_each_memory_block_cb(struct device *dev, void *data)
{
	struct memory_block *mem = to_memory_block(dev);
	struct for_each_memory_block_cb_data *cb_data = data;

	return cb_data->func(mem, cb_data->arg);
}

/**
 * for_each_memory_block - walk through all present memory blocks
 *
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks, calling func on
 * each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
{
	struct for_each_memory_block_cb_data cb_data = {
		.func = func,
		.arg = arg,
	};

	return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
				for_each_memory_block_cb);
}

/*
 * This is an internal helper to unify allocation and initialization of
 * memory groups. Note that the passed memory group will be copied to a
 * dynamically allocated memory group. After this call, the passed
 * memory group should no longer be used.
 */
static int memory_group_register(struct memory_group group)
{
	struct memory_group *new_group;
	uint32_t mgid;
	int ret;

	if (!node_possible(group.nid))
		return -EINVAL;

	new_group = kzalloc(sizeof(group), GFP_KERNEL);
	if (!new_group)
		return -ENOMEM;
	*new_group = group;
	INIT_LIST_HEAD(&new_group->memory_blocks);

	ret = xa_alloc(&memory_groups, &mgid, new_group, xa_limit_31b,
		       GFP_KERNEL);
	if (ret) {
		kfree(new_group);
		return ret;
	} else if (group.is_dynamic) {
		xa_set_mark(&memory_groups, mgid, MEMORY_GROUP_MARK_DYNAMIC);
	}
	return mgid;
}

/**
 * memory_group_register_static() - Register a static memory group.
 * @nid: The node id.
 * @max_pages: The maximum number of pages we'll have in this static memory
 *	       group.
 *
 * Register a new static memory group and return the memory group id.
 * All memory in the group belongs to a single unit, such as a DIMM. All
 * memory belonging to a static memory group is added in one go to be removed
 * in one go -- it's static.
 *
 * Returns an error if out of memory, if the node id is invalid, if no new
 * memory groups can be registered, or if max_pages is invalid (0). Otherwise,
 * returns the new memory group id.
 */
int memory_group_register_static(int nid, unsigned long max_pages)
{
	struct memory_group group = {
		.nid = nid,
		.s = {
			.max_pages = max_pages,
		},
	};

	if (!max_pages)
		return -EINVAL;
	return memory_group_register(group);
}
EXPORT_SYMBOL_GPL(memory_group_register_static);
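
/*
 * Example (sketch, modeled on dax/kmem; names are illustrative): a driver
 * backing a single DIMM-like device registers a static group and then
 * hot-adds memory to it, passing the group id in place of the node id via
 * the MHP_NID_IS_MGID flag:
 *
 *	int mgid = memory_group_register_static(nid, PHYS_PFN(total_len));
 *
 *	if (mgid < 0)
 *		return mgid;
 *	rc = add_memory_driver_managed(mgid, start, len, "System RAM (foo)",
 *				       MHP_NID_IS_MGID);
 */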

/**
 * memory_group_register_dynamic() - Register a dynamic memory group.
 * @nid: The node id.
 * @unit_pages: Unit in pages in which memory is added/removed in this
 *		dynamic memory group.
 *
 * Register a new dynamic memory group and return the memory group id.
 * Memory within a dynamic memory group is added/removed dynamically
 * in unit_pages.
 *
 * Returns an error if out of memory, if the node id is invalid, if no new
 * memory groups can be registered, or if unit_pages is invalid (0, not a
 * power of two, smaller than a single memory block). Otherwise, returns the
 * new memory group id.
 */
int memory_group_register_dynamic(int nid, unsigned long unit_pages)
{
	struct memory_group group = {
		.nid = nid,
		.is_dynamic = true,
		.d = {
			.unit_pages = unit_pages,
		},
	};

	if (!unit_pages || !is_power_of_2(unit_pages) ||
	    unit_pages < PHYS_PFN(memory_block_size_bytes()))
		return -EINVAL;
	return memory_group_register(group);
}
EXPORT_SYMBOL_GPL(memory_group_register_dynamic);

/**
 * memory_group_unregister() - Unregister a memory group.
 * @mgid: the memory group id
 *
 * Unregister a memory group. If any memory block still belongs to this
 * memory group, unregistering will fail.
 *
 * Returns -EINVAL if the memory group id is invalid, returns -EBUSY if some
 * memory blocks still belong to this memory group and returns 0 if
 * unregistering succeeded.
 */
int memory_group_unregister(int mgid)
{
	struct memory_group *group;

	if (mgid < 0)
		return -EINVAL;

	group = xa_load(&memory_groups, mgid);
	if (!group)
		return -EINVAL;
	if (!list_empty(&group->memory_blocks))
		return -EBUSY;
	xa_erase(&memory_groups, mgid);
	kfree(group);
	return 0;
}
EXPORT_SYMBOL_GPL(memory_group_unregister);

/*
 * This is an internal helper only to be used in core memory hotplug code to
 * lookup a memory group. We don't care about locking, as we don't expect a
 * memory group to get unregistered while adding memory to it -- because
 * the group and the memory are managed by the same driver.
 */
struct memory_group *memory_group_find_by_id(int mgid)
{
	return xa_load(&memory_groups, mgid);
}

/*
 * This is an internal helper only to be used in core memory hotplug code to
 * walk all dynamic memory groups excluding a given memory group, either
 * belonging to a specific node, or belonging to any node.
 */
int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
			       struct memory_group *excluded, void *arg)
{
	struct memory_group *group;
	unsigned long index;
	int ret = 0;

	xa_for_each_marked(&memory_groups, index, group,
			   MEMORY_GROUP_MARK_DYNAMIC) {
		if (group == excluded)
			continue;
#ifdef CONFIG_NUMA
		if (nid != NUMA_NO_NODE && group->nid != nid)
			continue;
#endif /* CONFIG_NUMA */
		ret = func(group, arg);
		if (ret)
			break;
	}
	return ret;
}

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
void memblk_nr_poison_inc(unsigned long pfn)
{
	const unsigned long block_id = pfn_to_block_id(pfn);
	struct memory_block *mem = find_memory_block_by_id(block_id);

	if (mem)
		atomic_long_inc(&mem->nr_hwpoison);
}

void memblk_nr_poison_sub(unsigned long pfn, long i)
{
	const unsigned long block_id = pfn_to_block_id(pfn);
	struct memory_block *mem = find_memory_block_by_id(block_id);

	if (mem)
		atomic_long_sub(i, &mem->nr_hwpoison);
}

static unsigned long memblk_nr_poison(struct memory_block *mem)
{
	return atomic_long_read(&mem->nr_hwpoison);
}
#endif
1207