1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * pseries Memory Hotplug infrastructure.
4  *
5  * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
6  */
7 
8 #define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt
9 
10 #include <linux/of.h>
11 #include <linux/of_address.h>
12 #include <linux/memblock.h>
13 #include <linux/memory.h>
14 #include <linux/memory_hotplug.h>
15 #include <linux/slab.h>
16 
17 #include <asm/firmware.h>
18 #include <asm/machdep.h>
19 #include <asm/prom.h>
20 #include <asm/sparsemem.h>
21 #include <asm/fadump.h>
22 #include <asm/drmem.h>
23 #include "pseries.h"
24 
/*
 * Set while dlpar_memory() is applying its own device-tree update
 * (around drmem_update_dt()) so the OF reconfig notifier path
 * (pseries_update_drconf_memory()) can ignore changes this module
 * itself initiated.
 */
static bool rtas_hp_event;
26 
27 unsigned long pseries_memory_block_size(void)
28 {
29 	struct device_node *np;
30 	unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
31 	struct resource r;
32 
33 	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
34 	if (np) {
35 		const __be64 *size;
36 
37 		size = of_get_property(np, "ibm,lmb-size", NULL);
38 		if (size)
39 			memblock_size = be64_to_cpup(size);
40 		of_node_put(np);
41 	} else  if (machine_is(pseries)) {
42 		/* This fallback really only applies to pseries */
43 		unsigned int memzero_size = 0;
44 
45 		np = of_find_node_by_path("/memory@0");
46 		if (np) {
47 			if (!of_address_to_resource(np, 0, &r))
48 				memzero_size = resource_size(&r);
49 			of_node_put(np);
50 		}
51 
52 		if (memzero_size) {
53 			/* We now know the size of memory@0, use this to find
54 			 * the first memoryblock and get its size.
55 			 */
56 			char buf[64];
57 
58 			sprintf(buf, "/memory@%x", memzero_size);
59 			np = of_find_node_by_path(buf);
60 			if (np) {
61 				if (!of_address_to_resource(np, 0, &r))
62 					memblock_size = resource_size(&r);
63 				of_node_put(np);
64 			}
65 		}
66 	}
67 	return memblock_size;
68 }
69 
70 static void dlpar_free_property(struct property *prop)
71 {
72 	kfree(prop->name);
73 	kfree(prop->value);
74 	kfree(prop);
75 }
76 
77 static struct property *dlpar_clone_property(struct property *prop,
78 					     u32 prop_size)
79 {
80 	struct property *new_prop;
81 
82 	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
83 	if (!new_prop)
84 		return NULL;
85 
86 	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
87 	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
88 	if (!new_prop->name || !new_prop->value) {
89 		dlpar_free_property(new_prop);
90 		return NULL;
91 	}
92 
93 	memcpy(new_prop->value, prop->value, prop->length);
94 	new_prop->length = prop_size;
95 
96 	of_property_set_flag(new_prop, OF_DYNAMIC);
97 	return new_prop;
98 }
99 
/*
 * find_aa_index - find (or create) the associativity-lookup-arrays
 * index matching an LMB's associativity.
 * @dr_node:   the /ibm,dynamic-reconfiguration-memory node
 * @ala_prop:  its "ibm,associativity-lookup-arrays" property
 * @lmb_assoc: the LMB's "ibm,associativity" value; cell 0 is the entry
 *             count, the array proper starts at cell 1
 * @aa_index:  out parameter for the matching array index
 *
 * Returns true with *aa_index set when a matching array exists or was
 * successfully appended to the property; returns false only when the
 * grown property cannot be allocated.
 */
static bool find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop,
			 const u32 *lmb_assoc, u32 *aa_index)
{
	u32 *assoc_arrays, new_prop_size;
	struct property *new_prop;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	/* Search the existing arrays for one matching this LMB. */
	for (i = 0; i < aa_arrays; i++) {
		/* +2 skips the two header cells (count, entries-per-array). */
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		*aa_index = i;
		return true;
	}

	/* No match: grow the property by one array and append ours. */
	new_prop_size = ala_prop->length + aa_array_sz;
	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
	if (!new_prop)
		return false;

	assoc_arrays = new_prop->value;

	/* increment the number of entries in the lookup array */
	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

	/* copy the new associativity into the lookup array */
	index = aa_arrays * aa_array_entries + 2;
	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

	of_update_property(dr_node, new_prop);

	/*
	 * The associativity lookup array index for this lmb is
	 * number of entries - 1 since we added its associativity
	 * to the end of the lookup array.
	 */
	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	return true;
}
155 
156 static int update_lmb_associativity_index(struct drmem_lmb *lmb)
157 {
158 	struct device_node *parent, *lmb_node, *dr_node;
159 	struct property *ala_prop;
160 	const u32 *lmb_assoc;
161 	u32 aa_index;
162 	bool found;
163 
164 	parent = of_find_node_by_path("/");
165 	if (!parent)
166 		return -ENODEV;
167 
168 	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
169 					     parent);
170 	of_node_put(parent);
171 	if (!lmb_node)
172 		return -EINVAL;
173 
174 	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
175 	if (!lmb_assoc) {
176 		dlpar_free_cc_nodes(lmb_node);
177 		return -ENODEV;
178 	}
179 
180 	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
181 	if (!dr_node) {
182 		dlpar_free_cc_nodes(lmb_node);
183 		return -ENODEV;
184 	}
185 
186 	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
187 				    NULL);
188 	if (!ala_prop) {
189 		of_node_put(dr_node);
190 		dlpar_free_cc_nodes(lmb_node);
191 		return -ENODEV;
192 	}
193 
194 	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);
195 
196 	of_node_put(dr_node);
197 	dlpar_free_cc_nodes(lmb_node);
198 
199 	if (!found) {
200 		pr_err("Could not find LMB associativity\n");
201 		return -1;
202 	}
203 
204 	lmb->aa_index = aa_index;
205 	return 0;
206 }
207 
208 static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
209 {
210 	unsigned long section_nr;
211 	struct mem_section *mem_sect;
212 	struct memory_block *mem_block;
213 
214 	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
215 	mem_sect = __nr_to_section(section_nr);
216 
217 	mem_block = find_memory_block(mem_sect);
218 	return mem_block;
219 }
220 
221 static int get_lmb_range(u32 drc_index, int n_lmbs,
222 			 struct drmem_lmb **start_lmb,
223 			 struct drmem_lmb **end_lmb)
224 {
225 	struct drmem_lmb *lmb, *start, *end;
226 	struct drmem_lmb *limit;
227 
228 	start = NULL;
229 	for_each_drmem_lmb(lmb) {
230 		if (lmb->drc_index == drc_index) {
231 			start = lmb;
232 			break;
233 		}
234 	}
235 
236 	if (!start)
237 		return -EINVAL;
238 
239 	end = &start[n_lmbs];
240 
241 	limit = &drmem_info->lmbs[drmem_info->n_lmbs];
242 	if (end > limit)
243 		return -EINVAL;
244 
245 	*start_lmb = start;
246 	*end_lmb = end;
247 	return 0;
248 }
249 
250 static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
251 {
252 	struct memory_block *mem_block;
253 	int rc;
254 
255 	mem_block = lmb_to_memblock(lmb);
256 	if (!mem_block)
257 		return -EINVAL;
258 
259 	if (online && mem_block->dev.offline)
260 		rc = device_online(&mem_block->dev);
261 	else if (!online && !mem_block->dev.offline)
262 		rc = device_offline(&mem_block->dev);
263 	else
264 		rc = 0;
265 
266 	put_device(&mem_block->dev);
267 
268 	return rc;
269 }
270 
/* Online the memory block backing @lmb; see dlpar_change_lmb_state(). */
static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}
275 
276 #ifdef CONFIG_MEMORY_HOTREMOVE
/* Offline the memory block backing @lmb; see dlpar_change_lmb_state(). */
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}
281 
282 static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
283 {
284 	unsigned long block_sz, start_pfn;
285 	int sections_per_block;
286 	int i, nid;
287 
288 	start_pfn = base >> PAGE_SHIFT;
289 
290 	lock_device_hotplug();
291 
292 	if (!pfn_valid(start_pfn))
293 		goto out;
294 
295 	block_sz = pseries_memory_block_size();
296 	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
297 	nid = memory_add_physaddr_to_nid(base);
298 
299 	for (i = 0; i < sections_per_block; i++) {
300 		__remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
301 		base += MIN_MEMORY_BLOCK_SIZE;
302 	}
303 
304 out:
305 	/* Update memory regions for memory remove */
306 	memblock_remove(base, memblock_size);
307 	unlock_device_hotplug();
308 	return 0;
309 }
310 
311 static int pseries_remove_mem_node(struct device_node *np)
312 {
313 	const __be32 *regs;
314 	unsigned long base;
315 	unsigned int lmb_size;
316 	int ret = -EINVAL;
317 
318 	/*
319 	 * Check to see if we are actually removing memory
320 	 */
321 	if (!of_node_is_type(np, "memory"))
322 		return 0;
323 
324 	/*
325 	 * Find the base address and size of the memblock
326 	 */
327 	regs = of_get_property(np, "reg", NULL);
328 	if (!regs)
329 		return ret;
330 
331 	base = be64_to_cpu(*(unsigned long *)regs);
332 	lmb_size = be32_to_cpu(regs[3]);
333 
334 	pseries_remove_memblock(base, lmb_size);
335 	return 0;
336 }
337 
338 static bool lmb_is_removable(struct drmem_lmb *lmb)
339 {
340 	int i, scns_per_block;
341 	bool rc = true;
342 	unsigned long pfn, block_sz;
343 	u64 phys_addr;
344 
345 	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
346 		return false;
347 
348 	block_sz = memory_block_size_bytes();
349 	scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
350 	phys_addr = lmb->base_addr;
351 
352 #ifdef CONFIG_FA_DUMP
353 	/*
354 	 * Don't hot-remove memory that falls in fadump boot memory area
355 	 * and memory that is reserved for capturing old kernel memory.
356 	 */
357 	if (is_fadump_memory_area(phys_addr, block_sz))
358 		return false;
359 #endif
360 
361 	for (i = 0; i < scns_per_block; i++) {
362 		pfn = PFN_DOWN(phys_addr);
363 		if (!pfn_in_present_section(pfn)) {
364 			phys_addr += MIN_MEMORY_BLOCK_SIZE;
365 			continue;
366 		}
367 
368 		rc = rc && is_mem_section_removable(pfn, PAGES_PER_SECTION);
369 		phys_addr += MIN_MEMORY_BLOCK_SIZE;
370 	}
371 
372 	return rc;
373 }
374 
375 static int dlpar_add_lmb(struct drmem_lmb *);
376 
377 static int dlpar_remove_lmb(struct drmem_lmb *lmb)
378 {
379 	unsigned long block_sz;
380 	int rc;
381 
382 	if (!lmb_is_removable(lmb))
383 		return -EINVAL;
384 
385 	rc = dlpar_offline_lmb(lmb);
386 	if (rc)
387 		return rc;
388 
389 	block_sz = pseries_memory_block_size();
390 
391 	__remove_memory(lmb->nid, lmb->base_addr, block_sz);
392 
393 	/* Update memory regions for memory remove */
394 	memblock_remove(lmb->base_addr, block_sz);
395 
396 	invalidate_lmb_associativity_index(lmb);
397 	lmb_clear_nid(lmb);
398 	lmb->flags &= ~DRCONF_MEM_ASSIGNED;
399 
400 	return 0;
401 }
402 
/*
 * Hot-remove @lmbs_to_remove LMBs chosen from anywhere in the drmem
 * array.  All-or-nothing: when fewer than the requested number can be
 * removed, every LMB removed so far is added back and -EINVAL is
 * returned.  On full success the DRC of each removed LMB is released
 * back to firmware.
 */
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		/* Individual failures aren't fatal; try the next LMB. */
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_removed++;
		if (lmbs_removed == lmbs_to_remove)
			break;
	}

	if (lmbs_removed != lmbs_to_remove) {
		/* Roll back: re-add everything removed above. */
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}

		rc = -EINVAL;
	} else {
		/* Commit: hand the removed LMBs' DRCs back to firmware. */
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}
477 
478 static int dlpar_memory_remove_by_index(u32 drc_index)
479 {
480 	struct drmem_lmb *lmb;
481 	int lmb_found;
482 	int rc;
483 
484 	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);
485 
486 	lmb_found = 0;
487 	for_each_drmem_lmb(lmb) {
488 		if (lmb->drc_index == drc_index) {
489 			lmb_found = 1;
490 			rc = dlpar_remove_lmb(lmb);
491 			if (!rc)
492 				dlpar_release_drc(lmb->drc_index);
493 
494 			break;
495 		}
496 	}
497 
498 	if (!lmb_found)
499 		rc = -EINVAL;
500 
501 	if (rc)
502 		pr_info("Failed to hot-remove memory at %llx\n",
503 			lmb->base_addr);
504 	else
505 		pr_info("Memory at %llx was hot-removed\n", lmb->base_addr);
506 
507 	return rc;
508 }
509 
510 static int dlpar_memory_readd_by_index(u32 drc_index)
511 {
512 	struct drmem_lmb *lmb;
513 	int lmb_found;
514 	int rc;
515 
516 	pr_info("Attempting to update LMB, drc index %x\n", drc_index);
517 
518 	lmb_found = 0;
519 	for_each_drmem_lmb(lmb) {
520 		if (lmb->drc_index == drc_index) {
521 			lmb_found = 1;
522 			rc = dlpar_remove_lmb(lmb);
523 			if (!rc) {
524 				rc = dlpar_add_lmb(lmb);
525 				if (rc)
526 					dlpar_release_drc(lmb->drc_index);
527 			}
528 			break;
529 		}
530 	}
531 
532 	if (!lmb_found)
533 		rc = -EINVAL;
534 
535 	if (rc)
536 		pr_info("Failed to update memory at %llx\n",
537 			lmb->base_addr);
538 	else
539 		pr_info("Memory at %llx was updated\n", lmb->base_addr);
540 
541 	return rc;
542 }
543 
/*
 * Hot-remove a contiguous run of @lmbs_to_remove LMBs starting at the
 * LMB with @drc_index ("indexed-count" form).  All-or-nothing: on any
 * failure, every LMB removed so far is re-added and -EINVAL returned.
 */
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		/* A firmware-reserved LMB makes the whole range unusable. */
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	/* rc is 0 here (from get_lmb_range) unless a removal fails below. */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");


		/* Roll back: re-add every LMB removed above. */
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		/* Commit: release the DRCs of everything we removed. */
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}
613 
614 #else
/*
 * CONFIG_MEMORY_HOTREMOVE=n stubs: memory hot-remove is not supported,
 * so the remove-side entry points fail with -EOPNOTSUPP.  The OF
 * notifier helper returns 0 since there is nothing to undo.
 */
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_readd_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
649 #endif /* CONFIG_MEMORY_HOTREMOVE */
650 
/*
 * dlpar_add_lmb - bring an unassigned LMB online.
 *
 * Resolves the LMB's associativity index from configure-connector
 * data, adds the memory to the kernel and onlines it.  On success the
 * LMB is flagged DRCONF_MEM_ASSIGNED; on failure the intermediate
 * steps are undone.
 *
 * NOTE(review): on update_lmb_associativity_index() failure this
 * function releases the DRC itself, yet several callers (the
 * add-by-count/index/ic paths) also call dlpar_release_drc() when this
 * function fails — looks like a possible double release; verify
 * against the DRC state machine before relying on it.
 */
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = update_lmb_associativity_index(lmb);
	if (rc) {
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	lmb_set_nid(lmb);
	block_sz = memory_block_size_bytes();

	/* Add the memory */
	rc = __add_memory(lmb->nid, lmb->base_addr, block_sz);
	if (rc) {
		invalidate_lmb_associativity_index(lmb);
		return rc;
	}

	/* Onlining can fail; unwind the __add_memory() above if so. */
	rc = dlpar_online_lmb(lmb);
	if (rc) {
		__remove_memory(lmb->nid, lmb->base_addr, block_sz);
		invalidate_lmb_associativity_index(lmb);
		lmb_clear_nid(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}
686 
/*
 * Hot-add @lmbs_to_add LMBs chosen from any currently unassigned
 * entries in the drmem array.  All-or-nothing: if fewer than requested
 * can be added, everything added so far is removed again and -EINVAL
 * returned.
 */
static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		/* Per-LMB acquire/add failures aren't fatal; keep trying. */
		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_added++;
		if (lmbs_added == lmbs_to_add)
			break;
	}

	if (lmbs_added != lmbs_to_add) {
		/* Roll back: remove every LMB added above. */
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}
766 
767 static int dlpar_memory_add_by_index(u32 drc_index)
768 {
769 	struct drmem_lmb *lmb;
770 	int rc, lmb_found;
771 
772 	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
773 
774 	lmb_found = 0;
775 	for_each_drmem_lmb(lmb) {
776 		if (lmb->drc_index == drc_index) {
777 			lmb_found = 1;
778 			rc = dlpar_acquire_drc(lmb->drc_index);
779 			if (!rc) {
780 				rc = dlpar_add_lmb(lmb);
781 				if (rc)
782 					dlpar_release_drc(lmb->drc_index);
783 			}
784 
785 			break;
786 		}
787 	}
788 
789 	if (!lmb_found)
790 		rc = -EINVAL;
791 
792 	if (rc)
793 		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
794 	else
795 		pr_info("Memory at %llx (drc index %x) was hot-added\n",
796 			lmb->base_addr, drc_index);
797 
798 	return rc;
799 }
800 
/*
 * Hot-add a contiguous run of @lmbs_to_add LMBs starting at the LMB
 * with @drc_index ("indexed-count" form).  All-or-nothing: on any
 * failure, every LMB added so far is removed again and -EINVAL
 * returned.
 */
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	/* Unlike the by-count path, any failure here aborts the range. */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		/* Roll back: remove every LMB added above. */
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}
875 
/*
 * dlpar_memory - entry point for memory DLPAR requests carried in a
 * pseries hotplug event log.
 *
 * Dispatches add/remove/readd requests by count, drc index, or
 * indexed-count, then pushes the resulting LMB state back into the
 * device tree.  rtas_hp_event is raised around drmem_update_dt() so
 * the OF reconfig notifier ignores the update we generate ourselves.
 * Runs with the device hotplug lock held throughout.
 */
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_READD:
		drc_index = hp_elog->_drc_u.drc_index;
		rc = dlpar_memory_readd_by_index(drc_index);
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	/* On success, sync the new LMB state into the device tree. */
	if (!rc) {
		rtas_hp_event = true;
		rc = drmem_update_dt();
		rtas_hp_event = false;
	}

	unlock_device_hotplug();
	return rc;
}
945 
946 static int pseries_add_mem_node(struct device_node *np)
947 {
948 	const __be32 *regs;
949 	unsigned long base;
950 	unsigned int lmb_size;
951 	int ret = -EINVAL;
952 
953 	/*
954 	 * Check to see if we are actually adding memory
955 	 */
956 	if (!of_node_is_type(np, "memory"))
957 		return 0;
958 
959 	/*
960 	 * Find the base and size of the memblock
961 	 */
962 	regs = of_get_property(np, "reg", NULL);
963 	if (!regs)
964 		return ret;
965 
966 	base = be64_to_cpu(*(unsigned long *)regs);
967 	lmb_size = be32_to_cpu(regs[3]);
968 
969 	/*
970 	 * Update memory region to represent the memory add
971 	 */
972 	ret = memblock_add(base, lmb_size);
973 	return (ret < 0) ? -EINVAL : 0;
974 }
975 
/*
 * pseries_update_drconf_memory - mirror an ibm,dynamic-memory (v1)
 * property update into memblock.
 *
 * Called from the OF reconfig notifier.  Updates that this module
 * initiated set rtas_hp_event and are skipped.  Otherwise, the old and
 * new property values are compared entry by entry and the first LMB
 * whose DRCONF_MEM_ASSIGNED flag changed is removed from / added to
 * memblock (the loop stops after the first change it finds).
 */
static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell_v1 *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	/* Ignore updates generated by our own DLPAR handling. */
	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	/* No previous value: nothing to diff against. */
	if (!pr->old_prop)
		return 0;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of lmb's described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell's.
	 */
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell_v1 *)p;

	/* Same layout for the new value: skip its entry count too. */
	p = (__be32 *)pr->prop->value;
	p++;
	new_drmem = (struct of_drconf_cell_v1 *)p;

	for (i = 0; i < entries; i++) {
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			rc = pseries_remove_memblock(
				be64_to_cpu(old_drmem[i].base_addr),
						     memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) &&
			    (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}
	return rc;
}
1029 
1030 static int pseries_memory_notifier(struct notifier_block *nb,
1031 				   unsigned long action, void *data)
1032 {
1033 	struct of_reconfig_data *rd = data;
1034 	int err = 0;
1035 
1036 	switch (action) {
1037 	case OF_RECONFIG_ATTACH_NODE:
1038 		err = pseries_add_mem_node(rd->dn);
1039 		break;
1040 	case OF_RECONFIG_DETACH_NODE:
1041 		err = pseries_remove_mem_node(rd->dn);
1042 		break;
1043 	case OF_RECONFIG_UPDATE_PROPERTY:
1044 		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
1045 			err = pseries_update_drconf_memory(rd);
1046 		break;
1047 	}
1048 	return notifier_from_errno(err);
1049 }
1050 
/* Receives OF reconfiguration events for memory nodes. */
static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};
1054 
/*
 * Register the OF reconfig notifier on LPARs so device-tree memory
 * changes are mirrored into memblock.
 */
static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);
1063