1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * pseries Memory Hotplug infrastructure.
4  *
5  * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
6  */
7 
8 #define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt
9 
10 #include <linux/of.h>
11 #include <linux/of_address.h>
12 #include <linux/memblock.h>
13 #include <linux/memory.h>
14 #include <linux/memory_hotplug.h>
15 #include <linux/slab.h>
16 
17 #include <asm/firmware.h>
18 #include <asm/machdep.h>
19 #include <asm/sparsemem.h>
20 #include <asm/fadump.h>
21 #include <asm/drmem.h>
22 #include "pseries.h"
23 
24 unsigned long pseries_memory_block_size(void)
25 {
26 	struct device_node *np;
27 	u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
28 	struct resource r;
29 
30 	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
31 	if (np) {
32 		int len;
33 		int size_cells;
34 		const __be32 *prop;
35 
36 		size_cells = of_n_size_cells(np);
37 
38 		prop = of_get_property(np, "ibm,lmb-size", &len);
39 		if (prop && len >= size_cells * sizeof(__be32))
40 			memblock_size = of_read_number(prop, size_cells);
41 		of_node_put(np);
42 
43 	} else  if (machine_is(pseries)) {
44 		/* This fallback really only applies to pseries */
45 		unsigned int memzero_size = 0;
46 
47 		np = of_find_node_by_path("/memory@0");
48 		if (np) {
49 			if (!of_address_to_resource(np, 0, &r))
50 				memzero_size = resource_size(&r);
51 			of_node_put(np);
52 		}
53 
54 		if (memzero_size) {
55 			/* We now know the size of memory@0, use this to find
56 			 * the first memoryblock and get its size.
57 			 */
58 			char buf[64];
59 
60 			sprintf(buf, "/memory@%x", memzero_size);
61 			np = of_find_node_by_path(buf);
62 			if (np) {
63 				if (!of_address_to_resource(np, 0, &r))
64 					memblock_size = resource_size(&r);
65 				of_node_put(np);
66 			}
67 		}
68 	}
69 	return memblock_size;
70 }
71 
72 static void dlpar_free_property(struct property *prop)
73 {
74 	kfree(prop->name);
75 	kfree(prop->value);
76 	kfree(prop);
77 }
78 
79 static struct property *dlpar_clone_property(struct property *prop,
80 					     u32 prop_size)
81 {
82 	struct property *new_prop;
83 
84 	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
85 	if (!new_prop)
86 		return NULL;
87 
88 	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
89 	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
90 	if (!new_prop->name || !new_prop->value) {
91 		dlpar_free_property(new_prop);
92 		return NULL;
93 	}
94 
95 	memcpy(new_prop->value, prop->value, prop->length);
96 	new_prop->length = prop_size;
97 
98 	of_property_set_flag(new_prop, OF_DYNAMIC);
99 	return new_prop;
100 }
101 
/*
 * Look up the index of the associativity array matching @lmb_assoc in the
 * "ibm,associativity-lookup-arrays" property @ala_prop of @dr_node. If no
 * existing array matches, the property is grown by one array and the new
 * associativity appended. On success *@aa_index is set and true returned;
 * false is returned only when growing the property fails (allocation).
 */
static bool find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop,
			 const u32 *lmb_assoc, u32 *aa_index)
{
	u32 *assoc_arrays, new_prop_size;
	struct property *new_prop;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	/* Property data is big-endian regardless of host endianness. */
	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	for (i = 0; i < aa_arrays; i++) {
		/* +2 skips the two header cells described above. */
		index = (i * aa_array_entries) + 2;

		/*
		 * lmb_assoc[0] is skipped — presumably the associativity
		 * list length cell per PAPR; only the entries are compared.
		 */
		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		*aa_index = i;
		return true;
	}

	/* No match: grow the property by one array and append lmb_assoc. */
	new_prop_size = ala_prop->length + aa_array_sz;
	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
	if (!new_prop)
		return false;

	assoc_arrays = new_prop->value;

	/* increment the number of entries in the lookup array */
	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

	/* copy the new associativity into the lookup array */
	index = aa_arrays * aa_array_entries + 2;
	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

	of_update_property(dr_node, new_prop);

	/*
	 * The associativity lookup array index for this lmb is
	 * number of entries - 1 since we added its associativity
	 * to the end of the lookup array.
	 */
	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	return true;
}
157 
158 static int update_lmb_associativity_index(struct drmem_lmb *lmb)
159 {
160 	struct device_node *parent, *lmb_node, *dr_node;
161 	struct property *ala_prop;
162 	const u32 *lmb_assoc;
163 	u32 aa_index;
164 	bool found;
165 
166 	parent = of_find_node_by_path("/");
167 	if (!parent)
168 		return -ENODEV;
169 
170 	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
171 					     parent);
172 	of_node_put(parent);
173 	if (!lmb_node)
174 		return -EINVAL;
175 
176 	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
177 	if (!lmb_assoc) {
178 		dlpar_free_cc_nodes(lmb_node);
179 		return -ENODEV;
180 	}
181 
182 	update_numa_distance(lmb_node);
183 
184 	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
185 	if (!dr_node) {
186 		dlpar_free_cc_nodes(lmb_node);
187 		return -ENODEV;
188 	}
189 
190 	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
191 				    NULL);
192 	if (!ala_prop) {
193 		of_node_put(dr_node);
194 		dlpar_free_cc_nodes(lmb_node);
195 		return -ENODEV;
196 	}
197 
198 	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);
199 
200 	of_node_put(dr_node);
201 	dlpar_free_cc_nodes(lmb_node);
202 
203 	if (!found) {
204 		pr_err("Could not find LMB associativity\n");
205 		return -1;
206 	}
207 
208 	lmb->aa_index = aa_index;
209 	return 0;
210 }
211 
212 static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
213 {
214 	unsigned long section_nr;
215 	struct memory_block *mem_block;
216 
217 	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
218 
219 	mem_block = find_memory_block(section_nr);
220 	return mem_block;
221 }
222 
223 static int get_lmb_range(u32 drc_index, int n_lmbs,
224 			 struct drmem_lmb **start_lmb,
225 			 struct drmem_lmb **end_lmb)
226 {
227 	struct drmem_lmb *lmb, *start, *end;
228 	struct drmem_lmb *limit;
229 
230 	start = NULL;
231 	for_each_drmem_lmb(lmb) {
232 		if (lmb->drc_index == drc_index) {
233 			start = lmb;
234 			break;
235 		}
236 	}
237 
238 	if (!start)
239 		return -EINVAL;
240 
241 	end = &start[n_lmbs];
242 
243 	limit = &drmem_info->lmbs[drmem_info->n_lmbs];
244 	if (end > limit)
245 		return -EINVAL;
246 
247 	*start_lmb = start;
248 	*end_lmb = end;
249 	return 0;
250 }
251 
252 static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
253 {
254 	struct memory_block *mem_block;
255 	int rc;
256 
257 	mem_block = lmb_to_memblock(lmb);
258 	if (!mem_block)
259 		return -EINVAL;
260 
261 	if (online && mem_block->dev.offline)
262 		rc = device_online(&mem_block->dev);
263 	else if (!online && !mem_block->dev.offline)
264 		rc = device_offline(&mem_block->dev);
265 	else
266 		rc = 0;
267 
268 	put_device(&mem_block->dev);
269 
270 	return rc;
271 }
272 
/* Bring the memory block backing @lmb online. */
static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}
277 
#ifdef CONFIG_MEMORY_HOTREMOVE
/* Take the memory block backing @lmb offline. */
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}
283 
284 static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
285 {
286 	unsigned long block_sz, start_pfn;
287 	int sections_per_block;
288 	int i;
289 
290 	start_pfn = base >> PAGE_SHIFT;
291 
292 	lock_device_hotplug();
293 
294 	if (!pfn_valid(start_pfn))
295 		goto out;
296 
297 	block_sz = pseries_memory_block_size();
298 	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
299 
300 	for (i = 0; i < sections_per_block; i++) {
301 		__remove_memory(base, MIN_MEMORY_BLOCK_SIZE);
302 		base += MIN_MEMORY_BLOCK_SIZE;
303 	}
304 
305 out:
306 	/* Update memory regions for memory remove */
307 	memblock_remove(base, memblock_size);
308 	unlock_device_hotplug();
309 	return 0;
310 }
311 
312 static int pseries_remove_mem_node(struct device_node *np)
313 {
314 	int ret;
315 	struct resource res;
316 
317 	/*
318 	 * Check to see if we are actually removing memory
319 	 */
320 	if (!of_node_is_type(np, "memory"))
321 		return 0;
322 
323 	/*
324 	 * Find the base address and size of the memblock
325 	 */
326 	ret = of_address_to_resource(np, 0, &res);
327 	if (ret)
328 		return ret;
329 
330 	pseries_remove_memblock(res.start, resource_size(&res));
331 	return 0;
332 }
333 
/*
 * Decide whether @lmb may be considered for hot-remove: it must be
 * assigned and not reserved, and (with fadump enabled) must lie outside
 * the fadump-protected regions. The final decision is still made by
 * device_offline() at removal time.
 */
static bool lmb_is_removable(struct drmem_lmb *lmb)
{
	if ((lmb->flags & DRCONF_MEM_RESERVED) ||
		!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

#ifdef CONFIG_FA_DUMP
	/*
	 * Don't hot-remove memory that falls in fadump boot memory area
	 * and memory that is reserved for capturing old kernel memory.
	 */
	if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
		return false;
#endif
	/* device_offline() will determine if we can actually remove this lmb */
	return true;
}
351 
/* Forward declaration: needed by the remove paths for rollback re-adds. */
static int dlpar_add_lmb(struct drmem_lmb *);

/*
 * Hot-remove the memory backing @lmb: offline it, remove it from the
 * kernel and from memblock, then invalidate its associativity index and
 * clear DRCONF_MEM_ASSIGNED. Returns -EINVAL if the LMB is not removable
 * or has no memory_block, the offline error otherwise, 0 on success.
 */
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	struct memory_block *mem_block;
	unsigned long block_sz;
	int rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (mem_block == NULL)
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc) {
		/* Drop the reference taken by lmb_to_memblock(). */
		put_device(&mem_block->dev);
		return rc;
	}

	block_sz = pseries_memory_block_size();

	__remove_memory(lmb->base_addr, block_sz);
	put_device(&mem_block->dev);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	invalidate_lmb_associativity_index(lmb);
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;

	return 0;
}
386 
/*
 * Hot-remove @lmbs_to_remove LMBs chosen from anywhere in the drmem array.
 * All-or-nothing: LMBs removed so far are marked reserved; if the full
 * count cannot be removed, every reserved LMB is added back and -EINVAL
 * is returned. On success the reserved LMBs' DRCs are released.
 */
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_reserved = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		/* Individual failures are tolerated; keep scanning. */
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_reserved++;
		if (lmbs_reserved == lmbs_to_remove)
			break;
	}

	if (lmbs_reserved != lmbs_to_remove) {
		/* Rollback: re-add everything removed in this request. */
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);

			lmbs_reserved--;
			if (lmbs_reserved == 0)
				break;
		}

		rc = -EINVAL;
	} else {
		/* Success: hand the DRCs back to the hypervisor. */
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);

			lmbs_reserved--;
			if (lmbs_reserved == 0)
				break;
		}
		rc = 0;
	}

	return rc;
}
469 
470 static int dlpar_memory_remove_by_index(u32 drc_index)
471 {
472 	struct drmem_lmb *lmb;
473 	int lmb_found;
474 	int rc;
475 
476 	pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);
477 
478 	lmb_found = 0;
479 	for_each_drmem_lmb(lmb) {
480 		if (lmb->drc_index == drc_index) {
481 			lmb_found = 1;
482 			rc = dlpar_remove_lmb(lmb);
483 			if (!rc)
484 				dlpar_release_drc(lmb->drc_index);
485 
486 			break;
487 		}
488 	}
489 
490 	if (!lmb_found)
491 		rc = -EINVAL;
492 
493 	if (rc)
494 		pr_debug("Failed to hot-remove memory at %llx\n",
495 			 lmb->base_addr);
496 	else
497 		pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
498 
499 	return rc;
500 }
501 
/*
 * Hot-remove a contiguous range of @lmbs_to_remove LMBs beginning at DRC
 * index @drc_index. Fails up front if the range is out of bounds or any
 * LMB in it is reserved. On a mid-range failure, the LMBs removed so far
 * are re-added and their DRCs unisolated as a hint to the hypervisor.
 */
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/*
	 * Validate that all LMBs in range are not reserved. Note that it
	 * is ok if they are !ASSIGNED since our goal here is to remove the
	 * LMB range, regardless of whether some LMBs were already removed
	 * by any other reason.
	 *
	 * This is a contrast to what is done in remove_by_count() where we
	 * check for both RESERVED and !ASSIGNED (via lmb_is_removable()),
	 * because we want to remove a fixed amount of LMBs in that function.
	 */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED) {
			pr_err("Memory at %llx (drc index %x) is reserved\n",
				lmb->base_addr, lmb->drc_index);
			return -EINVAL;
		}
	}

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		/*
		 * dlpar_remove_lmb() will error out if the LMB is already
		 * !ASSIGNED, but this case is a no-op for us.
		 */
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		/* Track successful removals for release or rollback below. */
		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");


		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			/*
			 * Setting the isolation state of an UNISOLATED/CONFIGURED
			 * device to UNISOLATE is a no-op, but the hypervisor can
			 * use it as a hint that the LMB removal failed.
			 */
			dlpar_unisolate_drc(lmb->drc_index);

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		/* Success: hand the DRCs back to the hypervisor. */
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}
588 
#else
/*
 * CONFIG_MEMORY_HOTREMOVE=n stubs: all hot-remove entry points fail with
 * -EOPNOTSUPP, while memory-node detach notifications are accepted as
 * no-ops so the OF notifier path keeps working.
 */
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned long memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
617 
/*
 * Hot-add the memory backing @lmb: bind its associativity, add the memory
 * to the kernel on the appropriate node and bring it online, then mark it
 * assigned. Each failure path unwinds the earlier steps (releasing the
 * DRC or invalidating the associativity index / removing the memory).
 */
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = update_lmb_associativity_index(lmb);
	if (rc) {
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this LMB.  Fake one if necessary. */
	nid = of_drconf_to_nid_single(lmb);
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	/* Add the memory */
	rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_NONE);
	if (rc) {
		invalidate_lmb_associativity_index(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		/* Roll back the add if the block cannot be onlined. */
		__remove_memory(lmb->base_addr, block_sz);
		invalidate_lmb_associativity_index(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}
656 
/*
 * Hot-add @lmbs_to_add LMBs chosen from any unassigned, unreserved entries
 * in the drmem array. All-or-nothing: added LMBs are marked reserved; if
 * the full count cannot be added, every reserved LMB is removed again and
 * -EINVAL is returned.
 */
static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_reserved = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			continue;

		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		/* Individual failures are tolerated; keep scanning. */
		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);
		lmbs_reserved++;
		if (lmbs_reserved == lmbs_to_add)
			break;
	}

	if (lmbs_reserved != lmbs_to_add) {
		/* Rollback: remove everything added in this request. */
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
			lmbs_reserved--;

			if (lmbs_reserved == 0)
				break;
		}
		rc = -EINVAL;
	} else {
		/* Success: just clear the bookkeeping reservations. */
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_debug("Memory at %llx (drc index %x) was hot-added\n",
				 lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
			lmbs_reserved--;

			if (lmbs_reserved == 0)
				break;
		}
		rc = 0;
	}

	return rc;
}
746 
747 static int dlpar_memory_add_by_index(u32 drc_index)
748 {
749 	struct drmem_lmb *lmb;
750 	int rc, lmb_found;
751 
752 	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
753 
754 	lmb_found = 0;
755 	for_each_drmem_lmb(lmb) {
756 		if (lmb->drc_index == drc_index) {
757 			lmb_found = 1;
758 			rc = dlpar_acquire_drc(lmb->drc_index);
759 			if (!rc) {
760 				rc = dlpar_add_lmb(lmb);
761 				if (rc)
762 					dlpar_release_drc(lmb->drc_index);
763 			}
764 
765 			break;
766 		}
767 	}
768 
769 	if (!lmb_found)
770 		rc = -EINVAL;
771 
772 	if (rc)
773 		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
774 	else
775 		pr_info("Memory at %llx (drc index %x) was hot-added\n",
776 			lmb->base_addr, drc_index);
777 
778 	return rc;
779 }
780 
/*
 * Hot-add a contiguous range of @lmbs_to_add LMBs beginning at DRC index
 * @drc_index. Fails up front if the range is out of bounds or any LMB in
 * it is reserved; on a mid-range failure the LMBs added so far are
 * removed again and -EINVAL is returned.
 */
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		/* Fail immediately if the whole range can't be hot-added */
		if (lmb->flags & DRCONF_MEM_RESERVED) {
			pr_err("Memory at %llx (drc index %x) is reserved\n",
					lmb->base_addr, lmb->drc_index);
			return -EINVAL;
		}
	}

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		/* Already-assigned LMBs in the range are left as-is. */
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		/* Track successful adds for rollback or final reporting. */
		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}
853 
/*
 * Entry point for memory DLPAR requests from a hotplug error log.
 * Dispatches on the action (add/remove) and identifier type (count, DRC
 * index, or count+index pair), then writes the updated LMB state back to
 * the device tree on success. The whole operation runs under the device
 * hotplug lock.
 */
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	/* Persist the new LMB state to the device tree only on success. */
	if (!rc)
		rc = drmem_update_dt();

	unlock_device_hotplug();
	return rc;
}
916 
917 static int pseries_add_mem_node(struct device_node *np)
918 {
919 	int ret;
920 	struct resource res;
921 
922 	/*
923 	 * Check to see if we are actually adding memory
924 	 */
925 	if (!of_node_is_type(np, "memory"))
926 		return 0;
927 
928 	/*
929 	 * Find the base and size of the memblock
930 	 */
931 	ret = of_address_to_resource(np, 0, &res);
932 	if (ret)
933 		return ret;
934 
935 	/*
936 	 * Update memory region to represent the memory add
937 	 */
938 	ret = memblock_add(res.start, resource_size(&res));
939 	return (ret < 0) ? -EINVAL : 0;
940 }
941 
/*
 * OF reconfiguration notifier: keeps memblock and the drmem LMB array in
 * sync with device tree changes affecting memory nodes.
 */
static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		/* Refresh LMBs when a property of the DR memory node changes. */
		if (!strcmp(rd->dn->name,
			    "ibm,dynamic-reconfiguration-memory"))
			drmem_update_lmbs(rd->prop);
	}
	return notifier_from_errno(err);
}
962 
/* Notifier block registered with the OF reconfiguration chain at init. */
static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};
966 
/* Register for device tree reconfiguration notifications on LPARs only. */
static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);
975