1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * pseries Memory Hotplug infrastructure.
4  *
5  * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
6  */
7 
8 #define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt
9 
10 #include <linux/of.h>
11 #include <linux/of_address.h>
12 #include <linux/memblock.h>
13 #include <linux/memory.h>
14 #include <linux/memory_hotplug.h>
15 #include <linux/slab.h>
16 
17 #include <asm/firmware.h>
18 #include <asm/machdep.h>
19 #include <asm/prom.h>
20 #include <asm/sparsemem.h>
21 #include <asm/fadump.h>
22 #include <asm/drmem.h>
23 #include "pseries.h"
24 
25 unsigned long pseries_memory_block_size(void)
26 {
27 	struct device_node *np;
28 	u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
29 	struct resource r;
30 
31 	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
32 	if (np) {
33 		int len;
34 		int size_cells;
35 		const __be32 *prop;
36 
37 		size_cells = of_n_size_cells(np);
38 
39 		prop = of_get_property(np, "ibm,lmb-size", &len);
40 		if (prop && len >= size_cells * sizeof(__be32))
41 			memblock_size = of_read_number(prop, size_cells);
42 		of_node_put(np);
43 
44 	} else  if (machine_is(pseries)) {
45 		/* This fallback really only applies to pseries */
46 		unsigned int memzero_size = 0;
47 
48 		np = of_find_node_by_path("/memory@0");
49 		if (np) {
50 			if (!of_address_to_resource(np, 0, &r))
51 				memzero_size = resource_size(&r);
52 			of_node_put(np);
53 		}
54 
55 		if (memzero_size) {
56 			/* We now know the size of memory@0, use this to find
57 			 * the first memoryblock and get its size.
58 			 */
59 			char buf[64];
60 
61 			sprintf(buf, "/memory@%x", memzero_size);
62 			np = of_find_node_by_path(buf);
63 			if (np) {
64 				if (!of_address_to_resource(np, 0, &r))
65 					memblock_size = resource_size(&r);
66 				of_node_put(np);
67 			}
68 		}
69 	}
70 	return memblock_size;
71 }
72 
73 static void dlpar_free_property(struct property *prop)
74 {
75 	kfree(prop->name);
76 	kfree(prop->value);
77 	kfree(prop);
78 }
79 
80 static struct property *dlpar_clone_property(struct property *prop,
81 					     u32 prop_size)
82 {
83 	struct property *new_prop;
84 
85 	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
86 	if (!new_prop)
87 		return NULL;
88 
89 	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
90 	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
91 	if (!new_prop->name || !new_prop->value) {
92 		dlpar_free_property(new_prop);
93 		return NULL;
94 	}
95 
96 	memcpy(new_prop->value, prop->value, prop->length);
97 	new_prop->length = prop_size;
98 
99 	of_property_set_flag(new_prop, OF_DYNAMIC);
100 	return new_prop;
101 }
102 
/*
 * Find the associativity-lookup-arrays index matching an LMB's
 * associativity, appending a new lookup array when no match exists.
 *
 * @dr_node:   the /ibm,dynamic-reconfiguration-memory node
 * @ala_prop:  its "ibm,associativity-lookup-arrays" property
 * @lmb_assoc: the LMB's "ibm,associativity" value; entry 0 is skipped
 *             in comparisons (presumably the level count per PAPR -
 *             confirm against the platform spec)
 * @aa_index:  out - the matching (or newly appended) array index
 *
 * Returns true with *aa_index set, or false if a new lookup array was
 * needed but the enlarged property could not be allocated.
 */
static bool find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop,
			 const u32 *lmb_assoc, u32 *aa_index)
{
	u32 *assoc_arrays, new_prop_size;
	struct property *new_prop;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	/* Scan the existing arrays for an exact byte-wise match. */
	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		*aa_index = i;
		return true;
	}

	/* No match: grow the property by one array and append ours. */
	new_prop_size = ala_prop->length + aa_array_sz;
	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
	if (!new_prop)
		return false;

	assoc_arrays = new_prop->value;

	/* increment the number of entries in the lookup array */
	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

	/* copy the new associativity into the lookup array */
	index = aa_arrays * aa_array_entries + 2;
	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

	of_update_property(dr_node, new_prop);

	/*
	 * The associativity lookup array index for this lmb is
	 * number of entries - 1 since we added its associativity
	 * to the end of the lookup array.
	 */
	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	return true;
}
158 
/*
 * Set lmb->aa_index to the associativity-lookup-arrays index matching
 * this LMB's associativity, obtained via configure-connector.
 *
 * Returns 0 on success, -ENODEV/-EINVAL when the required device tree
 * nodes or properties are missing, or -1 when the associativity could
 * not be found or added to the lookup arrays.
 */
static int update_lmb_associativity_index(struct drmem_lmb *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;
	bool found;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	/* Ask the hypervisor for the LMB's device tree representation. */
	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	update_numa_distance(lmb_node);

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);

	/* The configure-connector nodes were only needed for the lookup. */
	of_node_put(dr_node);
	dlpar_free_cc_nodes(lmb_node);

	if (!found) {
		pr_err("Could not find LMB associativity\n");
		return -1;
	}

	lmb->aa_index = aa_index;
	return 0;
}
212 
213 static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
214 {
215 	unsigned long section_nr;
216 	struct memory_block *mem_block;
217 
218 	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
219 
220 	mem_block = find_memory_block(section_nr);
221 	return mem_block;
222 }
223 
224 static int get_lmb_range(u32 drc_index, int n_lmbs,
225 			 struct drmem_lmb **start_lmb,
226 			 struct drmem_lmb **end_lmb)
227 {
228 	struct drmem_lmb *lmb, *start, *end;
229 	struct drmem_lmb *limit;
230 
231 	start = NULL;
232 	for_each_drmem_lmb(lmb) {
233 		if (lmb->drc_index == drc_index) {
234 			start = lmb;
235 			break;
236 		}
237 	}
238 
239 	if (!start)
240 		return -EINVAL;
241 
242 	end = &start[n_lmbs];
243 
244 	limit = &drmem_info->lmbs[drmem_info->n_lmbs];
245 	if (end > limit)
246 		return -EINVAL;
247 
248 	*start_lmb = start;
249 	*end_lmb = end;
250 	return 0;
251 }
252 
253 static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
254 {
255 	struct memory_block *mem_block;
256 	int rc;
257 
258 	mem_block = lmb_to_memblock(lmb);
259 	if (!mem_block)
260 		return -EINVAL;
261 
262 	if (online && mem_block->dev.offline)
263 		rc = device_online(&mem_block->dev);
264 	else if (!online && !mem_block->dev.offline)
265 		rc = device_offline(&mem_block->dev);
266 	else
267 		rc = 0;
268 
269 	put_device(&mem_block->dev);
270 
271 	return rc;
272 }
273 
/* Online the memory block backing @lmb. */
static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}
278 
279 #ifdef CONFIG_MEMORY_HOTREMOVE
/* Offline the memory block backing @lmb. */
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}
284 
285 static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
286 {
287 	unsigned long block_sz, start_pfn;
288 	int sections_per_block;
289 	int i;
290 
291 	start_pfn = base >> PAGE_SHIFT;
292 
293 	lock_device_hotplug();
294 
295 	if (!pfn_valid(start_pfn))
296 		goto out;
297 
298 	block_sz = pseries_memory_block_size();
299 	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
300 
301 	for (i = 0; i < sections_per_block; i++) {
302 		__remove_memory(base, MIN_MEMORY_BLOCK_SIZE);
303 		base += MIN_MEMORY_BLOCK_SIZE;
304 	}
305 
306 out:
307 	/* Update memory regions for memory remove */
308 	memblock_remove(base, memblock_size);
309 	unlock_device_hotplug();
310 	return 0;
311 }
312 
313 static int pseries_remove_mem_node(struct device_node *np)
314 {
315 	const __be32 *prop;
316 	unsigned long base;
317 	unsigned long lmb_size;
318 	int ret = -EINVAL;
319 	int addr_cells, size_cells;
320 
321 	/*
322 	 * Check to see if we are actually removing memory
323 	 */
324 	if (!of_node_is_type(np, "memory"))
325 		return 0;
326 
327 	/*
328 	 * Find the base address and size of the memblock
329 	 */
330 	prop = of_get_property(np, "reg", NULL);
331 	if (!prop)
332 		return ret;
333 
334 	addr_cells = of_n_addr_cells(np);
335 	size_cells = of_n_size_cells(np);
336 
337 	/*
338 	 * "reg" property represents (addr,size) tuple.
339 	 */
340 	base = of_read_number(prop, addr_cells);
341 	prop += addr_cells;
342 	lmb_size = of_read_number(prop, size_cells);
343 
344 	pseries_remove_memblock(base, lmb_size);
345 	return 0;
346 }
347 
348 static bool lmb_is_removable(struct drmem_lmb *lmb)
349 {
350 	if ((lmb->flags & DRCONF_MEM_RESERVED) ||
351 		!(lmb->flags & DRCONF_MEM_ASSIGNED))
352 		return false;
353 
354 #ifdef CONFIG_FA_DUMP
355 	/*
356 	 * Don't hot-remove memory that falls in fadump boot memory area
357 	 * and memory that is reserved for capturing old kernel memory.
358 	 */
359 	if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
360 		return false;
361 #endif
362 	/* device_offline() will determine if we can actually remove this lmb */
363 	return true;
364 }
365 
/* Forward declaration: the remove paths re-add LMBs on rollback. */
static int dlpar_add_lmb(struct drmem_lmb *);

/*
 * Offline and remove the memory backing a single LMB, then invalidate
 * its associativity index and clear DRCONF_MEM_ASSIGNED.
 * Returns 0 on success; -EINVAL when the LMB is not removable or has
 * no memory_block, or the error from offlining.
 */
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	struct memory_block *mem_block;
	unsigned long block_sz;
	int rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (mem_block == NULL)
		return -EINVAL;

	/* The block must be offline before its memory can be removed. */
	rc = dlpar_offline_lmb(lmb);
	if (rc) {
		put_device(&mem_block->dev);
		return rc;
	}

	block_sz = pseries_memory_block_size();

	__remove_memory(lmb->base_addr, block_sz);
	put_device(&mem_block->dev);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	invalidate_lmb_associativity_index(lmb);
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;

	return 0;
}
400 
/*
 * Hot-remove @lmbs_to_remove LMBs, chosen from whichever LMBs are
 * removable.  Each removed LMB is temporarily marked reserved so a
 * partial failure can be rolled back by re-adding them.
 * Returns 0 when the full count was removed, -EINVAL otherwise.
 */
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_reserved = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		/* A failure here is tolerated; try the next LMB instead. */
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_reserved++;
		if (lmbs_reserved == lmbs_to_remove)
			break;
	}

	if (lmbs_reserved != lmbs_to_remove) {
		/* Rollback: re-add everything removed above. */
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);

			lmbs_reserved--;
			if (lmbs_reserved == 0)
				break;
		}

		rc = -EINVAL;
	} else {
		/* Success: release the DRCs and drop the reservations. */
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);

			lmbs_reserved--;
			if (lmbs_reserved == 0)
				break;
		}
		rc = 0;
	}

	return rc;
}
483 
484 static int dlpar_memory_remove_by_index(u32 drc_index)
485 {
486 	struct drmem_lmb *lmb;
487 	int lmb_found;
488 	int rc;
489 
490 	pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);
491 
492 	lmb_found = 0;
493 	for_each_drmem_lmb(lmb) {
494 		if (lmb->drc_index == drc_index) {
495 			lmb_found = 1;
496 			rc = dlpar_remove_lmb(lmb);
497 			if (!rc)
498 				dlpar_release_drc(lmb->drc_index);
499 
500 			break;
501 		}
502 	}
503 
504 	if (!lmb_found)
505 		rc = -EINVAL;
506 
507 	if (rc)
508 		pr_debug("Failed to hot-remove memory at %llx\n",
509 			 lmb->base_addr);
510 	else
511 		pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
512 
513 	return rc;
514 }
515 
/*
 * Hot-remove a contiguous run of @lmbs_to_remove LMBs starting at the
 * LMB with DRC index @drc_index.  On any failure, every LMB removed so
 * far is added back, with an UNISOLATE hint sent to the hypervisor.
 */
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/*
	 * Validate that all LMBs in range are not reserved. Note that it
	 * is ok if they are !ASSIGNED since our goal here is to remove the
	 * LMB range, regardless of whether some LMBs were already removed
	 * by any other reason.
	 *
	 * This is a contrast to what is done in remove_by_count() where we
	 * check for both RESERVED and !ASSIGNED (via lmb_is_removable()),
	 * because we want to remove a fixed amount of LMBs in that function.
	 */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED) {
			pr_err("Memory at %llx (drc index %x) is reserved\n",
				lmb->base_addr, lmb->drc_index);
			return -EINVAL;
		}
	}

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		/*
		 * dlpar_remove_lmb() will error out if the LMB is already
		 * !ASSIGNED, but this case is a no-op for us.
		 */
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		/* Track what was removed so a failure can be rolled back. */
		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");


		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			/*
			 * Setting the isolation state of an UNISOLATED/CONFIGURED
			 * device to UNISOLATE is a no-op, but the hypervisor can
			 * use it as a hint that the LMB removal failed.
			 */
			dlpar_unisolate_drc(lmb->drc_index);

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		/* Success: release the DRCs and drop the reservations. */
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}
602 
603 #else
/*
 * CONFIG_MEMORY_HOTREMOVE=n stubs: the DLPAR remove paths fail with
 * -EOPNOTSUPP, while detaching a memory node is accepted as a no-op.
 */
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned long memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
630 #endif /* CONFIG_MEMORY_HOTREMOVE */
631 
/*
 * Add and online the memory backing @lmb; the caller must already hold
 * the LMB's DRC.  On success DRCONF_MEM_ASSIGNED is set; on failure
 * the associativity index is invalidated and any added memory removed.
 *
 * NOTE(review): on update_lmb_associativity_index() failure this
 * releases the DRC itself, yet callers also release the DRC when this
 * function returns an error - looks like a possible double release on
 * that path; confirm against dlpar_release_drc() semantics.
 */
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = update_lmb_associativity_index(lmb);
	if (rc) {
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this LMB.  Fake one if necessary. */
	nid = of_drconf_to_nid_single(lmb);
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	/* Add the memory */
	rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_NONE);
	if (rc) {
		invalidate_lmb_associativity_index(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		/* Undo the add if the block cannot be onlined. */
		__remove_memory(lmb->base_addr, block_sz);
		invalidate_lmb_associativity_index(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}
670 
/*
 * Hot-add @lmbs_to_add LMBs, chosen from whichever LMBs are currently
 * unassigned.  Added LMBs are temporarily marked reserved so a partial
 * failure can be rolled back by removing them again.
 * Returns 0 when the full count was added, -EINVAL otherwise.
 */
static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_reserved = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			continue;

		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		/* A failed acquire/add is tolerated; try the next LMB. */
		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);
		lmbs_reserved++;
		if (lmbs_reserved == lmbs_to_add)
			break;
	}

	if (lmbs_reserved != lmbs_to_add) {
		/* Rollback: remove everything added above. */
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
			lmbs_reserved--;

			if (lmbs_reserved == 0)
				break;
		}
		rc = -EINVAL;
	} else {
		/* Success: just drop the reservations. */
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_debug("Memory at %llx (drc index %x) was hot-added\n",
				 lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
			lmbs_reserved--;

			if (lmbs_reserved == 0)
				break;
		}
		rc = 0;
	}

	return rc;
}
760 
761 static int dlpar_memory_add_by_index(u32 drc_index)
762 {
763 	struct drmem_lmb *lmb;
764 	int rc, lmb_found;
765 
766 	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
767 
768 	lmb_found = 0;
769 	for_each_drmem_lmb(lmb) {
770 		if (lmb->drc_index == drc_index) {
771 			lmb_found = 1;
772 			rc = dlpar_acquire_drc(lmb->drc_index);
773 			if (!rc) {
774 				rc = dlpar_add_lmb(lmb);
775 				if (rc)
776 					dlpar_release_drc(lmb->drc_index);
777 			}
778 
779 			break;
780 		}
781 	}
782 
783 	if (!lmb_found)
784 		rc = -EINVAL;
785 
786 	if (rc)
787 		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
788 	else
789 		pr_info("Memory at %llx (drc index %x) was hot-added\n",
790 			lmb->base_addr, drc_index);
791 
792 	return rc;
793 }
794 
/*
 * Hot-add a contiguous run of @lmbs_to_add LMBs starting at the LMB
 * with DRC index @drc_index.  Any failure removes every LMB added so
 * far; already-assigned LMBs in the range are skipped.
 */
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		/* Fail immediately if the whole range can't be hot-added */
		if (lmb->flags & DRCONF_MEM_RESERVED) {
			pr_err("Memory at %llx (drc index %x) is reserved\n",
					lmb->base_addr, lmb->drc_index);
			return -EINVAL;
		}
	}

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		/* Track what was added so a failure can be rolled back. */
		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		/* Success: just drop the reservations. */
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}
867 
/*
 * Top-level memory DLPAR entry point: dispatch a hotplug error log to
 * the matching add/remove handler (by count, DRC index, or
 * indexed-count) and, on success, write the updated LMB state back to
 * the device tree via drmem_update_dt().
 * Called with no locks held; takes the device hotplug lock itself.
 */
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	/* Persist the new LMB flags/aa_index into the device tree. */
	if (!rc)
		rc = drmem_update_dt();

	unlock_device_hotplug();
	return rc;
}
930 
931 static int pseries_add_mem_node(struct device_node *np)
932 {
933 	const __be32 *prop;
934 	unsigned long base;
935 	unsigned long lmb_size;
936 	int ret = -EINVAL;
937 	int addr_cells, size_cells;
938 
939 	/*
940 	 * Check to see if we are actually adding memory
941 	 */
942 	if (!of_node_is_type(np, "memory"))
943 		return 0;
944 
945 	/*
946 	 * Find the base and size of the memblock
947 	 */
948 	prop = of_get_property(np, "reg", NULL);
949 	if (!prop)
950 		return ret;
951 
952 	addr_cells = of_n_addr_cells(np);
953 	size_cells = of_n_size_cells(np);
954 	/*
955 	 * "reg" property represents (addr,size) tuple.
956 	 */
957 	base = of_read_number(prop, addr_cells);
958 	prop += addr_cells;
959 	lmb_size = of_read_number(prop, size_cells);
960 
961 	/*
962 	 * Update memory region to represent the memory add
963 	 */
964 	ret = memblock_add(base, lmb_size);
965 	return (ret < 0) ? -EINVAL : 0;
966 }
967 
/*
 * OF reconfiguration notifier: keep memblock and the drmem LMB table
 * in sync as memory nodes are attached/detached, or as the
 * dynamic-reconfiguration-memory properties are updated.
 */
static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		/* Refresh the cached LMB array from the updated property. */
		if (!strcmp(rd->dn->name,
			    "ibm,dynamic-reconfiguration-memory"))
			drmem_update_lmbs(rd->prop);
	}
	return notifier_from_errno(err);
}
988 
/* Notifier registered at init time for OF reconfiguration events. */
static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};
992 
993 static int __init pseries_memory_hotplug_init(void)
994 {
995 	if (firmware_has_feature(FW_FEATURE_LPAR))
996 		of_reconfig_notifier_register(&pseries_mem_nb);
997 
998 	return 0;
999 }
1000 machine_device_initcall(pseries, pseries_memory_hotplug_init);
1001