/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include <asm/drmem.h>
#include "pseries.h"

static bool rtas_hp_event;

unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		const __be64 *size;

		size = of_get_property(np, "ibm,lmb-size", NULL);
		if (size)
			memblock_size = be64_to_cpup(size);
		of_node_put(np);
	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0; use this to find
			 * the first memory block and get its size.
			 */
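			/* For example, if memory@0 is 256MB, the next
			 * memory node is expected at /memory@10000000:
			 * the node name encodes the start address in hex,
			 * which here equals memory@0's size.
			 */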
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}
	return memblock_size;
}

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

static bool find_aa_index(struct device_node *dr_node,
			  struct property *ala_prop,
			  const u32 *lmb_assoc, u32 *aa_index)
{
	u32 *assoc_arrays, new_prop_size;
	struct property *new_prop;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
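	/*
	 * For illustration (values are hypothetical): a property describing
	 * two arrays of four entries each is laid out as
	 *
	 *   { 2, 4,  a0 a1 a2 a3,  b0 b1 b2 b3 }
	 *
	 * and an LMB whose associativity matches the second array gets
	 * aa_index 1.
	 */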
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		*aa_index = i;
		return true;
	}

	new_prop_size = ala_prop->length + aa_array_sz;
	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
	if (!new_prop)
		return false;

	assoc_arrays = new_prop->value;

	/* increment the number of entries in the lookup array */
	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

	/* copy the new associativity into the lookup array */
	index = aa_arrays * aa_array_entries + 2;
	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

	of_update_property(dr_node, new_prop);

	/*
	 * The associativity lookup array index for this lmb is
	 * number of entries - 1 since we added its associativity
	 * to the end of the lookup array.
	 */
	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	return true;
}

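/*
 * Look up (or create) the associativity lookup array index for an LMB and
 * record it in lmb->aa_index: the LMB's device tree nodes are built via
 * configure-connector, its ibm,associativity property is matched against
 * ibm,associativity-lookup-arrays, and the lookup array is extended when
 * no existing entry matches.
 */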
static int update_lmb_associativity_index(struct drmem_lmb *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;
	bool found;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);

	/* drop the reference taken by of_find_node_by_path() */
	of_node_put(dr_node);
	dlpar_free_cc_nodes(lmb_node);

	if (!found) {
		pr_err("Could not find LMB associativity\n");
		return -1;
	}

	lmb->aa_index = aa_index;
	return 0;
}

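/*
 * Map an LMB's starting address to the memory_block device that contains
 * it.  The returned device holds a reference which the caller drops with
 * put_device().
 */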
static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
{
	unsigned long section_nr;
	struct mem_section *mem_sect;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	mem_sect = __nr_to_section(section_nr);

	mem_block = find_memory_block(mem_sect);
	return mem_block;
}

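/*
 * Resolve a (drc_index, n_lmbs) pair to a contiguous range of drmem_lmb
 * entries: the range starts at the LMB whose drc_index matches and spans
 * n_lmbs entries.  Fails if no LMB matches or the range would run past the
 * end of the drmem LMB array.
 */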
static int get_lmb_range(u32 drc_index, int n_lmbs,
			 struct drmem_lmb **start_lmb,
			 struct drmem_lmb **end_lmb)
{
	struct drmem_lmb *lmb, *start, *end;
	struct drmem_lmb *last_lmb;

	start = NULL;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			start = lmb;
			break;
		}
	}

	if (!start)
		return -EINVAL;

	end = &start[n_lmbs - 1];

	last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
	if (end > last_lmb)
		return -EINVAL;

	*start_lmb = start;
	*end_lmb = end;
	return 0;
}

static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
{
	struct memory_block *mem_block;
	int rc;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	if (online && mem_block->dev.offline)
		rc = device_online(&mem_block->dev);
	else if (!online && !mem_block->dev.offline)
		rc = device_offline(&mem_block->dev);
	else
		rc = 0;

	put_device(&mem_block->dev);

	return rc;
}

static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}

static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

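	/*
	 * A memory block may span several sparsemem sections; remove it
	 * one MIN_MEMORY_BLOCK_SIZE (section-sized) chunk at a time.
	 */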
	for (i = 0; i < sections_per_block; i++) {
		remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually removing memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}

static bool lmb_is_removable(struct drmem_lmb *lmb)
{
	int i, scns_per_block;
	int rc = 1;
	unsigned long pfn, block_sz;
	u64 phys_addr;

	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

	block_sz = memory_block_size_bytes();
	scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	phys_addr = lmb->base_addr;

#ifdef CONFIG_FA_DUMP
	/* Don't hot-remove memory that falls in fadump boot memory area */
	if (is_fadump_boot_memory_area(phys_addr, block_sz))
		return false;
#endif

	for (i = 0; i < scns_per_block; i++) {
		pfn = PFN_DOWN(phys_addr);
		if (!pfn_present(pfn)) {
			/* advance past absent sections as well */
			phys_addr += MIN_MEMORY_BLOCK_SIZE;
			continue;
		}

		rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
		phys_addr += MIN_MEMORY_BLOCK_SIZE;
	}

	return rc ? true : false;
}

static int dlpar_add_lmb(struct drmem_lmb *);

static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	remove_memory(nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	invalidate_lmb_associativity_index(lmb);
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;

	return 0;
}

static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_removed++;
		if (lmbs_removed == lmbs_to_remove)
			break;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMBs back\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}

		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_remove_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc)
				dlpar_release_drc(lmb->drc_index);

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-remove memory at %llx\n",
			lmb->base_addr);
	else
		pr_info("Memory at %llx was hot-removed\n", lmb->base_addr);

	return rc;
}

static int dlpar_memory_readd_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_info("Attempting to update LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}
			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to update memory at %llx\n",
			lmb->base_addr);
	else
		pr_info("Memory at %llx was updated\n", lmb->base_addr);

	return rc;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_readd_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

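/*
 * Add a single LMB: bind it to an associativity lookup array entry, add
 * the memory to the kernel, and online the resulting memory block.  On
 * failure the partial work is rolled back and the LMB is left unassigned.
 */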
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = update_lmb_associativity_index(lmb);
	if (rc) {
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this address */
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	/* Add the memory */
	rc = add_memory(nid, lmb->base_addr, block_sz);
	if (rc) {
		invalidate_lmb_associativity_index(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		remove_memory(nid, lmb->base_addr, block_sz);
		invalidate_lmb_associativity_index(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}

static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_added++;
		if (lmbs_added == lmbs_to_add)
			break;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_add_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int rc, lmb_found;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmb->drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmb->base_addr, drc_index);

	return rc;
}

static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

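/*
 * Top-level handler for RTAS memory hotplug events: dispatch on the
 * requested action (add, remove, re-add) and on how the request identifies
 * its target (a plain LMB count, a single drc index, or an indexed count),
 * then push the updated LMB state back into the device tree on success.
 */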
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_READD:
		drc_index = hp_elog->_drc_u.drc_index;
		rc = dlpar_memory_readd_by_index(drc_index);
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	if (!rc) {
		rtas_hp_event = true;
		rc = drmem_update_dt();
		rtas_hp_event = false;
	}

	unlock_device_hotplug();
	return rc;
}

static int pseries_add_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually adding memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}

static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell_v1 *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of LMBs described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell entries.
	 */
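	/* Layout of the (v1) ibm,dynamic-memory property, for reference:
	 *
	 *   <number of LMBs>
	 *   <base_addr drc_index reserved aa_index flags>   (LMB 0)
	 *   <base_addr drc_index reserved aa_index flags>   (LMB 1)
	 *   ...
	 *
	 * which is the layout struct of_drconf_cell_v1 mirrors.
	 */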
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell_v1 *)p;

	p = (__be32 *)pr->prop->value;
	p++;
	new_drmem = (struct of_drconf_cell_v1 *)p;

	for (i = 0; i < entries; i++) {
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			rc = pseries_remove_memblock(
					be64_to_cpu(old_drmem[i].base_addr),
					memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) &&
			    (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}
	return rc;
}

static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
			err = pseries_update_drconf_memory(rd);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);