/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include <asm/drmem.h>
#include "pseries.h"

static bool rtas_hp_event;

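/*
 * Return the size of a memory block (LMB) in bytes: prefer the
 * "ibm,lmb-size" property of /ibm,dynamic-reconfiguration-memory and, on
 * pseries machines without that node, fall back to sizing the memory node
 * that follows memory@0.
 */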
unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		const __be64 *size;

		size = of_get_property(np, "ibm,lmb-size", NULL);
		if (size)
			memblock_size = be64_to_cpup(size);
		of_node_put(np);
	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0, use this to find
			 * the first memory block and get its size.
			 */
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}
	return memblock_size;
}

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

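/*
 * Find the index of the associativity array in the
 * ibm,associativity-lookup-arrays property that matches lmb_assoc.  If no
 * match exists, append the new associativity to a clone of the property,
 * update the device tree, and return the index of the appended array.
 */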
static bool find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop,
			 const u32 *lmb_assoc, u32 *aa_index)
{
	u32 *assoc_arrays, new_prop_size;
	struct property *new_prop;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		*aa_index = i;
		return true;
	}

	new_prop_size = ala_prop->length + aa_array_sz;
	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
	if (!new_prop)
		return false;

	assoc_arrays = new_prop->value;

	/* increment the number of entries in the lookup array */
	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

	/* copy the new associativity into the lookup array */
	index = aa_arrays * aa_array_entries + 2;
	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

	of_update_property(dr_node, new_prop);

	/*
	 * The associativity lookup array index for this lmb is
	 * number of entries - 1 since we added its associativity
	 * to the end of the lookup array.
	 */
	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	return true;
}

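/*
 * Configure the LMB's DRC connector to read its ibm,associativity property
 * and store the matching lookup-array index in lmb->aa_index.
 */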
static int update_lmb_associativity_index(struct drmem_lmb *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;
	bool found;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);

	of_node_put(dr_node);
	dlpar_free_cc_nodes(lmb_node);

	if (!found) {
		pr_err("Could not find LMB associativity\n");
		return -1;
	}

	lmb->aa_index = aa_index;
	return 0;
}

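/* Return the memory_block device backing the first section of this LMB. */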
static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
{
	unsigned long section_nr;
	struct mem_section *mem_sect;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	mem_sect = __nr_to_section(section_nr);

	mem_block = find_memory_block(mem_sect);
	return mem_block;
}

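/*
 * Find the LMB with the given DRC index and return it along with the LMB
 * n_lmbs - 1 entries later, verifying that the range does not run past the
 * end of the drmem LMB array.
 */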
static int get_lmb_range(u32 drc_index, int n_lmbs,
			 struct drmem_lmb **start_lmb,
			 struct drmem_lmb **end_lmb)
{
	struct drmem_lmb *lmb, *start, *end;
	struct drmem_lmb *last_lmb;

	start = NULL;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			start = lmb;
			break;
		}
	}

	if (!start)
		return -EINVAL;

	end = &start[n_lmbs - 1];

	last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
	if (end > last_lmb)
		return -EINVAL;

	*start_lmb = start;
	*end_lmb = end;
	return 0;
}

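/*
 * Online or offline the memory block backing an LMB through its device,
 * doing nothing if the block is already in the requested state.
 */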
static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
{
	struct memory_block *mem_block;
	int rc;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	if (online && mem_block->dev.offline)
		rc = device_online(&mem_block->dev);
	else if (!online && !mem_block->dev.offline)
		rc = device_offline(&mem_block->dev);
	else
		rc = 0;

	put_device(&mem_block->dev);

	return rc;
}

static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}

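/*
 * Remove each memory section of the block starting at @base and drop the
 * corresponding memblock region.
 */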
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	for (i = 0; i < sections_per_block; i++) {
		__remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually removing memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}

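/*
 * An LMB can be removed only if it is assigned, does not overlap the fadump
 * reserved regions (when CONFIG_FA_DUMP is enabled), and every present
 * memory section it covers is removable.
 */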
static bool lmb_is_removable(struct drmem_lmb *lmb)
{
	int i, scns_per_block;
	int rc = 1;
	unsigned long pfn, block_sz;
	u64 phys_addr;

	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

	block_sz = memory_block_size_bytes();
	scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	phys_addr = lmb->base_addr;

#ifdef CONFIG_FA_DUMP
	/*
	 * Don't hot-remove memory that falls in fadump boot memory area
	 * and memory that is reserved for capturing old kernel memory.
	 */
	if (is_fadump_memory_area(phys_addr, block_sz))
		return false;
#endif

	for (i = 0; i < scns_per_block; i++) {
		pfn = PFN_DOWN(phys_addr);
		if (!pfn_present(pfn)) {
			/* advance past absent sections instead of re-checking
			 * the same address
			 */
			phys_addr += MIN_MEMORY_BLOCK_SIZE;
			continue;
		}

		rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
		phys_addr += MIN_MEMORY_BLOCK_SIZE;
	}

	return rc ? true : false;
}

static int dlpar_add_lmb(struct drmem_lmb *);

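/*
 * Offline and remove the memory backing an LMB, then invalidate its
 * associativity index and clear its NUMA node and DRCONF_MEM_ASSIGNED flag.
 */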
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();

	__remove_memory(lmb->nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	invalidate_lmb_associativity_index(lmb);
	lmb_clear_nid(lmb);
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;

	return 0;
}

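/*
 * Hot-remove the requested number of LMBs, taking any that are removable.
 * Removed LMBs are marked reserved so they can be added back if the full
 * count cannot be satisfied; on success their DRCs are released instead.
 */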
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_removed++;
		if (lmbs_removed == lmbs_to_remove)
			break;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMBs back\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}

		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}

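/* Hot-remove the single LMB identified by its DRC index. */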
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc)
				dlpar_release_drc(lmb->drc_index);

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-remove memory at %llx\n",
			lmb->base_addr);
	else
		pr_info("Memory at %llx was hot-removed\n", lmb->base_addr);

	return rc;
}

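/*
 * Update the LMB identified by its DRC index by removing it and then
 * adding it back.
 */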
static int dlpar_memory_readd_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_info("Attempting to update LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}
			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to update memory at %llx\n",
			lmb->base_addr);
	else
		pr_info("Memory at %llx was updated\n", lmb->base_addr);

	return rc;
}

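/*
 * Hot-remove a contiguous range of LMBs starting at the LMB with the given
 * DRC index.  If any removal fails, every LMB removed so far is added back.
 */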
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_readd_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

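/*
 * Add the memory backing an LMB: update its associativity index, set its
 * NUMA node, add and online the memory block, and mark the LMB assigned.
 */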
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = update_lmb_associativity_index(lmb);
	if (rc) {
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	lmb_set_nid(lmb);
	block_sz = memory_block_size_bytes();

	/* Add the memory */
	rc = __add_memory(lmb->nid, lmb->base_addr, block_sz);
	if (rc) {
		invalidate_lmb_associativity_index(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		__remove_memory(lmb->nid, lmb->base_addr, block_sz);
		invalidate_lmb_associativity_index(lmb);
		lmb_clear_nid(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}

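/*
 * Hot-add the requested number of LMBs from those not currently assigned.
 * Added LMBs are marked reserved so they can be removed again if the full
 * count cannot be satisfied.
 */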
static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_added++;
		if (lmbs_added == lmbs_to_add)
			break;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}

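/* Hot-add the single LMB identified by its DRC index. */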
static int dlpar_memory_add_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int rc, lmb_found;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmb->drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmb->base_addr, drc_index);

	return rc;
}

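/*
 * Hot-add a contiguous range of LMBs starting at the LMB with the given
 * DRC index.  If any addition fails, every LMB added so far is removed.
 */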
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

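/*
 * Handle a pseries hotplug error log for memory: dispatch on the action
 * (add, remove, re-add) and identifier type (count, DRC index, or
 * indexed-count), then update the device tree via drmem_update_dt() on
 * success.
 */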
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_READD:
		drc_index = hp_elog->_drc_u.drc_index;
		rc = dlpar_memory_readd_by_index(drc_index);
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	if (!rc) {
		rtas_hp_event = true;
		rc = drmem_update_dt();
		rtas_hp_event = false;
	}

	unlock_device_hotplug();
	return rc;
}

static int pseries_add_mem_node(struct device_node *np)
{
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually adding memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}

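/*
 * Handle an ibm,dynamic-memory property update that did not come from an
 * RTAS hotplug event: compare the old and new of_drconf_cell_v1 entries and
 * add or remove the corresponding memblock region when an entry's
 * DRCONF_MEM_ASSIGNED flag changes.
 */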
static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell_v1 *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of LMBs described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell's.
	 */
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell_v1 *)p;

	p = (__be32 *)pr->prop->value;
	p++;
	new_drmem = (struct of_drconf_cell_v1 *)p;

	for (i = 0; i < entries; i++) {
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			rc = pseries_remove_memblock(
					be64_to_cpu(old_drmem[i].base_addr),
					memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) &&
			    (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}
	return rc;
}

static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
			err = pseries_update_drconf_memory(rd);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);