/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include "pseries.h"

static bool rtas_hp_event;

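/*
 * Return the size of a hotpluggable memory block (LMB).  The size is read
 * from the "ibm,lmb-size" property of /ibm,dynamic-reconfiguration-memory
 * when present; otherwise, on pseries, it is inferred by locating the
 * memory node that immediately follows memory@0 and using its size.
 */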
unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		const __be64 *size;

		size = of_get_property(np, "ibm,lmb-size", NULL);
		if (size)
			memblock_size = be64_to_cpup(size);
		of_node_put(np);
	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0, use this to find
			 * the first memory block and get its size.
			 */
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}
	return memblock_size;
}

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

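/*
 * Clone the "ibm,dynamic-memory" property and byte-swap the LMB count and
 * each of_drconf_cell entry into CPU endianness so the copy can be edited
 * in place.  dlpar_update_drconf_property() performs the reverse conversion
 * before installing the property back into the device tree.
 */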
static struct property *dlpar_clone_drconf_property(struct device_node *dn)
{
	struct property *prop, *new_prop;
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	prop = of_find_property(dn, "ibm,dynamic-memory", NULL);
	if (!prop)
		return NULL;

	new_prop = dlpar_clone_property(prop, prop->length);
	if (!new_prop)
		return NULL;

	/* Convert the property to cpu endian-ness */
	p = new_prop->value;
	*p = be32_to_cpu(*p);

	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
	}

	return new_prop;
}

static void dlpar_update_drconf_property(struct device_node *dn,
					 struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	/* Convert the property back to BE */
	p = prop->value;
	num_lmbs = *p;
	*p = cpu_to_be32(*p);
	p++;

	lmbs = (struct of_drconf_cell *)p;
	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
	}

	rtas_hp_event = true;
	of_update_property(dn, prop);
	rtas_hp_event = false;
}

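/*
 * Propagate an updated LMB (flags and associativity index) back into the
 * "ibm,dynamic-memory" property of /ibm,dynamic-reconfiguration-memory.
 * rtas_hp_event is set around the property update so that our own reconfig
 * notifier ignores the resulting OF_RECONFIG_UPDATE_PROPERTY event.
 */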
static int dlpar_update_device_tree_lmb(struct of_drconf_cell *lmb)
{
	struct device_node *dn;
	struct property *prop;
	struct of_drconf_cell *lmbs;
	u32 *p, num_lmbs;
	int i;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn)
		return -ENODEV;

	prop = dlpar_clone_drconf_property(dn);
	if (!prop) {
		of_node_put(dn);
		return -ENODEV;
	}

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == lmb->drc_index) {
			lmbs[i].flags = lmb->flags;
			lmbs[i].aa_index = lmb->aa_index;

			dlpar_update_drconf_property(dn, prop);
			break;
		}
	}

	of_node_put(dn);
	return 0;
}

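/*
 * Find the index of the array in "ibm,associativity-lookup-arrays" that
 * matches the LMB's "ibm,associativity" (skipping the leading count cell).
 * If no match exists, the lookup-arrays property is grown by one array, the
 * new associativity is appended, and the index of that new entry is returned.
 */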
static u32 find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop, const u32 *lmb_assoc)
{
	u32 *assoc_arrays;
	u32 aa_index;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	aa_index = -1;
	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		aa_index = i;
		break;
	}

	if (aa_index == -1) {
		struct property *new_prop;
		u32 new_prop_size;

		new_prop_size = ala_prop->length + aa_array_sz;
		new_prop = dlpar_clone_property(ala_prop, new_prop_size);
		if (!new_prop)
			return -1;

		assoc_arrays = new_prop->value;

		/* increment the number of entries in the lookup array */
		assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

		/* copy the new associativity into the lookup array */
		index = aa_arrays * aa_array_entries + 2;
		memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

		of_update_property(dr_node, new_prop);

		/*
		 * The associativity lookup array index for this lmb is
		 * number of entries - 1 since we added its associativity
		 * to the end of the lookup array.
		 */
		aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	}

	return aa_index;
}

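/*
 * Use the configure-connector sequence to retrieve the device tree node for
 * the LMB identified by its DRC index, read its "ibm,associativity", and map
 * that to an index in "ibm,associativity-lookup-arrays" via find_aa_index().
 */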
static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);

	dlpar_free_cc_nodes(lmb_node);
	return aa_index;
}

static int dlpar_add_device_tree_lmb(struct of_drconf_cell *lmb)
{
	int aa_index;

	lmb->flags |= DRCONF_MEM_ASSIGNED;

	aa_index = lookup_lmb_associativity_index(lmb);
	if (aa_index < 0) {
		pr_err("Couldn't find associativity index for drc index %x\n",
		       lmb->drc_index);
		return aa_index;
	}

	lmb->aa_index = aa_index;
	return dlpar_update_device_tree_lmb(lmb);
}

static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
{
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;
	lmb->aa_index = 0xffffffff;
	return dlpar_update_device_tree_lmb(lmb);
}

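/*
 * Translate an LMB's base address into the corresponding struct memory_block.
 * find_memory_block() takes a reference on the block's device; callers drop
 * it with put_device() once they are done with the block.
 */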
static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
{
	unsigned long section_nr;
	struct mem_section *mem_sect;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	mem_sect = __nr_to_section(section_nr);

	mem_block = find_memory_block(mem_sect);
	return mem_block;
}

static int dlpar_change_lmb_state(struct of_drconf_cell *lmb, bool online)
{
	struct memory_block *mem_block;
	int rc;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	if (online && mem_block->dev.offline)
		rc = device_online(&mem_block->dev);
	else if (!online && !mem_block->dev.offline)
		rc = device_offline(&mem_block->dev);
	else
		rc = 0;

	put_device(&mem_block->dev);

	return rc;
}

static int dlpar_online_lmb(struct of_drconf_cell *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int dlpar_offline_lmb(struct of_drconf_cell *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}

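/*
 * Tear down the memory sections backing a memory node that is being
 * detached and update the memblock view of physical memory accordingly.
 */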
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	for (i = 0; i < sections_per_block; i++) {
		remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually removing memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}

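/*
 * An LMB can only be hot-removed if it is currently assigned, does not
 * overlap the fadump boot memory area, and every present memory section
 * in it is deemed removable by the core mm.
 */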
static bool lmb_is_removable(struct of_drconf_cell *lmb)
{
	int i, scns_per_block;
	int rc = 1;
	unsigned long pfn, block_sz;
	u64 phys_addr;

	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

	block_sz = memory_block_size_bytes();
	scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	phys_addr = lmb->base_addr;

#ifdef CONFIG_FA_DUMP
	/* Don't hot-remove memory that falls in fadump boot memory area */
	if (is_fadump_boot_memory_area(phys_addr, block_sz))
		return false;
#endif

	for (i = 0; i < scns_per_block; i++) {
		pfn = PFN_DOWN(phys_addr);
		if (!pfn_present(pfn)) {
			phys_addr += MIN_MEMORY_BLOCK_SIZE;
			continue;
		}

		rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
		phys_addr += MIN_MEMORY_BLOCK_SIZE;
	}

	return rc ? true : false;
}

static int dlpar_add_lmb(struct of_drconf_cell *);

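/*
 * Hot-remove a single LMB: offline its memory block, remove the memory from
 * the kernel and from memblock, and clear DRCONF_MEM_ASSIGNED in the device
 * tree.  The DRC itself is released by the caller once the whole request
 * has succeeded.
 */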
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	remove_memory(nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	dlpar_remove_device_tree_lmb(lmb);
	return 0;
}

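/*
 * Hot-remove any lmbs_to_remove removable LMBs.  LMBs removed so far are
 * flagged with the internal "reserved" bit so that, if the full count cannot
 * be satisfied, they can be added back; otherwise their DRCs are released
 * and the reservation is cleared.
 */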
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	struct of_drconf_cell *lmbs;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	u32 num_lmbs, *p;
	int i, rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = 0; i < num_lmbs; i++) {
		if (lmb_is_removable(&lmbs[i]))
			lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
		rc = dlpar_remove_lmb(&lmbs[i]);
		if (rc)
			continue;

		lmbs_removed++;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		lmbs[i].reserved = 1;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_add_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmbs[i].drc_index);

			lmbs[i].reserved = 0;
		}

		rc = -EINVAL;
	} else {
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			dlpar_release_drc(lmbs[i].drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmbs[i].base_addr);

			lmbs[i].reserved = 0;
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmb_found;
	int i, rc;

	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(&lmbs[i]);
			if (!rc)
				dlpar_release_drc(lmbs[i].drc_index);

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-remove memory, drc index %x\n",
			drc_index);
	else
		pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr);

	return rc;
}

static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmb_found;
	int i, rc;

	pr_info("Attempting to update LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(&lmbs[i]);
			if (!rc) {
				rc = dlpar_add_lmb(&lmbs[i]);
				if (rc)
					dlpar_release_drc(lmbs[i].drc_index);
			}
			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to update memory, drc index %x\n",
			drc_index);
	else
		pr_info("Memory at %llx was updated\n", lmbs[i].base_addr);

	return rc;
}

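/*
 * Hot-remove a contiguous run of lmbs_to_remove LMBs starting at the LMB
 * with the given DRC index.  On any failure the LMBs removed so far are
 * added back and -EINVAL is returned.
 */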
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index,
				     struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i, rc = 0, start_lmb_found;
	int lmbs_available = 0, start_index = 0, end_index;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;
	start_lmb_found = 0;

	/* Navigate to drc_index */
	while (start_index < num_lmbs) {
		if (lmbs[start_index].drc_index == drc_index) {
			start_lmb_found = 1;
			break;
		}

		start_index++;
	}

	if (!start_lmb_found)
		return -EINVAL;

	end_index = start_index + lmbs_to_remove;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = start_index; i < end_index; i++) {
		if (lmbs[i].flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for (i = start_index; i < end_index; i++) {
		if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(&lmbs[i]);
		if (rc)
			break;

		lmbs[i].reserved = 1;
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");

		for (i = start_index; i < end_index; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_add_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       be32_to_cpu(lmbs[i].drc_index));

			lmbs[i].reserved = 0;
		}
		rc = -EINVAL;
	} else {
		for (i = start_index; i < end_index; i++) {
			if (!lmbs[i].reserved)
				continue;

			dlpar_release_drc(lmbs[i].drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmbs[i].base_addr, lmbs[i].drc_index);

			lmbs[i].reserved = 0;
		}
	}

	return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index,
				     struct property *prop)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

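/*
 * Hot-add a single LMB: record its associativity and DRCONF_MEM_ASSIGNED
 * flag in the device tree, add the memory to the kernel, and online the
 * corresponding memory block.  Each step is unwound if a later one fails.
 */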
static int dlpar_add_lmb(struct of_drconf_cell *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = dlpar_add_device_tree_lmb(lmb);
	if (rc) {
		pr_err("Couldn't update device tree for drc index %x\n",
		       lmb->drc_index);
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this address */
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	/* Add the memory */
	rc = add_memory(nid, lmb->base_addr, block_sz);
	if (rc) {
		dlpar_remove_device_tree_lmb(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		remove_memory(nid, lmb->base_addr, block_sz);
		dlpar_remove_device_tree_lmb(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}

static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int i, rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = 0; i < num_lmbs; i++) {
		if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
		if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmbs[i].drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(&lmbs[i]);
		if (rc) {
			dlpar_release_drc(lmbs[i].drc_index);
			continue;
		}

		lmbs_added++;

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		lmbs[i].reserved = 1;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_remove_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       be32_to_cpu(lmbs[i].drc_index));
			else
				dlpar_release_drc(lmbs[i].drc_index);
		}
		rc = -EINVAL;
	} else {
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmbs[i].base_addr, lmbs[i].drc_index);
			lmbs[i].reserved = 0;
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i, lmb_found;
	int rc;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmbs[i].drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(&lmbs[i]);
				if (rc)
					dlpar_release_drc(lmbs[i].drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmbs[i].base_addr, drc_index);

	return rc;
}

static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index,
				  struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i, rc = 0, start_lmb_found;
	int lmbs_available = 0, start_index = 0, end_index;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;
	start_lmb_found = 0;

	/* Navigate to drc_index */
	while (start_index < num_lmbs) {
		if (lmbs[start_index].drc_index == drc_index) {
			start_lmb_found = 1;
			break;
		}

		start_index++;
	}

	if (!start_lmb_found)
		return -EINVAL;

	end_index = start_index + lmbs_to_add;

	/* Validate that the LMBs in this range are not reserved */
	for (i = start_index; i < end_index; i++) {
		if (lmbs[i].flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for (i = start_index; i < end_index; i++) {
		if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmbs[i].drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(&lmbs[i]);
		if (rc) {
			dlpar_release_drc(lmbs[i].drc_index);
			break;
		}

		lmbs[i].reserved = 1;
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for (i = start_index; i < end_index; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_remove_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       be32_to_cpu(lmbs[i].drc_index));
			else
				dlpar_release_drc(lmbs[i].drc_index);
		}
		rc = -EINVAL;
	} else {
		for (i = start_index; i < end_index; i++) {
			if (!lmbs[i].reserved)
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmbs[i].base_addr, lmbs[i].drc_index);
			lmbs[i].reserved = 0;
		}
	}

	return rc;
}

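/*
 * Entry point for RTAS hotplug error log events targeting memory.  The
 * drconf property is cloned once, handed to the add/remove/readd handlers
 * keyed off the requested action and id type, and freed afterwards.
 */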
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	struct device_node *dn;
	struct property *prop;
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	prop = dlpar_clone_drconf_property(dn);
	if (!prop) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count, prop);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index, prop);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index, prop);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count, prop);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index, prop);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index, prop);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_READD:
		drc_index = hp_elog->_drc_u.drc_index;
		rc = dlpar_memory_readd_by_index(drc_index, prop);
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	dlpar_free_property(prop);

dlpar_memory_out:
	of_node_put(dn);
	unlock_device_hotplug();
	return rc;
}

static int pseries_add_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually adding memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}

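/*
 * Reconfig notifier handler for updates to "ibm,dynamic-memory" that did not
 * originate from this driver (rtas_hp_event is false): compare old and new
 * LMB flags and add or remove the affected memblock range accordingly.
 */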
static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of lmb's described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell's.
	 */
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell *)p;

	p = (__be32 *)pr->prop->value;
	p++;
	new_drmem = (struct of_drconf_cell *)p;

	for (i = 0; i < entries; i++) {
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			rc = pseries_remove_memblock(
				be64_to_cpu(old_drmem[i].base_addr),
						     memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) &&
			    (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}
	return rc;
}

static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
			err = pseries_update_drconf_memory(rd);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);