/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include "pseries.h"

static bool rtas_hp_event;

unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		const __be64 *size;

		size = of_get_property(np, "ibm,lmb-size", NULL);
		if (size)
			memblock_size = be64_to_cpup(size);
		of_node_put(np);
	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0, use this to find
			 * the first memory block and get its size.
			 */
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}
	return memblock_size;
}

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

static struct property *dlpar_clone_drconf_property(struct device_node *dn)
{
	struct property *prop, *new_prop;
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	prop = of_find_property(dn, "ibm,dynamic-memory", NULL);
	if (!prop)
		return NULL;

	new_prop = dlpar_clone_property(prop, prop->length);
	if (!new_prop)
		return NULL;

	/* Convert the property to cpu endian-ness */
	p = new_prop->value;
	*p = be32_to_cpu(*p);

	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
	}

	return new_prop;
}
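
/*
 * Layout sketch (informal, inferred from the parsing above): the value of
 * "ibm,dynamic-memory" is a be32 LMB count followed by that many packed
 * of_drconf_cell entries,
 *
 *	[ num_lmbs | of_drconf_cell 0 | of_drconf_cell 1 | ... ]
 *
 * dlpar_clone_drconf_property() converts its clone to CPU endianness so the
 * DLPAR code can read and modify the cells in place, and
 * dlpar_update_drconf_property() below converts them back to big-endian
 * before the property is written out to the device tree.
 */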

static void dlpar_update_drconf_property(struct device_node *dn,
					 struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	/* Convert the property back to BE */
	p = prop->value;
	num_lmbs = *p;
	*p = cpu_to_be32(*p);
	p++;

	lmbs = (struct of_drconf_cell *)p;
	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
	}

	rtas_hp_event = true;
	of_update_property(dn, prop);
	rtas_hp_event = false;
}

static int dlpar_update_device_tree_lmb(struct of_drconf_cell *lmb)
{
	struct device_node *dn;
	struct property *prop;
	struct of_drconf_cell *lmbs;
	u32 *p, num_lmbs;
	int i;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn)
		return -ENODEV;

	prop = dlpar_clone_drconf_property(dn);
	if (!prop) {
		of_node_put(dn);
		return -ENODEV;
	}

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == lmb->drc_index) {
			lmbs[i].flags = lmb->flags;
			lmbs[i].aa_index = lmb->aa_index;

			dlpar_update_drconf_property(dn, prop);
			break;
		}
	}

	of_node_put(dn);
	return 0;
}

static u32 find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop, const u32 *lmb_assoc)
{
	u32 *assoc_arrays;
	u32 aa_index;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
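	/*
	 * Informal sketch of that layout (each cell a big-endian u32):
	 *
	 *	value[0]	number of arrays	(aa_arrays)
	 *	value[1]	entries per array	(aa_array_entries)
	 *	value[2..]	array 0, array 1, ...	(aa_array_entries cells each)
	 *
	 * The LMB's "ibm,associativity" value begins with a length cell, so
	 * the comparison below skips lmb_assoc[0] and matches only the
	 * associativity domain cells against each lookup array.
	 */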
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	aa_index = -1;
	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		aa_index = i;
		break;
	}

	if (aa_index == -1) {
		struct property *new_prop;
		u32 new_prop_size;

		new_prop_size = ala_prop->length + aa_array_sz;
		new_prop = dlpar_clone_property(ala_prop, new_prop_size);
		if (!new_prop)
			return -1;

		assoc_arrays = new_prop->value;

		/* increment the number of entries in the lookup array */
		assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

		/* copy the new associativity into the lookup array */
		index = aa_arrays * aa_array_entries + 2;
		memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

		of_update_property(dr_node, new_prop);

		/*
		 * The associativity lookup array index for this lmb is
		 * number of entries - 1 since we added its associativity
		 * to the end of the lookup array.
		 */
		aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	}

	return aa_index;
}

static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);

	dlpar_free_cc_nodes(lmb_node);
	return aa_index;
}

static int dlpar_add_device_tree_lmb(struct of_drconf_cell *lmb)
{
	int aa_index;

	lmb->flags |= DRCONF_MEM_ASSIGNED;

	aa_index = lookup_lmb_associativity_index(lmb);
	if (aa_index < 0) {
		pr_err("Couldn't find associativity index for drc index %x\n",
		       lmb->drc_index);
		return aa_index;
	}

	lmb->aa_index = aa_index;
	return dlpar_update_device_tree_lmb(lmb);
}

static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
{
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;
	lmb->aa_index = 0xffffffff;
	return dlpar_update_device_tree_lmb(lmb);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	for (i = 0; i < sections_per_block; i++) {
		remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually removing memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}
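
/*
 * Note on the "reg" parsing above (a sketch assuming the usual pseries
 * encoding of two address cells and two size cells): "reg" is four be32
 * cells,
 *
 *	regs[0..1] = 64-bit base address
 *	regs[2..3] = 64-bit size
 *
 * which is why the base is read as a single be64 and the LMB size is taken
 * from regs[3], the low word of the size.
 */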

static bool lmb_is_removable(struct of_drconf_cell *lmb)
{
	int i, scns_per_block;
	int rc = 1;
	unsigned long pfn, block_sz;
	u64 phys_addr;

	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

	block_sz = memory_block_size_bytes();
	scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	phys_addr = lmb->base_addr;

	for (i = 0; i < scns_per_block; i++) {
		pfn = PFN_DOWN(phys_addr);
		if (!pfn_present(pfn))
			continue;

		rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
		phys_addr += MIN_MEMORY_BLOCK_SIZE;
	}

	return rc ? true : false;
}

static int dlpar_add_lmb(struct of_drconf_cell *);

static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
{
	unsigned long section_nr;
	struct mem_section *mem_sect;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	mem_sect = __nr_to_section(section_nr);

	mem_block = find_memory_block(mem_sect);
	return mem_block;
}

static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	struct memory_block *mem_block;
	unsigned long block_sz;
	int nid, rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	rc = device_offline(&mem_block->dev);
	put_device(&mem_block->dev);
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	remove_memory(nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	dlpar_remove_device_tree_lmb(lmb);
	return 0;
}
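
/*
 * Count-based remove, descriptive summary: LMBs that are successfully
 * removed below are tagged with the 'reserved' flag. If the full request
 * cannot be satisfied, every reserved LMB is added back and the operation
 * fails; otherwise the reserved LMBs have their DRCs released and the flag
 * is cleared again.
 */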

static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	struct of_drconf_cell *lmbs;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	u32 num_lmbs, *p;
	int i, rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = 0; i < num_lmbs; i++) {
		if (lmb_is_removable(&lmbs[i]))
			lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
		rc = dlpar_remove_lmb(&lmbs[i]);
		if (rc)
			continue;

		lmbs_removed++;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		lmbs[i].reserved = 1;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMBs back\n");

		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_add_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmbs[i].drc_index);

			lmbs[i].reserved = 0;
		}

		rc = -EINVAL;
	} else {
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			dlpar_release_drc(lmbs[i].drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmbs[i].base_addr);

			lmbs[i].reserved = 0;
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmb_found;
	int i, rc;

	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(&lmbs[i]);
			if (!rc)
				dlpar_release_drc(lmbs[i].drc_index);

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-remove memory at %llx\n",
			lmbs[i].base_addr);
	else
		pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr);

	return rc;
}

static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmb_found;
	int i, rc;

	pr_info("Attempting to update LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(&lmbs[i]);
			if (!rc) {
				rc = dlpar_add_lmb(&lmbs[i]);
				if (rc)
					dlpar_release_drc(lmbs[i].drc_index);
			}
			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to update memory at %llx\n",
			lmbs[i].base_addr);
	else
		pr_info("Memory at %llx was updated\n", lmbs[i].base_addr);

	return rc;
}
#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
	return -EOPNOTSUPP;
}
/* Stub so the READD case in dlpar_memory() builds without hot-remove support */
static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop)
{
	return -EOPNOTSUPP;
}

#endif /* CONFIG_MEMORY_HOTREMOVE */
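
/*
 * Hot-add path, descriptive summary: dlpar_add_lmb() first records the LMB
 * in the device tree, looking up (or extending) the associativity lookup
 * array to obtain an aa_index, and only then onlines the memory with
 * add_memory(); a failure in add_memory() rolls the device-tree update back.
 */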

static int dlpar_add_lmb(struct of_drconf_cell *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = dlpar_add_device_tree_lmb(lmb);
	if (rc) {
		pr_err("Couldn't update device tree for drc index %x\n",
		       lmb->drc_index);
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this address */
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	/* Add the memory */
	rc = add_memory(nid, lmb->base_addr, block_sz);
	if (rc)
		dlpar_remove_device_tree_lmb(lmb);
	else
		lmb->flags |= DRCONF_MEM_ASSIGNED;

	return rc;
}

static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int i, rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = 0; i < num_lmbs; i++) {
		if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
		rc = dlpar_acquire_drc(lmbs[i].drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(&lmbs[i]);
		if (rc) {
			dlpar_release_drc(lmbs[i].drc_index);
			continue;
		}

		lmbs_added++;

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		lmbs[i].reserved = 1;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_remove_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmbs[i].drc_index);
			else
				dlpar_release_drc(lmbs[i].drc_index);
		}
		rc = -EINVAL;
	} else {
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmbs[i].base_addr, lmbs[i].drc_index);
			lmbs[i].reserved = 0;
		}
	}

	return rc;
}

static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i, lmb_found;
	int rc;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmbs[i].drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(&lmbs[i]);
				if (rc)
					dlpar_release_drc(lmbs[i].drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmbs[i].base_addr, drc_index);

	return rc;
}
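
/*
 * dlpar_memory() is the entry point for memory hotplug error log events
 * (descriptive comment): it clones "ibm,dynamic-memory" into a CPU-endian
 * working copy, dispatches on the requested action (add, remove, re-add)
 * and on whether the request names an LMB count or a specific DRC index,
 * and frees the clone when done.
 */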

int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	struct device_node *dn;
	struct property *prop;
	u32 count, drc_index;
	int rc;

	count = hp_elog->_drc_u.drc_count;
	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	prop = dlpar_clone_drconf_property(dn);
	if (!prop) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_memory_add_by_count(count, prop);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_memory_add_by_index(drc_index, prop);
		else
			rc = -EINVAL;
		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_memory_remove_by_count(count, prop);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_memory_remove_by_index(drc_index, prop);
		else
			rc = -EINVAL;
		break;
	case PSERIES_HP_ELOG_ACTION_READD:
		rc = dlpar_memory_readd_by_index(drc_index, prop);
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	dlpar_free_property(prop);

dlpar_memory_out:
	of_node_put(dn);
	unlock_device_hotplug();
	return rc;
}

static int pseries_add_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually adding memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}
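
/*
 * The updater below runs from the OF reconfig notifier when
 * "ibm,dynamic-memory" is rewritten by something other than this file's own
 * RTAS hotplug path (rtas_hp_event guards against reacting to our own
 * property updates); such external rewrites can happen, for instance, after
 * a partition migration. It walks the LMB entries and, on the first one
 * whose DRCONF_MEM_ASSIGNED flag changed, adds or removes the corresponding
 * memblock region.
 */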

static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of LMBs described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell's.
	 */
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell *)p;

	p = (__be32 *)pr->prop->value;
	p++;
	new_drmem = (struct of_drconf_cell *)p;

	for (i = 0; i < entries; i++) {
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			rc = pseries_remove_memblock(
				be64_to_cpu(old_drmem[i].base_addr),
				memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) &&
			    (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}
	return rc;
}

static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
			err = pseries_update_drconf_memory(rd);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);