/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include "pseries.h"

static bool rtas_hp_event;

unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		const __be64 *size;

		size = of_get_property(np, "ibm,lmb-size", NULL);
		if (size)
			memblock_size = be64_to_cpup(size);
		of_node_put(np);
	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0, use this to find
			 * the first memoryblock and get its size.
			 */
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}
	return memblock_size;
}

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

static struct property *dlpar_clone_drconf_property(struct device_node *dn)
{
	struct property *prop, *new_prop;
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	prop = of_find_property(dn, "ibm,dynamic-memory", NULL);
	if (!prop)
		return NULL;

	new_prop = dlpar_clone_property(prop, prop->length);
	if (!new_prop)
		return NULL;

	/* Convert the property to cpu endian-ness */
	p = new_prop->value;
	*p = be32_to_cpu(*p);

	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
	}

	return new_prop;
}

static void dlpar_update_drconf_property(struct device_node *dn,
					 struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	/* Convert the property back to BE */
	p = prop->value;
	num_lmbs = *p;
	*p = cpu_to_be32(*p);
	p++;

	lmbs = (struct of_drconf_cell *)p;
	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
	}

	rtas_hp_event = true;
	of_update_property(dn, prop);
	rtas_hp_event = false;
}

static int dlpar_update_device_tree_lmb(struct of_drconf_cell *lmb)
{
	struct device_node *dn;
	struct property *prop;
	struct of_drconf_cell *lmbs;
	u32 *p, num_lmbs;
	int i;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn)
		return -ENODEV;

	prop = dlpar_clone_drconf_property(dn);
	if (!prop) {
		of_node_put(dn);
		return -ENODEV;
	}

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == lmb->drc_index) {
			lmbs[i].flags = lmb->flags;
			lmbs[i].aa_index = lmb->aa_index;

			dlpar_update_drconf_property(dn, prop);
			break;
		}
	}

	of_node_put(dn);
	return 0;
}

static u32 find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop, const u32 *lmb_assoc)
{
	u32 *assoc_arrays;
	u32 aa_index;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	aa_index = -1;
	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		aa_index = i;
		break;
	}

	if (aa_index == -1) {
		struct property *new_prop;
		u32 new_prop_size;

		new_prop_size = ala_prop->length + aa_array_sz;
		new_prop = dlpar_clone_property(ala_prop, new_prop_size);
		if (!new_prop)
			return -1;

		assoc_arrays = new_prop->value;

		/* increment the number of entries in the lookup array */
		assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

		/* copy the new associativity into the lookup array */
		index = aa_arrays * aa_array_entries + 2;
		memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

		of_update_property(dr_node, new_prop);

		/*
		 * The associativity lookup array index for this lmb is
		 * number of entries - 1 since we added its associativity
		 * to the end of the lookup array.
		 */
		aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	}

	return aa_index;
}

static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);

	dlpar_free_cc_nodes(lmb_node);
	return aa_index;
}

static int dlpar_add_device_tree_lmb(struct of_drconf_cell *lmb)
{
	int aa_index;

	lmb->flags |= DRCONF_MEM_ASSIGNED;

	aa_index = lookup_lmb_associativity_index(lmb);
	if (aa_index < 0) {
		pr_err("Couldn't find associativity index for drc index %x\n",
		       lmb->drc_index);
		return aa_index;
	}

	lmb->aa_index = aa_index;
	return dlpar_update_device_tree_lmb(lmb);
}

static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
{
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;
	lmb->aa_index = 0xffffffff;
	return dlpar_update_device_tree_lmb(lmb);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	for (i = 0; i < sections_per_block; i++) {
		remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually removing memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}

static bool lmb_is_removable(struct of_drconf_cell *lmb)
{
	int i, scns_per_block;
	int rc = 1;
	unsigned long pfn, block_sz;
	u64 phys_addr;

	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

	block_sz = memory_block_size_bytes();
	scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	phys_addr = lmb->base_addr;

	for (i = 0; i < scns_per_block; i++) {
		pfn = PFN_DOWN(phys_addr);
		if (!pfn_present(pfn))
			continue;

		rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
		phys_addr += MIN_MEMORY_BLOCK_SIZE;
	}

	return rc ? true : false;
}

static int dlpar_add_lmb(struct of_drconf_cell *);

static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
{
	unsigned long section_nr;
	struct mem_section *mem_sect;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	mem_sect = __nr_to_section(section_nr);

	mem_block = find_memory_block(mem_sect);
	return mem_block;
}

static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	struct memory_block *mem_block;
	unsigned long block_sz;
	int nid, rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	rc = device_offline(&mem_block->dev);
	put_device(&mem_block->dev);
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	remove_memory(nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	dlpar_release_drc(lmb->drc_index);
	dlpar_remove_device_tree_lmb(lmb);

	return 0;
}

static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	struct of_drconf_cell *lmbs;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	u32 num_lmbs, *p;
	int i, rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
			lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
		rc = dlpar_remove_lmb(&lmbs[i]);
		if (rc)
			continue;

		lmbs_removed++;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		lmbs[i].reserved = 1;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_add_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmbs[i].drc_index);

			lmbs[i].reserved = 0;
		}

		rc = -EINVAL;
	} else {
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			pr_info("Memory at %llx was hot-removed\n",
				lmbs[i].base_addr);

			lmbs[i].reserved = 0;
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmb_found;
	int i, rc;

	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(&lmbs[i]);
			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-remove memory at %llx\n",
			lmbs[i].base_addr);
	else
		pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr);

	return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
	return -EOPNOTSUPP;
}

#endif /* CONFIG_MEMORY_HOTREMOVE */

static int dlpar_add_lmb(struct of_drconf_cell *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = dlpar_acquire_drc(lmb->drc_index);
	if (rc)
		return rc;

	rc = dlpar_add_device_tree_lmb(lmb);
	if (rc) {
		pr_err("Couldn't update device tree for drc index %x\n",
		       lmb->drc_index);
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this address */
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	/* Add the memory */
	rc = add_memory(nid, lmb->base_addr, block_sz);
	if (rc) {
		dlpar_remove_device_tree_lmb(lmb);
		dlpar_release_drc(lmb->drc_index);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}

static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int i, rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = 0; i < num_lmbs; i++) {
		if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
		rc = dlpar_add_lmb(&lmbs[i]);
		if (rc)
			continue;

		lmbs_added++;

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		lmbs[i].reserved = 1;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_remove_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       be32_to_cpu(lmbs[i].drc_index));
		}
		rc = -EINVAL;
	} else {
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmbs[i].base_addr, lmbs[i].drc_index);
			lmbs[i].reserved = 0;
		}
	}

	return rc;
}

static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i, lmb_found;
	int rc;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_add_lmb(&lmbs[i]);
			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmbs[i].base_addr, drc_index);

	return rc;
}

int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	struct device_node *dn;
	struct property *prop;
	u32 count, drc_index;
	int rc;

	count = hp_elog->_drc_u.drc_count;
	drc_index = hp_elog->_drc_u.drc_index;

	lock_device_hotplug();

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	prop = dlpar_clone_drconf_property(dn);
	if (!prop) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_memory_add_by_count(count, prop);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_memory_add_by_index(drc_index, prop);
		else
			rc = -EINVAL;
		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
			rc = dlpar_memory_remove_by_count(count, prop);
		else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
			rc = dlpar_memory_remove_by_index(drc_index, prop);
		else
			rc = -EINVAL;
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	dlpar_free_property(prop);

dlpar_memory_out:
	of_node_put(dn);
	unlock_device_hotplug();
	return rc;
}

static int pseries_add_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually adding memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}

static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of lmb's described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell's.
	 */
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell *)p;

	p = (__be32 *)pr->prop->value;
	p++;
	new_drmem = (struct of_drconf_cell *)p;

	for (i = 0; i < entries; i++) {
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			rc = pseries_remove_memblock(
				be64_to_cpu(old_drmem[i].base_addr),
				memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			      DRCONF_MEM_ASSIGNED)) &&
			   (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}
	return rc;
}

static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
			err = pseries_update_drconf_memory(rd);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);