/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include "pseries.h"

/*
 * Set while the device tree is being updated as part of an RTAS hotplug
 * event, so that pseries_update_drconf_memory() can ignore our own updates.
 */
static bool rtas_hp_event;

unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		const __be64 *size;

		size = of_get_property(np, "ibm,lmb-size", NULL);
		if (size)
			memblock_size = be64_to_cpup(size);
		of_node_put(np);
	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0, use this to find
			 * the first memoryblock and get its size.
			 */
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}
	return memblock_size;
}

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

static struct property *dlpar_clone_drconf_property(struct device_node *dn)
{
	struct property *prop, *new_prop;
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	prop = of_find_property(dn, "ibm,dynamic-memory", NULL);
	if (!prop)
		return NULL;

	new_prop = dlpar_clone_property(prop, prop->length);
	if (!new_prop)
		return NULL;

	/* Convert the property to cpu endian-ness */
	p = new_prop->value;
	*p = be32_to_cpu(*p);

	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
	}

	return new_prop;
}

static void dlpar_update_drconf_property(struct device_node *dn,
					 struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i;

	/* Convert the property back to BE */
	p = prop->value;
	num_lmbs = *p;
	*p = cpu_to_be32(*p);
	p++;

	lmbs = (struct of_drconf_cell *)p;
	for (i = 0; i < num_lmbs; i++) {
		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
	}

	rtas_hp_event = true;
	of_update_property(dn, prop);
	rtas_hp_event = false;
}

static int dlpar_update_device_tree_lmb(struct of_drconf_cell *lmb)
{
	struct device_node *dn;
	struct property *prop;
	struct of_drconf_cell *lmbs;
	u32 *p, num_lmbs;
	int i;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn)
		return -ENODEV;

	prop = dlpar_clone_drconf_property(dn);
	if (!prop) {
		of_node_put(dn);
		return -ENODEV;
	}

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == lmb->drc_index) {
			lmbs[i].flags = lmb->flags;
			lmbs[i].aa_index = lmb->aa_index;

			dlpar_update_drconf_property(dn, prop);
			break;
		}
	}

	of_node_put(dn);
	return 0;
}

static u32 find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop, const u32 *lmb_assoc)
{
	u32 *assoc_arrays;
	u32 aa_index;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	aa_index = -1;
	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		aa_index = i;
		break;
	}

	if (aa_index == -1) {
		struct property *new_prop;
		u32 new_prop_size;

		new_prop_size = ala_prop->length + aa_array_sz;
		new_prop = dlpar_clone_property(ala_prop, new_prop_size);
		if (!new_prop)
			return -1;

		assoc_arrays = new_prop->value;

		/* increment the number of entries in the lookup array */
		assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

		/* copy the new associativity into the lookup array */
		index = aa_arrays * aa_array_entries + 2;
		memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

		of_update_property(dr_node, new_prop);

		/*
		 * The associativity lookup array index for this lmb is
		 * number of entries - 1 since we added its associativity
		 * to the end of the lookup array.
		 */
		aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	}

	return aa_index;
}

static u32 lookup_lmb_associativity_index(struct of_drconf_cell *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);

	dlpar_free_cc_nodes(lmb_node);
	return aa_index;
}

static int dlpar_add_device_tree_lmb(struct of_drconf_cell *lmb)
{
	int aa_index;

	lmb->flags |= DRCONF_MEM_ASSIGNED;

	aa_index = lookup_lmb_associativity_index(lmb);
	if (aa_index < 0) {
		pr_err("Couldn't find associativity index for drc index %x\n",
		       lmb->drc_index);
		return aa_index;
	}

	lmb->aa_index = aa_index;
	return dlpar_update_device_tree_lmb(lmb);
}

static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
{
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;
	lmb->aa_index = 0xffffffff;
	return dlpar_update_device_tree_lmb(lmb);
}

/* Find the memory_block device backing the section that contains this LMB */
static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
{
	unsigned long section_nr;
	struct mem_section *mem_sect;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	mem_sect = __nr_to_section(section_nr);

	mem_block = find_memory_block(mem_sect);
	return mem_block;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	for (i = 0; i < sections_per_block; i++) {
		remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually removing memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}

static bool lmb_is_removable(struct of_drconf_cell *lmb)
{
	int i, scns_per_block;
	int rc = 1;
	unsigned long pfn, block_sz;
	u64 phys_addr;

	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

	block_sz = memory_block_size_bytes();
	scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	phys_addr = lmb->base_addr;

#ifdef CONFIG_FA_DUMP
	/* Don't hot-remove memory that falls in fadump boot memory area */
	if (is_fadump_boot_memory_area(phys_addr, block_sz))
		return false;
#endif

	for (i = 0; i < scns_per_block; i++) {
		pfn = PFN_DOWN(phys_addr);
		if (!pfn_present(pfn))
			continue;

		rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
		phys_addr += MIN_MEMORY_BLOCK_SIZE;
	}

	return rc ? true : false;
}

static int dlpar_add_lmb(struct of_drconf_cell *);

static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	struct memory_block *mem_block;
	unsigned long block_sz;
	int nid, rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	rc = device_offline(&mem_block->dev);
	put_device(&mem_block->dev);
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	remove_memory(nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	dlpar_remove_device_tree_lmb(lmb);
	return 0;
}

static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	struct of_drconf_cell *lmbs;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	u32 num_lmbs, *p;
	int i, rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = 0; i < num_lmbs; i++) {
		if (lmb_is_removable(&lmbs[i]))
			lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
		rc = dlpar_remove_lmb(&lmbs[i]);
		if (rc)
			continue;

		lmbs_removed++;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		lmbs[i].reserved = 1;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_add_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmbs[i].drc_index);

			lmbs[i].reserved = 0;
		}

		rc = -EINVAL;
	} else {
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			dlpar_release_drc(lmbs[i].drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmbs[i].base_addr);

			lmbs[i].reserved = 0;
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmb_found;
	int i, rc;

	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(&lmbs[i]);
			if (!rc)
				dlpar_release_drc(lmbs[i].drc_index);

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-remove memory at %llx\n",
			lmbs[i].base_addr);
	else
		pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr);

	return rc;
}

static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmb_found;
	int i, rc;

	pr_info("Attempting to update LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(&lmbs[i]);
			if (!rc) {
				rc = dlpar_add_lmb(&lmbs[i]);
				if (rc)
					dlpar_release_drc(lmbs[i].drc_index);
			}
			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to update memory at %llx\n",
			lmbs[i].base_addr);
	else
		pr_info("Memory at %llx was updated\n", lmbs[i].base_addr);

	return rc;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index,
				     struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i, rc, start_lmb_found;
	int lmbs_available = 0, start_index = 0, end_index;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;
	start_lmb_found = 0;

	/* Navigate to drc_index */
	while (start_index < num_lmbs) {
		if (lmbs[start_index].drc_index == drc_index) {
			start_lmb_found = 1;
			break;
		}

		start_index++;
	}

	if (!start_lmb_found)
		return -EINVAL;

	end_index = start_index + lmbs_to_remove;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = start_index; i < end_index; i++) {
		if (lmbs[i].flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for (i = start_index; i < end_index; i++) {
		if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(&lmbs[i]);
		if (rc)
			break;

		lmbs[i].reserved = 1;
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");

		for (i = start_index; i < end_index; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_add_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       be32_to_cpu(lmbs[i].drc_index));

			lmbs[i].reserved = 0;
		}
		rc = -EINVAL;
	} else {
		for (i = start_index; i < end_index; i++) {
			if (!lmbs[i].reserved)
				continue;

			dlpar_release_drc(lmbs[i].drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmbs[i].base_addr, lmbs[i].drc_index);

			lmbs[i].reserved = 0;
		}
	}

	return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
					struct property *prop)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index,
				     struct property *prop)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static int dlpar_online_lmb(struct of_drconf_cell *lmb)
{
	struct memory_block *mem_block;
	int rc;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	rc = device_online(&mem_block->dev);
	put_device(&mem_block->dev);
	return rc;
}

static int dlpar_add_lmb(struct of_drconf_cell *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = dlpar_add_device_tree_lmb(lmb);
	if (rc) {
		pr_err("Couldn't update device tree for drc index %x\n",
		       lmb->drc_index);
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this address */
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	/* Add the memory */
	rc = add_memory(nid, lmb->base_addr, block_sz);
	if (rc) {
		dlpar_remove_device_tree_lmb(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		remove_memory(nid, lmb->base_addr, block_sz);
		dlpar_remove_device_tree_lmb(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}

static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int i, rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	/* Validate that there are enough LMBs to satisfy the request */
	for (i = 0; i < num_lmbs; i++) {
		if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
		rc = dlpar_acquire_drc(lmbs[i].drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(&lmbs[i]);
		if (rc) {
			dlpar_release_drc(lmbs[i].drc_index);
			continue;
		}

		lmbs_added++;

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		lmbs[i].reserved = 1;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_remove_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       be32_to_cpu(lmbs[i].drc_index));
			else
				dlpar_release_drc(lmbs[i].drc_index);
		}
		rc = -EINVAL;
	} else {
		for (i = 0; i < num_lmbs; i++) {
			if (!lmbs[i].reserved)
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmbs[i].base_addr, lmbs[i].drc_index);
			lmbs[i].reserved = 0;
		}
	}

	return rc;
}

static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i, lmb_found;
	int rc;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;

	lmb_found = 0;
	for (i = 0; i < num_lmbs; i++) {
		if (lmbs[i].drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmbs[i].drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(&lmbs[i]);
				if (rc)
					dlpar_release_drc(lmbs[i].drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmbs[i].base_addr, drc_index);

	return rc;
}

static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index,
				  struct property *prop)
{
	struct of_drconf_cell *lmbs;
	u32 num_lmbs, *p;
	int i, rc, start_lmb_found;
	int lmbs_available = 0, start_index = 0, end_index;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	p = prop->value;
	num_lmbs = *p++;
	lmbs = (struct of_drconf_cell *)p;
	start_lmb_found = 0;

	/* Navigate to drc_index */
	while (start_index < num_lmbs) {
		if (lmbs[start_index].drc_index == drc_index) {
			start_lmb_found = 1;
			break;
		}

		start_index++;
	}

	if (!start_lmb_found)
		return -EINVAL;

	end_index = start_index + lmbs_to_add;

	/* Validate that the LMBs in this range are not reserved */
	for (i = start_index; i < end_index; i++) {
		if (lmbs[i].flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for (i = start_index; i < end_index; i++) {
		if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmbs[i].drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(&lmbs[i]);
		if (rc) {
			dlpar_release_drc(lmbs[i].drc_index);
			break;
		}

		lmbs[i].reserved = 1;
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for (i = start_index; i < end_index; i++) {
			if (!lmbs[i].reserved)
				continue;

			rc = dlpar_remove_lmb(&lmbs[i]);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       be32_to_cpu(lmbs[i].drc_index));
			else
				dlpar_release_drc(lmbs[i].drc_index);
		}
		rc = -EINVAL;
	} else {
		for (i = start_index; i < end_index; i++) {
			if (!lmbs[i].reserved)
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmbs[i].base_addr, lmbs[i].drc_index);
			lmbs[i].reserved = 0;
		}
	}

	return rc;
}

/*
 * Entry point for memory DLPAR requests: dispatch add, remove, and re-add
 * operations by DRC count, DRC index, or indexed count.
 */
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	struct device_node *dn;
	struct property *prop;
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dn) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	prop = dlpar_clone_drconf_property(dn);
	if (!prop) {
		rc = -EINVAL;
		goto dlpar_memory_out;
	}

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count, prop);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index, prop);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index, prop);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count, prop);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index, prop);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index, prop);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_READD:
		drc_index = hp_elog->_drc_u.drc_index;
		rc = dlpar_memory_readd_by_index(drc_index, prop);
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	dlpar_free_property(prop);

dlpar_memory_out:
	of_node_put(dn);
	unlock_device_hotplug();
	return rc;
}

static int pseries_add_mem_node(struct device_node *np)
{
	const char *type;
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually adding memory
	 */
	type = of_get_property(np, "device_type", NULL);
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}

static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of lmb's described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell's.
	 */
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell *)p;

	p = (__be32 *)pr->prop->value;
	p++;
	new_drmem = (struct of_drconf_cell *)p;

	for (i = 0; i < entries; i++) {
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			rc = pseries_remove_memblock(
				be64_to_cpu(old_drmem[i].base_addr),
				memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) &&
			   (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}
	return rc;
}

static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
			err = pseries_update_drconf_memory(rd);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);