/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	"dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "offline_states.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>

struct cc_workarea {
	__be32	drc_index;
	__be32	zero;
	__be32	name_offset;
	__be32	prop_length;
	__be32	prop_offset;
};

void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);
	if (!prop->name) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
					       const char *path)
{
	struct device_node *dn;
	char *name;

	/* If parent node path is "/" advance path to NULL terminator to
	 * prevent double leading slashes in full_name.
	 */
	if (!path[1])
		path++;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}

/* Return codes and error values from the ibm,configure-connector RTAS call */
#define COMPLETE	0
#define NEXT_SIBLING	1
#define NEXT_CHILD	2
#define NEXT_PROPERTY	3
#define PREV_PARENT	4
#define MORE_MEMORY	5
#define CALL_AGAIN	-2
#define ERR_CFG_USE	-9003

/*
 * Build the device tree fragment for the resource identified by drc_index
 * by repeatedly calling the ibm,configure-connector RTAS service.  Each
 * return code tells us whether the work area now describes a sibling node,
 * a child node, or a property of the node most recently parsed.
 */
struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	char *data_buf;
	const char *parent_path = parent->full_name;
	int cc_token;
	int rc = -1;

	cc_token = rtas_token("ibm,configure-connector");
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!data_buf)
		return NULL;

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		/* Since we release the rtas_data_buf lock between configure
		 * connector calls we want to re-populate the rtas_data_buf
		 * with the contents of the previous call.
		 */
		spin_lock(&rtas_data_buf_lock);

		memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
		rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
		memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

		spin_unlock(&rtas_data_buf_lock);

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa, parent_path);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			if (first_dn)
				parent_path = last_dn->full_name;

			dn = dlpar_parse_cc_node(ccwa, parent_path);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			parent_path = last_dn->parent->full_name;
			break;

		case CALL_AGAIN:
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			printk(KERN_ERR "Unexpected Error (%d) "
			       "returned from configure-connector\n", rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	kfree(data_buf);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

int dlpar_attach_node(struct device_node *dn)
{
	int rc;

	dn->parent = pseries_of_derive_parent(dn->full_name);
	if (IS_ERR(dn->parent))
		return PTR_ERR(dn->parent);

	rc = of_attach_node(dn);
	if (rc) {
		printk(KERN_ERR "Failed to add device node %s\n",
		       dn->full_name);
		return rc;
	}

	of_node_put(dn->parent);
	return 0;
}

int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	child = of_get_next_child(dn, NULL);
	while (child) {
		dlpar_detach_node(child);
		child = of_get_next_child(dn, child);
	}

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	of_node_put(dn); /* Must decrement the refcount */
	return 0;
}

#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

/*
 * Acquire a DRC: verify the resource is currently unusable, then mark it
 * usable and unisolate it.  On failure the allocation state is rolled back.
 */
int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

/*
 * Release a DRC: verify the resource is present, isolate it, then mark it
 * unusable.  On failure the isolation state is rolled back.
 */
int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

static int dlpar_online_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;
			BUG_ON(get_cpu_current_state(cpu)
					!= CPU_STATE_OFFLINE);
			cpu_maps_update_done();
			rc = device_online(get_cpu_device(cpu));
			if (rc)
				goto out;
			cpu_maps_update_begin();

			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to online "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;

}

static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
	struct device_node *dn, *parent;
	u32 drc_index;
	int rc;

	rc = kstrtou32(buf, 0, &drc_index);
	if (rc)
		return -EINVAL;

	rc = dlpar_acquire_drc(drc_index);
	if (rc)
		return -EINVAL;

	parent = of_find_node_by_path("/cpus");
	if (!parent)
		return -ENODEV;

	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
	of_node_put(parent);
	if (!dn) {
		dlpar_release_drc(drc_index);
		return -EINVAL;
	}

	rc = dlpar_attach_node(dn);
	if (rc) {
		dlpar_release_drc(drc_index);
		dlpar_free_cc_nodes(dn);
		return rc;
	}

	rc = dlpar_online_cpu(dn);
	if (rc)
		return rc;

	return count;
}

static int dlpar_offline_cpu(struct device_node *dn)
{
	int rc = 0;
	unsigned int cpu;
	int len, nthreads, i;
	const __be32 *intserv;
	u32 thread;

	intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return -EINVAL;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		thread = be32_to_cpu(intserv[i]);
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != thread)
				continue;

			if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
				break;

			if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
				set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
				cpu_maps_update_done();
				rc = device_offline(get_cpu_device(cpu));
				if (rc)
					goto out;
				cpu_maps_update_begin();
				break;

			}

			/*
			 * The cpu is in CPU_STATE_INACTIVE.
			 * Upgrade its state to CPU_STATE_OFFLINE.
			 */
			set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
			BUG_ON(plpar_hcall_norets(H_PROD, thread)
								!= H_SUCCESS);
			__cpu_die(cpu);
			break;
		}
		if (cpu == num_possible_cpus())
			printk(KERN_WARNING "Could not find cpu to offline "
			       "with physical id 0x%x\n", thread);
	}
	cpu_maps_update_done();

out:
	return rc;

}

static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
	struct device_node *dn;
	u32 drc_index;
	int rc;

	dn = of_find_node_by_path(buf);
	if (!dn)
		return -EINVAL;

	rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_offline_cpu(dn);
	if (rc) {
		of_node_put(dn);
		return -EINVAL;
	}

	rc = dlpar_release_drc(drc_index);
	if (rc) {
		of_node_put(dn);
		return rc;
	}

	rc = dlpar_detach_node(dn);
	if (rc) {
		dlpar_acquire_drc(drc_index);
		return rc;
	}

	of_node_put(dn);

	return count;
}

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	/* pseries error logs are in BE format, convert to cpu type */
	switch (hp_elog->id_type) {
	case PSERIES_HP_ELOG_ID_DRC_COUNT:
		hp_elog->_drc_u.drc_count =
				be32_to_cpu(hp_elog->_drc_u.drc_count);
		break;
	case PSERIES_HP_ELOG_ID_DRC_INDEX:
		hp_elog->_drc_u.drc_index =
				be32_to_cpu(hp_elog->_drc_u.drc_index);
	}

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}

static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
			   const char *buf, size_t count)
{
	struct pseries_hp_errorlog *hp_elog;
	const char *arg;
	int rc;

	hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
	if (!hp_elog) {
		rc = -ENOMEM;
		goto dlpar_store_out;
	}

	/* Parse out the request from the user; it will be in the form
	 * <resource> <action> <id_type> <id>
	 */
	arg = buf;
	if (!strncmp(arg, "memory", 6)) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
		arg += strlen("memory ");
	} else {
		pr_err("Invalid resource specified: \"%s\"\n", buf);
		rc = -EINVAL;
		goto dlpar_store_out;
	}

	if (!strncmp(arg, "add", 3)) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
		arg += strlen("add ");
	} else if (!strncmp(arg, "remove", 6)) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
		arg += strlen("remove ");
	} else {
		pr_err("Invalid action specified: \"%s\"\n", buf);
		rc = -EINVAL;
		goto dlpar_store_out;
	}

	if (!strncmp(arg, "index", 5)) {
		u32 index;

		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
		arg += strlen("index ");
		if (kstrtou32(arg, 0, &index)) {
			rc = -EINVAL;
			pr_err("Invalid drc_index specified: \"%s\"\n", buf);
			goto dlpar_store_out;
		}

		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
	} else if (!strncmp(arg, "count", 5)) {
		u32 count;

		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
		arg += strlen("count ");
		if (kstrtou32(arg, 0, &count)) {
			rc = -EINVAL;
			pr_err("Invalid count specified: \"%s\"\n", buf);
			goto dlpar_store_out;
		}

		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
	} else {
		pr_err("Invalid id_type specified: \"%s\"\n", buf);
		rc = -EINVAL;
		goto dlpar_store_out;
	}

	rc = handle_dlpar_errorlog(hp_elog);

dlpar_store_out:
	kfree(hp_elog);
	return rc ? rc : count;
}

static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store);

static int __init pseries_dlpar_init(void)
{
	int rc;

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	ppc_md.cpu_probe = dlpar_cpu_probe;
	ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

	rc = sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);

	return rc;
}
machine_device_initcall(pseries, pseries_dlpar_init);
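
/*
 * Usage sketch: dlpar_store() above backs the /sys/kernel/dlpar attribute
 * and parses requests of the form "<resource> <action> <id_type> <id>",
 * where <resource> is "memory", <action> is "add" or "remove", and
 * <id_type> is "index" or "count".  For example, from user space:
 *
 *	echo "memory add count 1" > /sys/kernel/dlpar
 *	echo "memory remove index 0x80000010" > /sys/kernel/dlpar
 *
 * The DRC index shown is illustrative only; real values come from the
 * ibm,my-drc-index properties in the device tree (see dlpar_cpu_release()).
 */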