/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	"dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>

static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
	struct work_struct work;
	struct pseries_hp_errorlog *errlog;
	struct completion *hp_completion;
	int *rc;
};

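/*
 * Header of the work area returned in rtas_data_buf by the
 * ibm,configure-connector RTAS call (see dlpar_configure_connector()
 * below).  All fields are big-endian as supplied by firmware;
 * name_offset and prop_offset are byte offsets from the start of the
 * work area at which the current node's name and property value live.
 */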
struct cc_workarea {
	__be32 drc_index;
	__be32 zero;
	__be32 name_offset;
	__be32 prop_length;
	__be32 prop_offset;
};

void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);
	if (!prop->name) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
					       const char *path)
{
	struct device_node *dn;
	char *name;

	/* If parent node path is "/" advance path to NULL terminator to
	 * prevent double leading slashes in full_name.
	 */
	if (!path[1])
		path++;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}

#define COMPLETE	0
#define NEXT_SIBLING	1
#define NEXT_CHILD	2
#define NEXT_PROPERTY	3
#define PREV_PARENT	4
#define MORE_MEMORY	5
#define CALL_AGAIN	-2
#define ERR_CFG_USE	-9003

struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	char *data_buf;
	const char *parent_path = parent->full_name;
	int cc_token;
	int rc = -1;

	cc_token = rtas_token("ibm,configure-connector");
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!data_buf)
		return NULL;

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		/* Since we release the rtas_data_buf lock between configure
		 * connector calls we want to re-populate the rtas_data_buffer
		 * with the contents of the previous call.
		 */
		spin_lock(&rtas_data_buf_lock);

		memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
		rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
		memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

		spin_unlock(&rtas_data_buf_lock);

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa, parent_path);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			if (first_dn)
				parent_path = last_dn->full_name;

			dn = dlpar_parse_cc_node(ccwa, parent_path);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			parent_path = last_dn->parent->full_name;
			break;

		case CALL_AGAIN:
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			printk(KERN_ERR "Unexpected Error (%d) "
			       "returned from configure-connector\n", rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	kfree(data_buf);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

int dlpar_attach_node(struct device_node *dn)
{
	int rc;

	dn->parent = pseries_of_derive_parent(dn->full_name);
	if (IS_ERR(dn->parent))
		return PTR_ERR(dn->parent);

	rc = of_attach_node(dn);
	if (rc) {
		printk(KERN_ERR "Failed to add device node %s\n",
		       dn->full_name);
		return rc;
	}

	of_node_put(dn->parent);
	return 0;
}

int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	child = of_get_next_child(dn, NULL);
	while (child) {
		dlpar_detach_node(child);
		child = of_get_next_child(dn, child);
	}

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	of_node_put(dn); /* Must decrement the refcount */
	return 0;
}

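/*
 * DRC (Dynamic Reconfiguration Connector) state handling.  Using the
 * RTAS sensor and indicator tokens below, dlpar_acquire_drc() requires
 * the connector to be reported as DR_ENTITY_UNUSABLE and then marks it
 * usable and unisolated, while dlpar_release_drc() requires
 * DR_ENTITY_PRESENT and walks it back to isolated and unusable.  Each
 * helper undoes its first state change if the second one fails.
 *
 * A minimal, purely illustrative hot-add sequence composed from the
 * helpers in this file (the real CPU and memory hotplug callers live in
 * hotplug-cpu.c and hotplug-memory.c and differ in detail; drc_index
 * and parent_dn are placeholders here):
 *
 *	if (dlpar_acquire_drc(drc_index))
 *		return -EINVAL;
 *
 *	dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent_dn);
 *	if (!dn) {
 *		dlpar_release_drc(drc_index);
 *		return -EINVAL;
 *	}
 *
 *	rc = dlpar_attach_node(dn);
 *	if (rc) {
 *		dlpar_free_cc_nodes(dn);
 *		dlpar_release_drc(drc_index);
 *	}
 */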
#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}

static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	/* pseries error logs are in BE format; convert to cpu type */
	switch (hp_elog->id_type) {
	case PSERIES_HP_ELOG_ID_DRC_COUNT:
		hp_elog->_drc_u.drc_count =
			be32_to_cpu(hp_elog->_drc_u.drc_count);
		break;
	case PSERIES_HP_ELOG_ID_DRC_INDEX:
		hp_elog->_drc_u.drc_index =
			be32_to_cpu(hp_elog->_drc_u.drc_index);
		break;
	case PSERIES_HP_ELOG_ID_DRC_IC:
		hp_elog->_drc_u.ic.count =
			be32_to_cpu(hp_elog->_drc_u.ic.count);
		hp_elog->_drc_u.ic.index =
			be32_to_cpu(hp_elog->_drc_u.ic.index);
	}

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_CPU:
		rc = dlpar_cpu(hp_elog);
		break;
	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}

static void pseries_hp_work_fn(struct work_struct *work)
{
	struct pseries_hp_work *hp_work =
			container_of(work, struct pseries_hp_work, work);

	if (hp_work->rc)
		*(hp_work->rc) = handle_dlpar_errorlog(hp_work->errlog);
	else
		handle_dlpar_errorlog(hp_work->errlog);

	if (hp_work->hp_completion)
		complete(hp_work->hp_completion);

	kfree(hp_work->errlog);
	kfree((void *)work);
}

void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
			 struct completion *hotplug_done, int *rc)
{
	struct pseries_hp_work *work;
	struct pseries_hp_errorlog *hp_errlog_copy;

	hp_errlog_copy = kmalloc(sizeof(struct pseries_hp_errorlog),
				 GFP_KERNEL);
	if (!hp_errlog_copy) {
		/* Don't memcpy() into a failed allocation below. */
		*rc = -ENOMEM;
		complete(hotplug_done);
		return;
	}

	memcpy(hp_errlog_copy, hp_errlog, sizeof(struct pseries_hp_errorlog));

	work = kmalloc(sizeof(struct pseries_hp_work), GFP_KERNEL);
	if (work) {
		INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
		work->errlog = hp_errlog_copy;
		work->hp_completion = hotplug_done;
		work->rc = rc;
		queue_work(pseries_hp_wq, (struct work_struct *)work);
	} else {
		*rc = -ENOMEM;
		kfree(hp_errlog_copy);
		complete(hotplug_done);
	}
}

static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "memory")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
	} else if (sysfs_streq(arg, "cpu")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
	} else {
		pr_err("Invalid resource specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "add")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
	} else if (sysfs_streq(arg, "remove")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
	} else {
		pr_err("Invalid action specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;
	u32 count, index;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "indexed-count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.ic.count = cpu_to_be32(count);
		hp_elog->_drc_u.ic.index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "index")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
	} else {
		pr_err("Invalid id_type specified.\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
			   const char *buf, size_t count)
{
	struct pseries_hp_errorlog *hp_elog;
	struct completion hotplug_done;
	char *argbuf;
	char *args;
	int rc;

	args = argbuf = kstrdup(buf, GFP_KERNEL);
	hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
	if (!hp_elog || !argbuf) {
		pr_info("Could not allocate resources for DLPAR operation\n");
		kfree(argbuf);
		kfree(hp_elog);
		return -ENOMEM;
	}

	/*
	 * Parse out the request from the user; this will be in the form:
	 *  <resource> <action> <id_type> <id>
	 */
	rc = dlpar_parse_resource(&args, hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_action(&args, hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_id_type(&args, hp_elog);
	if (rc)
		goto dlpar_store_out;

	init_completion(&hotplug_done);
	queue_hotplug_event(hp_elog, &hotplug_done, &rc);
	wait_for_completion(&hotplug_done);

dlpar_store_out:
	kfree(argbuf);
	kfree(hp_elog);

	if (rc)
		pr_err("Could not handle DLPAR request \"%s\"\n", buf);

	return rc ? rc : count;
}

static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%s\n", "memory,cpu");
}

static CLASS_ATTR(dlpar, S_IWUSR | S_IRUSR, dlpar_show, dlpar_store);

static int __init pseries_dlpar_init(void)
{
	pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
					WQ_UNBOUND, 1);
	return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, pseries_dlpar_init);