// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 */

#define pr_fmt(fmt)	"dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>

static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
	struct work_struct work;
	struct pseries_hp_errorlog *errlog;
};

struct cc_workarea {
	__be32	drc_index;
	__be32	zero;
	__be32	name_offset;
	__be32	prop_length;
	__be32	prop_offset;
};

void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);
	if (!prop->name) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
{
	struct device_node *dn;
	const char *name;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (const char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kstrdup(name, GFP_KERNEL);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}
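/*
 * Return codes from the ibm,configure-connector RTAS call, as handled by
 * dlpar_configure_connector() below: each successful call describes the
 * next device tree element to construct (a sibling node, a child node,
 * or a property) or directs traversal back to the parent node, until
 * COMPLETE is returned.  MORE_MEMORY and ERR_CFG_USE are treated as
 * errors here.
 */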
#define COMPLETE	0
#define NEXT_SIBLING	1
#define NEXT_CHILD	2
#define NEXT_PROPERTY	3
#define PREV_PARENT	4
#define MORE_MEMORY	5
#define ERR_CFG_USE	-9003

struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	char *data_buf;
	int cc_token;
	int rc = -1;

	cc_token = rtas_token("ibm,configure-connector");
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!data_buf)
		return NULL;

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		/* Since we release the rtas_data_buf lock between
		 * configure-connector calls, we want to re-populate
		 * rtas_data_buf with the contents of the previous call.
		 */
		spin_lock(&rtas_data_buf_lock);

		memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
		rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
		memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

		spin_unlock(&rtas_data_buf_lock);

		if (rtas_busy_delay(rc))
			continue;

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			printk(KERN_ERR "Unexpected Error (%d) "
			       "returned from configure-connector\n", rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	kfree(data_buf);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
{
	int rc;

	dn->parent = parent;

	rc = of_attach_node(dn);
	if (rc) {
		printk(KERN_ERR "Failed to add device node %pOF\n", dn);
		return rc;
	}

	return 0;
}

int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	child = of_get_next_child(dn, NULL);
	while (child) {
		dlpar_detach_node(child);
		child = of_get_next_child(dn, child);
	}

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	of_node_put(dn);

	return 0;
}

#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}
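/*
 * Acquiring a DRC (above) moves its allocation state from unusable to
 * usable and then clears isolation; releasing walks the same two steps
 * in reverse.  dlpar_unisolate_drc() below performs only the final
 * unisolate step, for a DRC whose sensor already reports
 * DR_ENTITY_PRESENT.
 */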
int dlpar_unisolate_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);

	return 0;
}

int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	/* pseries error logs are in BE format, convert to CPU byte order */
	switch (hp_elog->id_type) {
	case PSERIES_HP_ELOG_ID_DRC_COUNT:
		hp_elog->_drc_u.drc_count =
			be32_to_cpu(hp_elog->_drc_u.drc_count);
		break;
	case PSERIES_HP_ELOG_ID_DRC_INDEX:
		hp_elog->_drc_u.drc_index =
			be32_to_cpu(hp_elog->_drc_u.drc_index);
		break;
	case PSERIES_HP_ELOG_ID_DRC_IC:
		hp_elog->_drc_u.ic.count =
			be32_to_cpu(hp_elog->_drc_u.ic.count);
		hp_elog->_drc_u.ic.index =
			be32_to_cpu(hp_elog->_drc_u.ic.index);
	}

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_CPU:
		rc = dlpar_cpu(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_PMEM:
		rc = dlpar_hp_pmem(hp_elog);
		break;

	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}

static void pseries_hp_work_fn(struct work_struct *work)
{
	struct pseries_hp_work *hp_work =
			container_of(work, struct pseries_hp_work, work);

	handle_dlpar_errorlog(hp_work->errlog);

	kfree(hp_work->errlog);
	kfree(hp_work);
}

void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
{
	struct pseries_hp_work *work;
	struct pseries_hp_errorlog *hp_errlog_copy;

	hp_errlog_copy = kmemdup(hp_errlog, sizeof(*hp_errlog), GFP_ATOMIC);
	if (!hp_errlog_copy)
		return;

	work = kmalloc(sizeof(struct pseries_hp_work), GFP_ATOMIC);
	if (work) {
		INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
		work->errlog = hp_errlog_copy;
		queue_work(pseries_hp_wq, (struct work_struct *)work);
	} else {
		kfree(hp_errlog_copy);
	}
}

static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "memory")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
	} else if (sysfs_streq(arg, "cpu")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
	} else {
		pr_err("Invalid resource specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "add")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
	} else if (sysfs_streq(arg, "remove")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
	} else {
		pr_err("Invalid action specified.\n");
		return -EINVAL;
	}

	return 0;
}
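/*
 * The parse helpers above and below consume the space-separated command
 * written to the "dlpar" sysfs file created in dlpar_sysfs_init()
 * (/sys/kernel/dlpar), in the form <resource> <action> <id_type> <id>.
 * Illustrative examples of accepted commands:
 *
 *   echo "memory add count 2"                    > /sys/kernel/dlpar
 *   echo "cpu remove index 0x1000000a"           > /sys/kernel/dlpar
 *   echo "memory add indexed-count 2 0x80000010" > /sys/kernel/dlpar
 *
 * The count and index values above are examples only; valid DRC indexes
 * come from the platform's device tree.
 */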
static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;
	u32 count, index;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "indexed-count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.ic.count = cpu_to_be32(count);
		hp_elog->_drc_u.ic.index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "index")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
	} else {
		pr_err("Invalid id_type specified.\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
			   const char *buf, size_t count)
{
	struct pseries_hp_errorlog hp_elog;
	char *argbuf;
	char *args;
	int rc;

	args = argbuf = kstrdup(buf, GFP_KERNEL);
	if (!argbuf)
		return -ENOMEM;

	/*
	 * Parse out the request from the user; it will be in the form:
	 *  <resource> <action> <id_type> <id>
	 */
	rc = dlpar_parse_resource(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_action(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_id_type(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = handle_dlpar_errorlog(&hp_elog);

dlpar_store_out:
	kfree(argbuf);

	if (rc)
		pr_err("Could not handle DLPAR request \"%s\"\n", buf);

	return rc ? rc : count;
}

static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%s\n", "memory,cpu");
}

static CLASS_ATTR_RW(dlpar);

int __init dlpar_workqueue_init(void)
{
	if (pseries_hp_wq)
		return 0;

	pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
					WQ_UNBOUND, 1);

	return pseries_hp_wq ? 0 : -ENOMEM;
}

static int __init dlpar_sysfs_init(void)
{
	int rc;

	rc = dlpar_workqueue_init();
	if (rc)
		return rc;

	return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, dlpar_sysfs_init);