// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 */

#define pr_fmt(fmt) "dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>

static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
        struct work_struct work;
        struct pseries_hp_errorlog *errlog;
};

struct cc_workarea {
        __be32 drc_index;
        __be32 zero;
        __be32 name_offset;
        __be32 prop_length;
        __be32 prop_offset;
};

void dlpar_free_cc_property(struct property *prop)
{
        kfree(prop->name);
        kfree(prop->value);
        kfree(prop);
}

static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
        struct property *prop;
        char *name;
        char *value;

        prop = kzalloc(sizeof(*prop), GFP_KERNEL);
        if (!prop)
                return NULL;

        name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
        prop->name = kstrdup(name, GFP_KERNEL);
        if (!prop->name) {
                dlpar_free_cc_property(prop);
                return NULL;
        }

        prop->length = be32_to_cpu(ccwa->prop_length);
        value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
        prop->value = kmemdup(value, prop->length, GFP_KERNEL);
        if (!prop->value) {
                dlpar_free_cc_property(prop);
                return NULL;
        }

        return prop;
}

static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
{
        struct device_node *dn;
        const char *name;

        dn = kzalloc(sizeof(*dn), GFP_KERNEL);
        if (!dn)
                return NULL;

        name = (const char *)ccwa + be32_to_cpu(ccwa->name_offset);
        dn->full_name = kstrdup(name, GFP_KERNEL);
        if (!dn->full_name) {
                kfree(dn);
                return NULL;
        }

        of_node_set_flag(dn, OF_DYNAMIC);
        of_node_init(dn);

        return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
        struct property *prop;

        while (dn->properties) {
                prop = dn->properties;
                dn->properties = prop->next;
                dlpar_free_cc_property(prop);
        }

        kfree(dn->full_name);
        kfree(dn);
}

void dlpar_free_cc_nodes(struct device_node *dn)
{
        if (dn->child)
                dlpar_free_cc_nodes(dn->child);

        if (dn->sibling)
                dlpar_free_cc_nodes(dn->sibling);

        dlpar_free_one_cc_node(dn);
}

#define COMPLETE        0
#define NEXT_SIBLING    1
#define NEXT_CHILD      2
#define NEXT_PROPERTY   3
#define PREV_PARENT     4
#define MORE_MEMORY     5
#define CALL_AGAIN      -2
#define ERR_CFG_USE     -9003
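/*
 * The values above are the return codes of the ibm,configure-connector RTAS
 * call.  dlpar_configure_connector() below invokes the call in a loop and
 * uses these codes to build a detached device_node tree one node or property
 * at a time: NEXT_CHILD and NEXT_SIBLING add nodes, NEXT_PROPERTY adds a
 * property to the most recently created node, PREV_PARENT walks back up the
 * tree, and the loop terminates on COMPLETE (0) or on an error such as
 * MORE_MEMORY or ERR_CFG_USE.
 */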

struct device_node *dlpar_configure_connector(__be32 drc_index,
                                              struct device_node *parent)
{
        struct device_node *dn;
        struct device_node *first_dn = NULL;
        struct device_node *last_dn = NULL;
        struct property *property;
        struct property *last_property = NULL;
        struct cc_workarea *ccwa;
        char *data_buf;
        int cc_token;
        int rc = -1;

        cc_token = rtas_token("ibm,configure-connector");
        if (cc_token == RTAS_UNKNOWN_SERVICE)
                return NULL;

        data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
        if (!data_buf)
                return NULL;

        ccwa = (struct cc_workarea *)&data_buf[0];
        ccwa->drc_index = drc_index;
        ccwa->zero = 0;

        do {
                /* Since we release the rtas_data_buf lock between configure
                 * connector calls we want to re-populate the rtas_data_buffer
                 * with the contents of the previous call.
                 */
                spin_lock(&rtas_data_buf_lock);

                memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
                rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
                memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

                spin_unlock(&rtas_data_buf_lock);

                switch (rc) {
                case COMPLETE:
                        break;

                case NEXT_SIBLING:
                        dn = dlpar_parse_cc_node(ccwa);
                        if (!dn)
                                goto cc_error;

                        dn->parent = last_dn->parent;
                        last_dn->sibling = dn;
                        last_dn = dn;
                        break;

                case NEXT_CHILD:
                        dn = dlpar_parse_cc_node(ccwa);
                        if (!dn)
                                goto cc_error;

                        if (!first_dn) {
                                dn->parent = parent;
                                first_dn = dn;
                        } else {
                                dn->parent = last_dn;
                                if (last_dn)
                                        last_dn->child = dn;
                        }

                        last_dn = dn;
                        break;

                case NEXT_PROPERTY:
                        property = dlpar_parse_cc_property(ccwa);
                        if (!property)
                                goto cc_error;

                        if (!last_dn->properties)
                                last_dn->properties = property;
                        else
                                last_property->next = property;

                        last_property = property;
                        break;

                case PREV_PARENT:
                        last_dn = last_dn->parent;
                        break;

                case CALL_AGAIN:
                        break;

                case MORE_MEMORY:
                case ERR_CFG_USE:
                default:
                        printk(KERN_ERR "Unexpected Error (%d) "
                               "returned from configure-connector\n", rc);
                        goto cc_error;
                }
        } while (rc);

cc_error:
        kfree(data_buf);

        if (rc) {
                if (first_dn)
                        dlpar_free_cc_nodes(first_dn);

                return NULL;
        }

        return first_dn;
}

int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
{
        int rc;

        dn->parent = parent;

        rc = of_attach_node(dn);
        if (rc) {
                printk(KERN_ERR "Failed to add device node %pOF\n", dn);
                return rc;
        }

        return 0;
}

int dlpar_detach_node(struct device_node *dn)
{
        struct device_node *child;
        int rc;

        child = of_get_next_child(dn, NULL);
        while (child) {
                dlpar_detach_node(child);
                child = of_get_next_child(dn, child);
        }

        rc = of_detach_node(dn);
        if (rc)
                return rc;

        of_node_put(dn);

        return 0;
}

#define DR_ENTITY_SENSE         9003
#define DR_ENTITY_PRESENT       1
#define DR_ENTITY_UNUSABLE      2
#define ALLOCATION_STATE        9003
#define ALLOC_UNUSABLE          0
#define ALLOC_USABLE            1
#define ISOLATION_STATE         9001
#define ISOLATE                 0
#define UNISOLATE               1

int dlpar_acquire_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
                       DR_ENTITY_SENSE, drc_index);
        if (rc || dr_status != DR_ENTITY_UNUSABLE)
                return -1;

        rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
        if (rc)
                return rc;

        rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
        if (rc) {
                rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
                return rc;
        }

        return 0;
}
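/*
 * dlpar_release_drc() below is the inverse of dlpar_acquire_drc(): the DRC
 * must currently be owned (DR_ENTITY_PRESENT), it is isolated first, and its
 * allocation state is then returned to unusable.  If the second step fails,
 * the isolation change is rolled back, mirroring the error handling above.
 */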

int dlpar_release_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
                       DR_ENTITY_SENSE, drc_index);
        if (rc || dr_status != DR_ENTITY_PRESENT)
                return -1;

        rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
        if (rc)
                return rc;

        rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
        if (rc) {
                rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
                return rc;
        }

        return 0;
}

int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
        int rc;

        /* pseries error logs are in BE format, convert to cpu type */
        switch (hp_elog->id_type) {
        case PSERIES_HP_ELOG_ID_DRC_COUNT:
                hp_elog->_drc_u.drc_count =
                        be32_to_cpu(hp_elog->_drc_u.drc_count);
                break;
        case PSERIES_HP_ELOG_ID_DRC_INDEX:
                hp_elog->_drc_u.drc_index =
                        be32_to_cpu(hp_elog->_drc_u.drc_index);
                break;
        case PSERIES_HP_ELOG_ID_DRC_IC:
                hp_elog->_drc_u.ic.count =
                        be32_to_cpu(hp_elog->_drc_u.ic.count);
                hp_elog->_drc_u.ic.index =
                        be32_to_cpu(hp_elog->_drc_u.ic.index);
        }

        switch (hp_elog->resource) {
        case PSERIES_HP_ELOG_RESOURCE_MEM:
                rc = dlpar_memory(hp_elog);
                break;
        case PSERIES_HP_ELOG_RESOURCE_CPU:
                rc = dlpar_cpu(hp_elog);
                break;
        case PSERIES_HP_ELOG_RESOURCE_PMEM:
                rc = dlpar_hp_pmem(hp_elog);
                break;

        default:
                pr_warn_ratelimited("Invalid resource (%d) specified\n",
                                    hp_elog->resource);
                rc = -EINVAL;
        }

        return rc;
}

static void pseries_hp_work_fn(struct work_struct *work)
{
        struct pseries_hp_work *hp_work =
                container_of(work, struct pseries_hp_work, work);

        handle_dlpar_errorlog(hp_work->errlog);

        kfree(hp_work->errlog);
        kfree((void *)work);
}

void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
{
        struct pseries_hp_work *work;
        struct pseries_hp_errorlog *hp_errlog_copy;

        hp_errlog_copy = kmemdup(hp_errlog, sizeof(*hp_errlog), GFP_ATOMIC);
        if (!hp_errlog_copy)
                return;

        work = kmalloc(sizeof(struct pseries_hp_work), GFP_ATOMIC);
        if (work) {
                INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
                work->errlog = hp_errlog_copy;
                queue_work(pseries_hp_wq, (struct work_struct *)work);
        } else {
                kfree(hp_errlog_copy);
        }
}

static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
        char *arg;

        arg = strsep(cmd, " ");
        if (!arg)
                return -EINVAL;

        if (sysfs_streq(arg, "memory")) {
                hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
        } else if (sysfs_streq(arg, "cpu")) {
                hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
        } else {
                pr_err("Invalid resource specified.\n");
                return -EINVAL;
        }

        return 0;
}

static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
        char *arg;

        arg = strsep(cmd, " ");
        if (!arg)
                return -EINVAL;

        if (sysfs_streq(arg, "add")) {
                hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
        } else if (sysfs_streq(arg, "remove")) {
                hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
        } else {
                pr_err("Invalid action specified.\n");
                return -EINVAL;
        }

        return 0;
}
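/*
 * The id_type portion of a request takes one of three forms, matching the
 * PSERIES_HP_ELOG_ID_* types handled by dlpar_parse_id_type() below:
 *
 *      index <drc_index>
 *      count <drc_count>
 *      indexed-count <drc_count> <drc_index>
 *
 * The numeric arguments are parsed with kstrtou32() using base 0, so decimal
 * and 0x-prefixed hexadecimal values are both accepted.
 */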

static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
        char *arg;
        u32 count, index;

        arg = strsep(cmd, " ");
        if (!arg)
                return -EINVAL;

        if (sysfs_streq(arg, "indexed-count")) {
                hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
                arg = strsep(cmd, " ");
                if (!arg) {
                        pr_err("No DRC count specified.\n");
                        return -EINVAL;
                }

                if (kstrtou32(arg, 0, &count)) {
                        pr_err("Invalid DRC count specified.\n");
                        return -EINVAL;
                }

                arg = strsep(cmd, " ");
                if (!arg) {
                        pr_err("No DRC Index specified.\n");
                        return -EINVAL;
                }

                if (kstrtou32(arg, 0, &index)) {
                        pr_err("Invalid DRC Index specified.\n");
                        return -EINVAL;
                }

                hp_elog->_drc_u.ic.count = cpu_to_be32(count);
                hp_elog->_drc_u.ic.index = cpu_to_be32(index);
        } else if (sysfs_streq(arg, "index")) {
                hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
                arg = strsep(cmd, " ");
                if (!arg) {
                        pr_err("No DRC Index specified.\n");
                        return -EINVAL;
                }

                if (kstrtou32(arg, 0, &index)) {
                        pr_err("Invalid DRC Index specified.\n");
                        return -EINVAL;
                }

                hp_elog->_drc_u.drc_index = cpu_to_be32(index);
        } else if (sysfs_streq(arg, "count")) {
                hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
                arg = strsep(cmd, " ");
                if (!arg) {
                        pr_err("No DRC count specified.\n");
                        return -EINVAL;
                }

                if (kstrtou32(arg, 0, &count)) {
                        pr_err("Invalid DRC count specified.\n");
                        return -EINVAL;
                }

                hp_elog->_drc_u.drc_count = cpu_to_be32(count);
        } else {
                pr_err("Invalid id_type specified.\n");
                return -EINVAL;
        }

        return 0;
}

static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
                           const char *buf, size_t count)
{
        struct pseries_hp_errorlog hp_elog;
        char *argbuf;
        char *args;
        int rc;

        args = argbuf = kstrdup(buf, GFP_KERNEL);
        if (!argbuf) {
                pr_info("Could not allocate resources for DLPAR operation\n");
                kfree(argbuf);
                return -ENOMEM;
        }

        /*
         * Parse out the request from the user, this will be in the form:
         * <resource> <action> <id_type> <id>
         */
        rc = dlpar_parse_resource(&args, &hp_elog);
        if (rc)
                goto dlpar_store_out;

        rc = dlpar_parse_action(&args, &hp_elog);
        if (rc)
                goto dlpar_store_out;

        rc = dlpar_parse_id_type(&args, &hp_elog);
        if (rc)
                goto dlpar_store_out;

        rc = handle_dlpar_errorlog(&hp_elog);

dlpar_store_out:
        kfree(argbuf);

        if (rc)
                pr_err("Could not handle DLPAR request \"%s\"\n", buf);

        return rc ? rc : count;
}

static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%s\n", "memory,cpu");
}

static CLASS_ATTR_RW(dlpar);

int __init dlpar_workqueue_init(void)
{
        if (pseries_hp_wq)
                return 0;

        pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
                                        WQ_UNBOUND, 1);

        return pseries_hp_wq ? 0 : -ENOMEM;
}

static int __init dlpar_sysfs_init(void)
{
        int rc;

        rc = dlpar_workqueue_init();
        if (rc)
                return rc;

        return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, dlpar_sysfs_init);
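
/*
 * Example usage of the sysfs interface registered above (a sketch only; the
 * attribute is created on kernel_kobj, so it is expected to appear as
 * /sys/kernel/dlpar):
 *
 *      # add one memory block by count
 *      echo "memory add count 1" > /sys/kernel/dlpar
 *
 *      # remove a CPU by DRC index
 *      echo "cpu remove index <drc_index>" > /sys/kernel/dlpar
 *
 * Each request follows the <resource> <action> <id_type> <id> format parsed
 * by dlpar_store().
 */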