/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>
#include "offline_states.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>

struct cc_workarea {
        u32     drc_index;
        u32     zero;
        u32     name_offset;
        u32     prop_length;
        u32     prop_offset;
};

void dlpar_free_cc_property(struct property *prop)
{
        kfree(prop->name);
        kfree(prop->value);
        kfree(prop);
}

static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
        struct property *prop;
        char *name;
        char *value;

        prop = kzalloc(sizeof(*prop), GFP_KERNEL);
        if (!prop)
                return NULL;

        name = (char *)ccwa + ccwa->name_offset;
        prop->name = kstrdup(name, GFP_KERNEL);
        if (!prop->name) {
                dlpar_free_cc_property(prop);
                return NULL;
        }

        prop->length = ccwa->prop_length;
        value = (char *)ccwa + ccwa->prop_offset;
        prop->value = kmemdup(value, prop->length, GFP_KERNEL);
        if (!prop->value) {
                dlpar_free_cc_property(prop);
                return NULL;
        }

        return prop;
}
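/*
 * Build a detached device_node for one node reported by
 * ibm,configure-connector.  As with the property data above, the node
 * name is read from an offset into the RTAS work area, and full_name is
 * formed by appending that name to the parent path supplied by the
 * caller (for a hypothetical parent path "/cpus" and node name
 * "PowerPC,POWER7@10" this would yield "/cpus/PowerPC,POWER7@10").
 */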
static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
                                               const char *path)
{
        struct device_node *dn;
        char *name;

        /* If the parent node path is "/", advance path to the NULL
         * terminator to prevent a double leading slash in full_name.
         */
        if (!path[1])
                path++;

        dn = kzalloc(sizeof(*dn), GFP_KERNEL);
        if (!dn)
                return NULL;

        name = (char *)ccwa + ccwa->name_offset;
        dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
        if (!dn->full_name) {
                kfree(dn);
                return NULL;
        }

        of_node_set_flag(dn, OF_DYNAMIC);
        kref_init(&dn->kref);

        return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
        struct property *prop;

        while (dn->properties) {
                prop = dn->properties;
                dn->properties = prop->next;
                dlpar_free_cc_property(prop);
        }

        kfree(dn->full_name);
        kfree(dn);
}

void dlpar_free_cc_nodes(struct device_node *dn)
{
        if (dn->child)
                dlpar_free_cc_nodes(dn->child);

        if (dn->sibling)
                dlpar_free_cc_nodes(dn->sibling);

        dlpar_free_one_cc_node(dn);
}

#define COMPLETE        0
#define NEXT_SIBLING    1
#define NEXT_CHILD      2
#define NEXT_PROPERTY   3
#define PREV_PARENT     4
#define MORE_MEMORY     5
#define CALL_AGAIN      -2
#define ERR_CFG_USE     -9003
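/*
 * Return codes from the ibm,configure-connector RTAS call, as handled
 * below: the call is made repeatedly, and each non-zero return tells us
 * how to extend the device tree fragment being built, that is, add a
 * sibling, descend to a new child, attach a property, or pop back up to
 * the parent.  CALL_AGAIN means the call must simply be retried, and
 * COMPLETE (0) ends the loop.  MORE_MEMORY, ERR_CFG_USE, and anything
 * else unexpected are treated as errors.
 */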
struct device_node *dlpar_configure_connector(u32 drc_index,
                                              struct device_node *parent)
{
        struct device_node *dn;
        struct device_node *first_dn = NULL;
        struct device_node *last_dn = NULL;
        struct property *property;
        struct property *last_property = NULL;
        struct cc_workarea *ccwa;
        char *data_buf;
        const char *parent_path = parent->full_name;
        int cc_token;
        int rc = -1;

        cc_token = rtas_token("ibm,configure-connector");
        if (cc_token == RTAS_UNKNOWN_SERVICE)
                return NULL;

        data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
        if (!data_buf)
                return NULL;

        ccwa = (struct cc_workarea *)&data_buf[0];
        ccwa->drc_index = drc_index;
        ccwa->zero = 0;

        do {
                /* Since we release the rtas_data_buf lock between
                 * configure-connector calls we want to re-populate the
                 * rtas data buffer with the contents of the previous call.
                 */
                spin_lock(&rtas_data_buf_lock);

                memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
                rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
                memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

                spin_unlock(&rtas_data_buf_lock);

                switch (rc) {
                case COMPLETE:
                        break;

                case NEXT_SIBLING:
                        dn = dlpar_parse_cc_node(ccwa, parent_path);
                        if (!dn)
                                goto cc_error;

                        dn->parent = last_dn->parent;
                        last_dn->sibling = dn;
                        last_dn = dn;
                        break;

                case NEXT_CHILD:
                        if (first_dn)
                                parent_path = last_dn->full_name;

                        dn = dlpar_parse_cc_node(ccwa, parent_path);
                        if (!dn)
                                goto cc_error;

                        if (!first_dn) {
                                dn->parent = parent;
                                first_dn = dn;
                        } else {
                                dn->parent = last_dn;
                                if (last_dn)
                                        last_dn->child = dn;
                        }

                        last_dn = dn;
                        break;

                case NEXT_PROPERTY:
                        property = dlpar_parse_cc_property(ccwa);
                        if (!property)
                                goto cc_error;

                        if (!last_dn->properties)
                                last_dn->properties = property;
                        else
                                last_property->next = property;

                        last_property = property;
                        break;

                case PREV_PARENT:
                        last_dn = last_dn->parent;
                        parent_path = last_dn->parent->full_name;
                        break;

                case CALL_AGAIN:
                        break;

                case MORE_MEMORY:
                case ERR_CFG_USE:
                default:
                        printk(KERN_ERR "Unexpected Error (%d) "
                               "returned from configure-connector\n", rc);
                        goto cc_error;
                }
        } while (rc);

cc_error:
        kfree(data_buf);

        if (rc) {
                if (first_dn)
                        dlpar_free_cc_nodes(first_dn);

                return NULL;
        }

        return first_dn;
}

static struct device_node *derive_parent(const char *path)
{
        struct device_node *parent;
        char *last_slash;

        last_slash = strrchr(path, '/');
        if (last_slash == path) {
                parent = of_find_node_by_path("/");
        } else {
                char *parent_path;
                int parent_path_len = last_slash - path + 1;

                parent_path = kmalloc(parent_path_len, GFP_KERNEL);
                if (!parent_path)
                        return NULL;

                strlcpy(parent_path, path, parent_path_len);
                parent = of_find_node_by_path(parent_path);
                kfree(parent_path);
        }

        return parent;
}

int dlpar_attach_node(struct device_node *dn)
{
        int rc;

        dn->parent = derive_parent(dn->full_name);
        if (!dn->parent)
                return -ENOMEM;

        rc = of_attach_node(dn);
        if (rc) {
                printk(KERN_ERR "Failed to add device node %s\n",
                       dn->full_name);
                return rc;
        }

        of_node_put(dn->parent);
        return 0;
}

int dlpar_detach_node(struct device_node *dn)
{
        struct device_node *child;
        int rc;

        child = of_get_next_child(dn, NULL);
        while (child) {
                dlpar_detach_node(child);
                child = of_get_next_child(dn, child);
        }

        rc = of_detach_node(dn);
        if (rc)
                return rc;

        of_node_put(dn); /* Must decrement the refcount */
        return 0;
}

#define DR_ENTITY_SENSE         9003
#define DR_ENTITY_PRESENT       1
#define DR_ENTITY_UNUSABLE      2
#define ALLOCATION_STATE        9003
#define ALLOC_UNUSABLE          0
#define ALLOC_USABLE            1
#define ISOLATION_STATE         9001
#define ISOLATE                 0
#define UNISOLATE               1
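/*
 * DRC state transitions used below, based on the sensor and indicator
 * tokens defined above: acquiring a connector for the partition checks
 * that the entity is currently unusable, marks its allocation state
 * usable, and then un-isolates it; releasing reverses the sequence
 * (isolate, then mark unusable), undoing the first step if the second
 * one fails.
 */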
int dlpar_acquire_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
                       DR_ENTITY_SENSE, drc_index);
        if (rc || dr_status != DR_ENTITY_UNUSABLE)
                return -1;

        rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
        if (rc)
                return rc;

        rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
        if (rc) {
                rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
                return rc;
        }

        return 0;
}

int dlpar_release_drc(u32 drc_index)
{
        int dr_status, rc;

        rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
                       DR_ENTITY_SENSE, drc_index);
        if (rc || dr_status != DR_ENTITY_PRESENT)
                return -1;

        rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
        if (rc)
                return rc;

        rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
        if (rc) {
                rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
                return rc;
        }

        return 0;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE

static int dlpar_online_cpu(struct device_node *dn)
{
        int rc = 0;
        unsigned int cpu;
        int len, nthreads, i;
        const u32 *intserv;

        intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
        if (!intserv)
                return -EINVAL;

        nthreads = len / sizeof(u32);

        cpu_maps_update_begin();
        for (i = 0; i < nthreads; i++) {
                for_each_present_cpu(cpu) {
                        if (get_hard_smp_processor_id(cpu) != intserv[i])
                                continue;
                        BUG_ON(get_cpu_current_state(cpu)
                                        != CPU_STATE_OFFLINE);
                        cpu_maps_update_done();
                        rc = cpu_up(cpu);
                        if (rc)
                                goto out;
                        cpu_maps_update_begin();

                        break;
                }
                if (cpu == num_possible_cpus())
                        printk(KERN_WARNING "Could not find cpu to online "
                               "with physical id 0x%x\n", intserv[i]);
        }
        cpu_maps_update_done();

out:
        return rc;
}
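/*
 * sysfs entry points for dynamically adding and removing CPUs.  The
 * probe path parses a drc-index from the user buffer, builds the new
 * device tree nodes via configure-connector, acquires the DRC, attaches
 * the nodes, and brings the new threads online; the release path below
 * reverses this (offline the threads, release the DRC, detach the
 * node).  A sketch of how this is typically driven from user space,
 * assuming the generic probe/release attributes created under
 * /sys/devices/system/cpu when CONFIG_ARCH_CPU_PROBE_RELEASE is set
 * (the drc-index and node path are made-up examples):
 *
 *      echo 0x10000008 > /sys/devices/system/cpu/probe
 *      echo /cpus/PowerPC,POWER7@8 > /sys/devices/system/cpu/release
 */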
static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
        struct device_node *dn, *parent;
        unsigned long drc_index;
        int rc;

        cpu_hotplug_driver_lock();
        rc = strict_strtoul(buf, 0, &drc_index);
        if (rc) {
                rc = -EINVAL;
                goto out;
        }

        parent = of_find_node_by_path("/cpus");
        if (!parent) {
                rc = -ENODEV;
                goto out;
        }

        dn = dlpar_configure_connector(drc_index, parent);
        if (!dn) {
                rc = -EINVAL;
                goto out;
        }

        of_node_put(parent);

        rc = dlpar_acquire_drc(drc_index);
        if (rc) {
                dlpar_free_cc_nodes(dn);
                rc = -EINVAL;
                goto out;
        }

        rc = dlpar_attach_node(dn);
        if (rc) {
                dlpar_release_drc(drc_index);
                dlpar_free_cc_nodes(dn);
                goto out;
        }

        rc = dlpar_online_cpu(dn);
out:
        cpu_hotplug_driver_unlock();

        return rc ? rc : count;
}

static int dlpar_offline_cpu(struct device_node *dn)
{
        int rc = 0;
        unsigned int cpu;
        int len, nthreads, i;
        const u32 *intserv;

        intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
        if (!intserv)
                return -EINVAL;

        nthreads = len / sizeof(u32);

        cpu_maps_update_begin();
        for (i = 0; i < nthreads; i++) {
                for_each_present_cpu(cpu) {
                        if (get_hard_smp_processor_id(cpu) != intserv[i])
                                continue;

                        if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
                                break;

                        if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
                                set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
                                cpu_maps_update_done();
                                rc = cpu_down(cpu);
                                if (rc)
                                        goto out;
                                cpu_maps_update_begin();
                                break;
                        }

                        /*
                         * The cpu is in CPU_STATE_INACTIVE.
                         * Upgrade its state to CPU_STATE_OFFLINE.
                         */
                        set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
                        BUG_ON(plpar_hcall_norets(H_PROD, intserv[i])
                                        != H_SUCCESS);
                        __cpu_die(cpu);
                        break;
                }
                if (cpu == num_possible_cpus())
                        printk(KERN_WARNING "Could not find cpu to offline "
                               "with physical id 0x%x\n", intserv[i]);
        }
        cpu_maps_update_done();

out:
        return rc;
}

static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
        struct device_node *dn;
        const u32 *drc_index;
        int rc;

        dn = of_find_node_by_path(buf);
        if (!dn)
                return -EINVAL;

        drc_index = of_get_property(dn, "ibm,my-drc-index", NULL);
        if (!drc_index) {
                of_node_put(dn);
                return -EINVAL;
        }

        cpu_hotplug_driver_lock();
        rc = dlpar_offline_cpu(dn);
        if (rc) {
                of_node_put(dn);
                rc = -EINVAL;
                goto out;
        }

        rc = dlpar_release_drc(*drc_index);
        if (rc) {
                of_node_put(dn);
                goto out;
        }

        rc = dlpar_detach_node(dn);
        if (rc) {
                dlpar_acquire_drc(*drc_index);
                goto out;
        }

        of_node_put(dn);
out:
        cpu_hotplug_driver_unlock();
        return rc ? rc : count;
}

static int __init pseries_dlpar_init(void)
{
        ppc_md.cpu_probe = dlpar_cpu_probe;
        ppc_md.cpu_release = dlpar_cpu_release;

        return 0;
}
machine_device_initcall(pseries, pseries_dlpar_init);

#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */