// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 */

#define pr_fmt(fmt) "dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
#include <asm/rtas-work-area.h>

static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
	struct work_struct work;
	struct pseries_hp_errorlog *errlog;
};

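/*
 * Layout of the work area handed to the ibm,configure-connector RTAS
 * call. name_offset and prop_offset are byte offsets from the start of
 * the work area to the NUL-terminated node/property name and to the
 * property value returned by firmware; prop_length is the value length.
 */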
struct cc_workarea {
	__be32	drc_index;
	__be32	zero;
	__be32	name_offset;
	__be32	prop_length;
	__be32	prop_offset;
};

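/* Free a property allocated by dlpar_parse_cc_property() */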
void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

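/*
 * Build a struct property from the name and value that firmware placed
 * in the configure-connector work area. Returns NULL on allocation
 * failure.
 */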
static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);
	if (!prop->name) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

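/*
 * Allocate a new device node named after the string returned in the
 * work area. The node is marked OF_DYNAMIC so it can later be detached
 * and freed.
 */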
static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
{
	struct device_node *dn;
	const char *name;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (const char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kstrdup(name, GFP_KERNEL);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

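/* Free a single node and every property attached to it */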
static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

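/*
 * Recursively free a device tree fragment built by
 * dlpar_configure_connector(), children and siblings included.
 */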
void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}

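/*
 * Status values returned by the ibm,configure-connector RTAS call. The
 * non-error values tell the caller what the work area now describes: a
 * new sibling or child node, another property for the current node, or
 * a step back up to the parent node.
 */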
#define COMPLETE	0
#define NEXT_SIBLING	1
#define NEXT_CHILD	2
#define NEXT_PROPERTY	3
#define PREV_PARENT	4
#define MORE_MEMORY	5
#define ERR_CFG_USE	-9003

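/*
 * Call ibm,configure-connector repeatedly for @drc_index and assemble
 * the nodes and properties it returns into a device tree fragment
 * rooted under @parent. Returns the first new node, or NULL on error;
 * on failure any partially built fragment is freed.
 */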
struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	struct rtas_work_area *work_area;
	char *data_buf;
	int cc_token;
	int rc = -1;

	cc_token = rtas_function_token(RTAS_FN_IBM_CONFIGURE_CONNECTOR);
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	work_area = rtas_work_area_alloc(SZ_4K);
	data_buf = rtas_work_area_raw_buf(work_area);

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		do {
			rc = rtas_call(cc_token, 2, 1, NULL,
				       rtas_work_area_phys(work_area), NULL);
		} while (rtas_busy_delay(rc));

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			printk(KERN_ERR "Unexpected Error (%d) returned from configure-connector\n",
			       rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	rtas_work_area_free(work_area);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

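/* Attach a dynamically allocated device node under @parent */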
int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
{
	int rc;

	dn->parent = parent;

	rc = of_attach_node(dn);
	if (rc) {
		printk(KERN_ERR "Failed to add device node %pOF\n", dn);
		return rc;
	}

	return 0;
}

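/*
 * Detach a node and all of its children from the device tree, then drop
 * a reference so that dynamically allocated nodes can be freed once no
 * other users remain.
 */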
int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	child = of_get_next_child(dn, NULL);
	while (child) {
		dlpar_detach_node(child);
		child = of_get_next_child(dn, child);
	}

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	of_node_put(dn);

	return 0;
}

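/*
 * RTAS sensor and indicator tokens used for dynamic reconfiguration,
 * along with the states they report or set for a DRC (dynamic
 * reconfiguration connector).
 */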
#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

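/*
 * Acquire a DRC for use by the OS: the connector must currently be
 * unusable; mark its allocation state usable and then un-isolate it.
 * The allocation state is rolled back if un-isolation fails.
 */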
int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

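/*
 * Release a DRC back to firmware: the connector must currently be
 * present; isolate it and then mark its allocation state unusable.
 * The connector is un-isolated again if that last step fails.
 */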
int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}

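/* Un-isolate an already present DRC without changing its allocation state */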
int dlpar_unisolate_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);

	return 0;
}

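/*
 * Dispatch a hotplug error log to the handler for the resource type it
 * names (memory, CPU, or persistent memory).
 */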
int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_CPU:
		rc = dlpar_cpu(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_PMEM:
		rc = dlpar_hp_pmem(hp_elog);
		break;

	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}

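/* Work item handler: process one queued hotplug request and free it */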
static void pseries_hp_work_fn(struct work_struct *work)
{
	struct pseries_hp_work *hp_work =
		container_of(work, struct pseries_hp_work, work);

	handle_dlpar_errorlog(hp_work->errlog);

	kfree(hp_work->errlog);
	kfree(work);
}

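/*
 * Queue a hotplug request for processing on the ordered DLPAR
 * workqueue. The error log is copied with GFP_ATOMIC since callers may
 * not be able to sleep; if allocation fails the request is dropped.
 */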
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
{
	struct pseries_hp_work *work;
	struct pseries_hp_errorlog *hp_errlog_copy;

	hp_errlog_copy = kmemdup(hp_errlog, sizeof(*hp_errlog), GFP_ATOMIC);
	if (!hp_errlog_copy)
		return;

	work = kmalloc(sizeof(struct pseries_hp_work), GFP_ATOMIC);
	if (work) {
		INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
		work->errlog = hp_errlog_copy;
		queue_work(pseries_hp_wq, (struct work_struct *)work);
	} else {
		kfree(hp_errlog_copy);
	}
}

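/* Parse the <resource> token of a sysfs DLPAR request ("memory" or "cpu") */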
static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "memory")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
	} else if (sysfs_streq(arg, "cpu")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
	} else {
		pr_err("Invalid resource specified.\n");
		return -EINVAL;
	}

	return 0;
}

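/* Parse the <action> token of a sysfs DLPAR request ("add" or "remove") */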
static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "add")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
	} else if (sysfs_streq(arg, "remove")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
	} else {
		pr_err("Invalid action specified.\n");
		return -EINVAL;
	}

	return 0;
}

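/*
 * Parse the <id_type> and <id> tokens of a sysfs DLPAR request. The id
 * may be a DRC index, a DRC count, or an "indexed-count" pair given as
 * <count> <index>.
 */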
static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;
	u32 count, index;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "indexed-count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.ic.count = cpu_to_be32(count);
		hp_elog->_drc_u.ic.index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "index")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
	} else {
		pr_err("Invalid id_type specified.\n");
		return -EINVAL;
	}

	return 0;
}

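/*
 * sysfs store handler for /sys/kernel/dlpar. Requests take the form
 * "<resource> <action> <id_type> <id>", for example (the DRC index
 * below is only illustrative):
 *
 *	echo "memory add count 1" > /sys/kernel/dlpar
 *	echo "cpu remove index 0x10000001" > /sys/kernel/dlpar
 *
 * The request is handled synchronously; the number of bytes consumed is
 * returned on success and a negative errno on failure.
 */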
static ssize_t dlpar_store(const struct class *class, const struct class_attribute *attr,
			   const char *buf, size_t count)
{
	struct pseries_hp_errorlog hp_elog;
	char *argbuf;
	char *args;
	int rc;

	args = argbuf = kstrdup(buf, GFP_KERNEL);
	if (!argbuf)
		return -ENOMEM;

	/*
	 * Parse out the request from the user; it will be in the form:
	 *  <resource> <action> <id_type> <id>
	 */
	rc = dlpar_parse_resource(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_action(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_id_type(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = handle_dlpar_errorlog(&hp_elog);

dlpar_store_out:
	kfree(argbuf);

	if (rc)
		pr_err("Could not handle DLPAR request \"%s\"\n", buf);

	return rc ? rc : count;
}

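/* sysfs show handler: report the resource types handled by this interface */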
static ssize_t dlpar_show(const struct class *class, const struct class_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%s\n", "memory,cpu");
}

static CLASS_ATTR_RW(dlpar);

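/*
 * Create the ordered workqueue used for hotplug requests so that queued
 * events are processed one at a time, in order. Safe to call more than
 * once.
 */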
int __init dlpar_workqueue_init(void)
{
	if (pseries_hp_wq)
		return 0;

	pseries_hp_wq = alloc_ordered_workqueue("pseries hotplug workqueue", 0);

	return pseries_hp_wq ? 0 : -ENOMEM;
}

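/* Set up the hotplug workqueue and create /sys/kernel/dlpar at boot */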
static int __init dlpar_sysfs_init(void)
{
	int rc;

	rc = dlpar_workqueue_init();
	if (rc)
		return rc;

	return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, dlpar_sysfs_init);