/*
 * PowerNV OPAL Dump Interface
 *
 * Copyright 2013,2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kobject.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>

#include <asm/opal.h>

#define DUMP_TYPE_FSP	0x01

struct dump_obj {
	struct kobject  kobj;
	struct bin_attribute dump_attr;
	uint32_t	id;  /* becomes object name */
	uint32_t	type;
	uint32_t	size;
	char		*buffer;
};
#define to_dump_obj(x) container_of(x, struct dump_obj, kobj)

struct dump_attribute {
	struct attribute attr;
	ssize_t (*show)(struct dump_obj *dump, struct dump_attribute *attr,
			char *buf);
	ssize_t (*store)(struct dump_obj *dump, struct dump_attribute *attr,
			 const char *buf, size_t count);
};
#define to_dump_attr(x) container_of(x, struct dump_attribute, attr)

static ssize_t dump_id_show(struct dump_obj *dump_obj,
			    struct dump_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "0x%x\n", dump_obj->id);
}

static const char *dump_type_to_string(uint32_t type)
{
	switch (type) {
	case 0x01: return "SP Dump";
	case 0x02: return "System/Platform Dump";
	case 0x03: return "SMA Dump";
	default: return "unknown";
	}
}

static ssize_t dump_type_show(struct dump_obj *dump_obj,
			      struct dump_attribute *attr,
			      char *buf)
{
	return sprintf(buf, "0x%x %s\n", dump_obj->type,
		       dump_type_to_string(dump_obj->type));
}

static ssize_t dump_ack_show(struct dump_obj *dump_obj,
			     struct dump_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "ack - acknowledge dump\n");
}

/*
 * Send acknowledgement to OPAL
 */
static int64_t dump_send_ack(uint32_t dump_id)
{
	int rc;

	rc = opal_dump_ack(dump_id);
	if (rc)
		pr_warn("%s: Failed to send ack to Dump ID 0x%x (%d)\n",
			__func__, dump_id, rc);
	return rc;
}

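/*
 * Deferred kobject release: runs from a workqueue (via
 * sysfs_schedule_callback()) so the final reference is not dropped
 * from inside a sysfs attribute method.
 */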
static void delay_release_kobj(void *kobj)
{
	kobject_put((struct kobject *)kobj);
}

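/*
 * Acknowledge a dump: tell OPAL the dump has been retrieved so the
 * service processor can free it, then remove the sysfs object.  The
 * kobject release is scheduled via sysfs_schedule_callback() because
 * an attribute method cannot tear down its own kobject directly.
 */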
static ssize_t dump_ack_store(struct dump_obj *dump_obj,
			      struct dump_attribute *attr,
			      const char *buf,
			      size_t count)
{
	dump_send_ack(dump_obj->id);
	sysfs_schedule_callback(&dump_obj->kobj, delay_release_kobj,
				&dump_obj->kobj, THIS_MODULE);
	return count;
}

/*
 * Attributes of a dump
 * The binary "dump" attribute is created at run time since its
 * size differs for every dump.
 */
static struct dump_attribute id_attribute =
	__ATTR(id, 0444, dump_id_show, NULL);
static struct dump_attribute type_attribute =
	__ATTR(type, 0444, dump_type_show, NULL);
static struct dump_attribute ack_attribute =
	__ATTR(acknowledge, 0660, dump_ack_show, dump_ack_store);

static ssize_t init_dump_show(struct dump_obj *dump_obj,
			      struct dump_attribute *attr,
			      char *buf)
{
	return sprintf(buf, "1 - initiate dump\n");
}

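/*
 * Request that firmware initiate a new dump of the given type
 * (only FSP dumps are triggered from here).
 */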
static int64_t dump_fips_init(uint8_t type)
{
	int rc;

	rc = opal_dump_init(type);
	if (rc)
		pr_warn("%s: Failed to initiate FSP dump (%d)\n",
			__func__, rc);
	return rc;
}

static ssize_t init_dump_store(struct dump_obj *dump_obj,
			       struct dump_attribute *attr,
			       const char *buf,
			       size_t count)
{
	dump_fips_init(DUMP_TYPE_FSP);
	pr_info("%s: Initiated FSP dump\n", __func__);
	return count;
}

static struct dump_attribute initiate_attribute =
	__ATTR(initiate_dump, 0600, init_dump_show, init_dump_store);

static struct attribute *initiate_attrs[] = {
	&initiate_attribute.attr,
	NULL,
};

static struct attribute_group initiate_attr_group = {
	.attrs = initiate_attrs,
};

static struct kset *dump_kset;

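/*
 * sysfs_ops glue: route the generic kobject attribute callbacks to
 * the show/store methods of struct dump_attribute.
 */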
static ssize_t dump_attr_show(struct kobject *kobj,
			      struct attribute *attr,
			      char *buf)
{
	struct dump_attribute *attribute;
	struct dump_obj *dump;

	attribute = to_dump_attr(attr);
	dump = to_dump_obj(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(dump, attribute, buf);
}

static ssize_t dump_attr_store(struct kobject *kobj,
			       struct attribute *attr,
			       const char *buf, size_t len)
{
	struct dump_attribute *attribute;
	struct dump_obj *dump;

	attribute = to_dump_attr(attr);
	dump = to_dump_obj(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(dump, attribute, buf, len);
}

static const struct sysfs_ops dump_sysfs_ops = {
	.show = dump_attr_show,
	.store = dump_attr_store,
};

static void dump_release(struct kobject *kobj)
{
	struct dump_obj *dump;

	dump = to_dump_obj(kobj);
	vfree(dump->buffer);
	kfree(dump);
}

static struct attribute *dump_default_attrs[] = {
	&id_attribute.attr,
	&type_attribute.attr,
	&ack_attribute.attr,
	NULL,
};

static struct kobj_type dump_ktype = {
	.sysfs_ops = &dump_sysfs_ops,
	.release = &dump_release,
	.default_attrs = dump_default_attrs,
};

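/*
 * The dump buffer is vmalloc'd: virtually contiguous but physically
 * scattered.  To let firmware write into it we describe it with a
 * chained scatter-gather list.  Each node occupies one page and holds
 * up to SG_ENTRIES_PER_NODE entries, one per page of the buffer.
 */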
static void free_dump_sg_list(struct opal_sg_list *list)
{
	struct opal_sg_list *sg1;
	while (list) {
		sg1 = list->next;
		kfree(list);
		list = sg1;
	}
	list = NULL;
}

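/*
 * Build the scatter-gather list for dump->buffer.  Each entry records
 * the physical address and length of one page of the buffer; a new
 * node is chained in once SG_ENTRIES_PER_NODE entries are filled.
 */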
static struct opal_sg_list *dump_data_to_sglist(struct dump_obj *dump)
{
	struct opal_sg_list *sg1, *list = NULL;
	void *addr;
	int64_t size;

	addr = dump->buffer;
	size = dump->size;

	sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!sg1)
		goto nomem;

	list = sg1;
	sg1->num_entries = 0;
	while (size > 0) {
		/* Translate virtual address to physical address */
		sg1->entry[sg1->num_entries].data =
			(void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);

		if (size > PAGE_SIZE)
			sg1->entry[sg1->num_entries].length = PAGE_SIZE;
		else
			sg1->entry[sg1->num_entries].length = size;

		sg1->num_entries++;
		if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
			sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
			if (!sg1->next)
				goto nomem;

			sg1 = sg1->next;
			sg1->num_entries = 0;
		}
		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return list;

nomem:
	pr_err("%s: Failed to allocate memory\n", __func__);
	free_dump_sg_list(list);
	return NULL;
}

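/*
 * Convert the SG list into the form firmware expects: chain pointers
 * become physical addresses, and num_entries is rewritten as the node
 * length in bytes (the entries plus the 16-byte node header).
 */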
static void sglist_to_phy_addr(struct opal_sg_list *list)
{
	struct opal_sg_list *sg, *next;

	for (sg = list; sg; sg = next) {
		next = sg->next;
		/* Don't translate NULL pointer for last entry */
		if (sg->next)
			sg->next = (struct opal_sg_list *)__pa(sg->next);
		else
			sg->next = NULL;

		/* Convert num_entries to length */
		sg->num_entries =
			sg->num_entries * sizeof(struct opal_sg_entry) + 16;
	}
}

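/*
 * Query the ID, size and type of the next available dump.  Newer
 * firmware reports the type via opal_dump_info2(); if that call
 * returns OPAL_PARAMETER, fall back to opal_dump_info() and leave
 * the type as 0xffffffff ("unknown").
 */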
static int64_t dump_read_info(uint32_t *id, uint32_t *size, uint32_t *type)
{
	int rc;
	*type = 0xffffffff;

	rc = opal_dump_info2(id, size, type);

	if (rc == OPAL_PARAMETER)
		rc = opal_dump_info(id, size);

	if (rc)
		pr_warn("%s: Failed to get dump info (%d)\n",
			__func__, rc);
	return rc;
}

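/*
 * Fetch the dump from firmware: allocate a buffer, describe it with
 * an SG list and hand the list's physical address to opal_dump_read(),
 * polling while the firmware reports it is busy.
 */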
static int64_t dump_read_data(struct dump_obj *dump)
{
	struct opal_sg_list *list;
	uint64_t addr;
	int64_t rc;

	/* Allocate memory */
	dump->buffer = vzalloc(PAGE_ALIGN(dump->size));
	if (!dump->buffer) {
		pr_err("%s: Failed to allocate memory\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	/* Generate SG list */
	list = dump_data_to_sglist(dump);
	if (!list) {
		rc = -ENOMEM;
		goto out;
	}

	/* Translate sg list addr to real address */
	sglist_to_phy_addr(list);

	/* First entry address */
	addr = __pa(list);

	/* Fetch data */
	rc = OPAL_BUSY_EVENT;
	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_dump_read(dump->id, addr);
		if (rc == OPAL_BUSY_EVENT) {
			opal_poll_events(NULL);
			msleep(20);
		}
	}

	if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL)
		pr_warn("%s: Extract dump failed for ID 0x%x\n",
			__func__, dump->id);

	/* Free SG list */
	free_dump_sg_list(list);

out:
	return rc;
}

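/*
 * Read handler for the binary "dump" attribute.  The dump data is
 * fetched from firmware on the first read and then kept in memory
 * until userspace acknowledges the dump.
 */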
static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buffer, loff_t pos, size_t count)
{
	ssize_t rc;

	struct dump_obj *dump = to_dump_obj(kobj);

	if (!dump->buffer) {
		rc = dump_read_data(dump);

		if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL) {
			vfree(dump->buffer);
			dump->buffer = NULL;

			return -EIO;
		}
		if (rc == OPAL_PARTIAL) {
			/* On a partial read, we just return EIO
			 * and rely on userspace to ask us to try
			 * again.
			 */
			pr_info("%s: Platform dump partially read. ID = 0x%x\n",
				__func__, dump->id);
			return -EIO;
		}
	}

	memcpy(buffer, dump->buffer + pos, count);

	/* You may think we could free the dump buffer now and retrieve
	 * it again later if needed, but due to a current firmware
	 * limitation, that's not the case. So, once it has been read
	 * into userspace, we keep the dump around until userspace
	 * acknowledges it.
	 */

	return count;
}

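/*
 * Create the sysfs object for one dump: a kobject named
 * "0x<type>-0x<id>" in the dump kset, carrying the id, type and
 * acknowledge attributes plus the binary "dump" file that exposes
 * the dump data itself.
 */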
static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
					uint32_t type)
{
	struct dump_obj *dump;
	int rc;

	dump = kzalloc(sizeof(*dump), GFP_KERNEL);
	if (!dump)
		return NULL;

	dump->kobj.kset = dump_kset;

	kobject_init(&dump->kobj, &dump_ktype);

	sysfs_bin_attr_init(&dump->dump_attr);

	dump->dump_attr.attr.name = "dump";
	dump->dump_attr.attr.mode = 0400;
	dump->dump_attr.size = size;
	dump->dump_attr.read = dump_attr_read;

	dump->id = id;
	dump->size = size;
	dump->type = type;

	rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
	if (rc) {
		kobject_put(&dump->kobj);
		return NULL;
	}

	rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
	if (rc) {
		kobject_put(&dump->kobj);
		return NULL;
	}

	pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
		__func__, dump->id, dump->size);

	kobject_uevent(&dump->kobj, KOBJ_ADD);

	return dump;
}

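/*
 * Fetch information about the next available dump and create its
 * sysfs entries.
 */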
static int process_dump(void)
{
	int rc;
	uint32_t dump_id, dump_size, dump_type;
	struct dump_obj *dump;
	struct kobject *kobj;
	char name[22];

	rc = dump_read_info(&dump_id, &dump_size, &dump_type);
	if (rc != OPAL_SUCCESS)
		return rc;

	sprintf(name, "0x%x-0x%x", dump_type, dump_id);

	/* We may get notified twice; handle that gracefully and
	 * don't create two conflicting entries.
	 */
	kobj = kset_find_obj(dump_kset, name);
	if (kobj) {
		/* Drop the reference taken by kset_find_obj() */
		kobject_put(kobj);
		return 0;
	}

	dump = create_dump_obj(dump_id, dump_size, dump_type);
	if (!dump)
		return -1;

	return 0;
}

static void dump_work_fn(struct work_struct *work)
{
	process_dump();
}

static DECLARE_WORK(dump_work, dump_work_fn);

static void schedule_process_dump(void)
{
	schedule_work(&dump_work);
}

/*
 * New dump available notification
 *
 * Once we get a notification, add sysfs entries for the dump.  The
 * entries are created asynchronously from a workqueue, and the dump
 * data itself is only fetched on demand.
 */
static int dump_event(struct notifier_block *nb,
		      unsigned long events, void *change)
{
	if (events & OPAL_EVENT_DUMP_AVAIL)
		schedule_process_dump();

	return 0;
}

static struct notifier_block dump_nb = {
	.notifier_call  = dump_event,
	.next           = NULL,
	.priority       = 0
};

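/*
 * Set up the platform dump interface, typically found under
 * /sys/firmware/opal/dump/:
 *
 *   initiate_dump      - write (e.g. "1") to initiate a new FSP dump
 *   0x<type>-0x<id>/   - one directory per available dump, containing
 *     id, type         - dump identifier and type
 *     dump             - binary dump data (read to extract)
 *     acknowledge      - write (e.g. "ack") to acknowledge the dump,
 *                        letting firmware free it and removing the entry
 *
 * Register for OPAL_EVENT_DUMP_AVAIL so new dumps get sysfs entries,
 * and ask firmware to resend notifications for dumps that were
 * already pending at boot.
 */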
void __init opal_platform_dump_init(void)
{
	int rc;

	dump_kset = kset_create_and_add("dump", NULL, opal_kobj);
	if (!dump_kset) {
		pr_warn("%s: Failed to create dump kset\n", __func__);
		return;
	}

	rc = sysfs_create_group(&dump_kset->kobj, &initiate_attr_group);
	if (rc) {
		pr_warn("%s: Failed to create initiate dump attr group\n",
			__func__);
		kobject_put(&dump_kset->kobj);
		return;
	}

	rc = opal_notifier_register(&dump_nb);
	if (rc) {
		pr_warn("%s: Can't register OPAL event notifier (%d)\n",
			__func__, rc);
		return;
	}

	opal_dump_resend_notification();
}