xref: /openbmc/linux/drivers/acpi/sysfs.c (revision f7af616c)
// SPDX-License-Identifier: GPL-2.0
/*
 * sysfs.c - ACPI sysfs interface to userspace.
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/acpi.h>

#include "internal.h"

#ifdef CONFIG_ACPI_DEBUG
/*
 * ACPI debug sysfs I/F, including:
 * /sys/module/acpi/parameters/debug_layer
 * /sys/module/acpi/parameters/debug_level
 * /sys/module/acpi/parameters/trace_method_name
 * /sys/module/acpi/parameters/trace_state
 * /sys/module/acpi/parameters/trace_debug_layer
 * /sys/module/acpi/parameters/trace_debug_level
 */
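
/*
 * Illustrative usage (editorial sketch of the typical workflow described
 * in Documentation/firmware-guide/acpi/debug.rst; the mask values below
 * are examples only and must be picked from the tables printed by the
 * read side of these parameters):
 *
 *   # show the available layers/levels and which bits are currently set
 *   cat /sys/module/acpi/parameters/debug_layer
 *   cat /sys/module/acpi/parameters/debug_level
 *
 *   # e.g. enable one layer bit and one level bit
 *   echo 0x4 > /sys/module/acpi/parameters/debug_layer
 *   echo 0x4 > /sys/module/acpi/parameters/debug_level
 *
 * The extra output only exists when the kernel is built with
 * CONFIG_ACPI_DEBUG.
 */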

struct acpi_dlayer {
	const char *name;
	unsigned long value;
};
struct acpi_dlevel {
	const char *name;
	unsigned long value;
};
#define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }

static const struct acpi_dlayer acpi_debug_layers[] = {
	ACPI_DEBUG_INIT(ACPI_UTILITIES),
	ACPI_DEBUG_INIT(ACPI_HARDWARE),
	ACPI_DEBUG_INIT(ACPI_EVENTS),
	ACPI_DEBUG_INIT(ACPI_TABLES),
	ACPI_DEBUG_INIT(ACPI_NAMESPACE),
	ACPI_DEBUG_INIT(ACPI_PARSER),
	ACPI_DEBUG_INIT(ACPI_DISPATCHER),
	ACPI_DEBUG_INIT(ACPI_EXECUTER),
	ACPI_DEBUG_INIT(ACPI_RESOURCES),
	ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
	ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
	ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
	ACPI_DEBUG_INIT(ACPI_COMPILER),
	ACPI_DEBUG_INIT(ACPI_TOOLS),
};

static const struct acpi_dlevel acpi_debug_levels[] = {
	ACPI_DEBUG_INIT(ACPI_LV_INIT),
	ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
	ACPI_DEBUG_INIT(ACPI_LV_INFO),
	ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
	ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),

	ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
	ACPI_DEBUG_INIT(ACPI_LV_PARSE),
	ACPI_DEBUG_INIT(ACPI_LV_LOAD),
	ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
	ACPI_DEBUG_INIT(ACPI_LV_EXEC),
	ACPI_DEBUG_INIT(ACPI_LV_NAMES),
	ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
	ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
	ACPI_DEBUG_INIT(ACPI_LV_TABLES),
	ACPI_DEBUG_INIT(ACPI_LV_VALUES),
	ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
	ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
	ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
	ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),

	ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
	ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
	ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),

	ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
	ACPI_DEBUG_INIT(ACPI_LV_THREADS),
	ACPI_DEBUG_INIT(ACPI_LV_IO),
	ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),

	ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
	ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
	ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
	ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
};

static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
{
	int result = 0;
	int i;

	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");

	for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
				  acpi_debug_layers[i].name,
				  acpi_debug_layers[i].value,
				  (acpi_dbg_layer & acpi_debug_layers[i].value)
				  ? '*' : ' ');
	}
	result +=
	    sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
		    ACPI_ALL_DRIVERS,
		    (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
		    ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
		    == 0 ? ' ' : '-');
	result +=
	    sprintf(buffer + result,
		    "--\ndebug_layer = 0x%08X ( * = enabled)\n",
		    acpi_dbg_layer);

	return result;
}

static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
{
	int result = 0;
	int i;

	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");

	for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
				  acpi_debug_levels[i].name,
				  acpi_debug_levels[i].value,
				  (acpi_dbg_level & acpi_debug_levels[i].value)
				  ? '*' : ' ');
	}
	result +=
	    sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
		    acpi_dbg_level);

	return result;
}

static const struct kernel_param_ops param_ops_debug_layer = {
	.set = param_set_uint,
	.get = param_get_debug_layer,
};

static const struct kernel_param_ops param_ops_debug_level = {
	.set = param_set_uint,
	.get = param_get_debug_level,
};

module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);

static char trace_method_name[1024];

static int param_set_trace_method_name(const char *val,
				       const struct kernel_param *kp)
{
	u32 saved_flags = 0;
	bool is_abs_path = true;

	if (*val != '\\')
		is_abs_path = false;

	if ((is_abs_path && strlen(val) > 1023) ||
	    (!is_abs_path && strlen(val) > 1022)) {
		pr_err("%s: string parameter too long\n", kp->name);
		return -ENOSPC;
	}

	/*
	 * It's not safe to update acpi_gbl_trace_method_name without
	 * having the tracer stopped, so we save the original tracer
	 * state and disable it.
	 */
	saved_flags = acpi_gbl_trace_flags;
	(void)acpi_debug_trace(NULL,
			       acpi_gbl_trace_dbg_level,
			       acpi_gbl_trace_dbg_layer,
			       0);

	/* This is a hack.  We can't kmalloc in early boot. */
	if (is_abs_path)
		strcpy(trace_method_name, val);
	else {
		trace_method_name[0] = '\\';
		strcpy(trace_method_name+1, val);
	}

	/* Restore the original tracer state */
	(void)acpi_debug_trace(trace_method_name,
			       acpi_gbl_trace_dbg_level,
			       acpi_gbl_trace_dbg_layer,
			       saved_flags);

	return 0;
}

static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
{
	return scnprintf(buffer, PAGE_SIZE, "%s\n", acpi_gbl_trace_method_name);
}

static const struct kernel_param_ops param_ops_trace_method = {
	.set = param_set_trace_method_name,
	.get = param_get_trace_method_name,
};

static const struct kernel_param_ops param_ops_trace_attrib = {
	.set = param_set_uint,
	.get = param_get_uint,
};

module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);

static int param_set_trace_state(const char *val,
				 const struct kernel_param *kp)
{
	acpi_status status;
	const char *method = trace_method_name;
	u32 flags = 0;

/* The "xxx-once" comparisons must be done before the corresponding "xxx" comparisons */
#define acpi_compare_param(val, key)	\
	strncmp((val), (key), sizeof(key) - 1)

	if (!acpi_compare_param(val, "enable")) {
		method = NULL;
		flags = ACPI_TRACE_ENABLED;
	} else if (!acpi_compare_param(val, "disable"))
		method = NULL;
	else if (!acpi_compare_param(val, "method-once"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
	else if (!acpi_compare_param(val, "method"))
		flags = ACPI_TRACE_ENABLED;
	else if (!acpi_compare_param(val, "opcode-once"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
	else if (!acpi_compare_param(val, "opcode"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
	else
		return -EINVAL;

	status = acpi_debug_trace(method,
				  acpi_gbl_trace_dbg_level,
				  acpi_gbl_trace_dbg_layer,
				  flags);
	if (ACPI_FAILURE(status))
		return -EBUSY;

	return 0;
}

static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
{
	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
		return sprintf(buffer, "disable\n");
	else {
		if (acpi_gbl_trace_method_name) {
			if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
				return sprintf(buffer, "method-once\n");
			else
				return sprintf(buffer, "method\n");
		} else
			return sprintf(buffer, "enable\n");
	}
	return 0;
}

module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
		  NULL, 0644);
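
/*
 * Illustrative example of AML method tracing (an editorial sketch based on
 * Documentation/firmware-guide/acpi/method-tracing.rst; the method path
 * "\_SB.PCI0._INI" and the mask values are placeholders only):
 *
 *   echo 0x80 > /sys/module/acpi/parameters/trace_debug_layer
 *   echo 0x10 > /sys/module/acpi/parameters/trace_debug_level
 *   echo '\_SB.PCI0._INI' > /sys/module/acpi/parameters/trace_method_name
 *   echo method-once > /sys/module/acpi/parameters/trace_state
 *
 * Writing "disable" stops tracing; "enable" turns tracing on without
 * restricting the output to a single method.
 */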
#endif /* CONFIG_ACPI_DEBUG */


/* /sys/module/acpi/parameters/aml_debug_output */

module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
		   byte, 0644);
MODULE_PARM_DESC(aml_debug_output,
		 "To enable/disable the ACPI Debug Object output.");
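
/*
 * For example (illustrative): writing 1 here makes the AML interpreter
 * forward Store()s to the ASL Debug object to the kernel log, which gives
 * firmware ASL a cheap way to emit debug messages:
 *
 *   echo 1 > /sys/module/acpi/parameters/aml_debug_output
 */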

/* /sys/module/acpi/parameters/acpica_version */
static int param_get_acpica_version(char *buffer,
				    const struct kernel_param *kp)
{
	int result;

	result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);

	return result;
}

module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
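
/*
 * Reading this attribute returns the ACPICA release embedded in the kernel
 * as a hex-formatted date, e.g. (illustrative value):
 *
 *   $ cat /sys/module/acpi/parameters/acpica_version
 *   20210105
 */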

/*
 * ACPI table sysfs I/F:
 * /sys/firmware/acpi/tables/
 * /sys/firmware/acpi/tables/data/
 * /sys/firmware/acpi/tables/dynamic/
 */
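
/*
 * Illustrative example of dumping a table from user space (a common
 * workflow, not something this file enforces; "iasl" is the ACPICA
 * compiler/disassembler, typically packaged as acpica-tools):
 *
 *   cat /sys/firmware/acpi/tables/DSDT > dsdt.dat
 *   iasl -d dsdt.dat        # produces dsdt.dsl
 *
 * Tables installed at runtime show up under the dynamic/ subdirectory
 * instead.
 */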

static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
static struct kobject *tables_data_kobj;
static struct kobject *dynamic_tables_kobj;
static struct kobject *hotplug_kobj;

#define ACPI_MAX_TABLE_INSTANCES	999
#define ACPI_INST_SIZE			4 /* including trailing 0 */

struct acpi_table_attr {
	struct bin_attribute attr;
	char name[ACPI_NAMESEG_SIZE];
	int instance;
	char filename[ACPI_NAMESEG_SIZE+ACPI_INST_SIZE];
	struct list_head node;
};

struct acpi_data_attr {
	struct bin_attribute attr;
	u64	addr;
};

static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t offset, size_t count)
{
	struct acpi_table_attr *table_attr =
	    container_of(bin_attr, struct acpi_table_attr, attr);
	struct acpi_table_header *table_header = NULL;
	acpi_status status;
	ssize_t rc;

	status = acpi_get_table(table_attr->name, table_attr->instance,
				&table_header);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	rc = memory_read_from_buffer(buf, count, &offset, table_header,
			table_header->length);
	acpi_put_table(table_header);
	return rc;
}

static int acpi_table_attr_init(struct kobject *tables_obj,
				struct acpi_table_attr *table_attr,
				struct acpi_table_header *table_header)
{
	struct acpi_table_header *header = NULL;
	struct acpi_table_attr *attr = NULL;
	char instance_str[ACPI_INST_SIZE];

	sysfs_attr_init(&table_attr->attr.attr);
	ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);

	list_for_each_entry(attr, &acpi_table_attr_list, node) {
		if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
			if (table_attr->instance < attr->instance)
				table_attr->instance = attr->instance;
	}
	table_attr->instance++;
	if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
		pr_warn("%4.4s: too many table instances\n",
			table_attr->name);
		return -ERANGE;
	}

	ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
	table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
	if (table_attr->instance > 1 || (table_attr->instance == 1 &&
					 !acpi_get_table
					 (table_header->signature, 2, &header))) {
		snprintf(instance_str, sizeof(instance_str), "%u",
			 table_attr->instance);
		strcat(table_attr->filename, instance_str);
	}

	table_attr->attr.size = table_header->length;
	table_attr->attr.read = acpi_table_show;
	table_attr->attr.attr.name = table_attr->filename;
	table_attr->attr.attr.mode = 0400;

	return sysfs_create_bin_file(tables_obj, &table_attr->attr);
}
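
/*
 * Note on naming (editorial summary of the logic above): the first table
 * with a given signature is exposed under its bare signature (e.g. "SSDT")
 * unless a second instance with the same signature exists, in which case
 * every instance gets a numeric suffix ("SSDT1", "SSDT2", ...), capped at
 * ACPI_MAX_TABLE_INSTANCES instances.
 */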

acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
{
	struct acpi_table_attr *table_attr;

	switch (event) {
	case ACPI_TABLE_EVENT_INSTALL:
		table_attr =
		    kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
		if (!table_attr)
			return AE_NO_MEMORY;

		if (acpi_table_attr_init(dynamic_tables_kobj,
					 table_attr, table)) {
			kfree(table_attr);
			return AE_ERROR;
		}
		list_add_tail(&table_attr->node, &acpi_table_attr_list);
		break;
	case ACPI_TABLE_EVENT_LOAD:
	case ACPI_TABLE_EVENT_UNLOAD:
	case ACPI_TABLE_EVENT_UNINSTALL:
		/*
		 * We do not need to do anything right now because the
		 * table is not removed from the global table list when
		 * it is unloaded.
		 */
		break;
	default:
		return AE_BAD_PARAMETER;
	}
	return AE_OK;
}

static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr, char *buf,
			      loff_t offset, size_t count)
{
	struct acpi_data_attr *data_attr;
	void __iomem *base;
	ssize_t rc;

	data_attr = container_of(bin_attr, struct acpi_data_attr, attr);

	base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
	if (!base)
		return -ENOMEM;
	rc = memory_read_from_buffer(buf, count, &offset, base,
				     data_attr->attr.size);
	acpi_os_unmap_memory(base, data_attr->attr.size);

	return rc;
}

static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
{
	struct acpi_table_bert *bert = th;

	if (bert->header.length < sizeof(struct acpi_table_bert) ||
	    bert->region_length < sizeof(struct acpi_hest_generic_status)) {
		kfree(data_attr);
		return -EINVAL;
	}
	data_attr->addr = bert->address;
	data_attr->attr.size = bert->region_length;
	data_attr->attr.attr.name = "BERT";

	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
}
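
/*
 * Net effect (editorial note): when a valid BERT is present, its boot
 * error region becomes readable as /sys/firmware/acpi/tables/data/BERT,
 * e.g. (illustrative) "hexdump -C /sys/firmware/acpi/tables/data/BERT".
 */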

static struct acpi_data_obj {
	char *name;
	int (*fn)(void *, struct acpi_data_attr *);
} acpi_data_objs[] = {
	{ ACPI_SIG_BERT, acpi_bert_data_init },
};

#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)

static int acpi_table_data_init(struct acpi_table_header *th)
{
	struct acpi_data_attr *data_attr;
	int i;

	for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
		if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
			data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
			if (!data_attr)
				return -ENOMEM;
			sysfs_attr_init(&data_attr->attr.attr);
			data_attr->attr.read = acpi_data_show;
			data_attr->attr.attr.mode = 0400;
			return acpi_data_objs[i].fn(th, data_attr);
		}
	}
	return 0;
}

static int acpi_tables_sysfs_init(void)
{
	struct acpi_table_attr *table_attr;
	struct acpi_table_header *table_header = NULL;
	int table_index;
	acpi_status status;
	int ret;

	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
	if (!tables_kobj)
		goto err;

	tables_data_kobj = kobject_create_and_add("data", tables_kobj);
	if (!tables_data_kobj)
		goto err_tables_data;

	dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
	if (!dynamic_tables_kobj)
		goto err_dynamic_tables;

	for (table_index = 0;; table_index++) {
		status = acpi_get_table_by_index(table_index, &table_header);

		if (status == AE_BAD_PARAMETER)
			break;

		if (ACPI_FAILURE(status))
			continue;

		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
		if (!table_attr)
			return -ENOMEM;

		ret = acpi_table_attr_init(tables_kobj,
					   table_attr, table_header);
		if (ret) {
			kfree(table_attr);
			return ret;
		}
		list_add_tail(&table_attr->node, &acpi_table_attr_list);
		acpi_table_data_init(table_header);
	}

	kobject_uevent(tables_kobj, KOBJ_ADD);
	kobject_uevent(tables_data_kobj, KOBJ_ADD);
	kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);

	return 0;
err_dynamic_tables:
	kobject_put(tables_data_kobj);
err_tables_data:
	kobject_put(tables_kobj);
err:
	return -ENOMEM;
}

/*
 * Detailed ACPI IRQ counters:
 * /sys/firmware/acpi/interrupts/
 */
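
/*
 * Illustrative examples of reading the counters (the numbers and the
 * decoded state are made up; the layout follows counter_show() below):
 *
 *   $ cat /sys/firmware/acpi/interrupts/sci
 *        742
 *   $ cat /sys/firmware/acpi/interrupts/gpe1A
 *         10  EN STS enabled      unmasked
 *
 * Each gpeXX/ff_* file shows its count and, where applicable, the enable,
 * status, handler and mask state of the event.
 */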

u32 acpi_irq_handled;
u32 acpi_irq_not_handled;

#define COUNT_GPE 0
#define COUNT_SCI 1		/* acpi_irq_handled */
#define COUNT_SCI_NOT 2		/* acpi_irq_not_handled */
#define COUNT_ERROR 3		/* other */
#define NUM_COUNTERS_EXTRA 4

struct event_counter {
	u32 count;
	u32 flags;
};

static struct event_counter *all_counters;
static u32 num_gpes;
static u32 num_counters;
static struct attribute **all_attrs;
static u32 acpi_gpe_count;

static struct attribute_group interrupt_stats_attr_group = {
	.name = "interrupts",
};

static struct kobj_attribute *counter_attrs;

static void delete_gpe_attr_array(void)
{
	struct event_counter *tmp = all_counters;

	all_counters = NULL;
	kfree(tmp);

	if (counter_attrs) {
		int i;

		for (i = 0; i < num_gpes; i++)
			kfree(counter_attrs[i].attr.name);

		kfree(counter_attrs);
	}
	kfree(all_attrs);

	return;
}

static void gpe_count(u32 gpe_number)
{
	acpi_gpe_count++;

	if (!all_counters)
		return;

	if (gpe_number < num_gpes)
		all_counters[gpe_number].count++;
	else
		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
			     COUNT_ERROR].count++;

	return;
}

static void fixed_event_count(u32 event_number)
{
	if (!all_counters)
		return;

	if (event_number < ACPI_NUM_FIXED_EVENTS)
		all_counters[num_gpes + event_number].count++;
	else
		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
			     COUNT_ERROR].count++;

	return;
}

static void acpi_global_event_handler(u32 event_type, acpi_handle device,
	u32 event_number, void *context)
{
	if (event_type == ACPI_EVENT_TYPE_GPE) {
		gpe_count(event_number);
		pr_debug("GPE event 0x%02x\n", event_number);
	} else if (event_type == ACPI_EVENT_TYPE_FIXED) {
		fixed_event_count(event_number);
		pr_debug("Fixed event 0x%02x\n", event_number);
	} else {
		pr_debug("Other event 0x%02x\n", event_number);
	}
}

static int get_status(u32 index, acpi_event_status *ret,
		      acpi_handle *handle)
{
	acpi_status status;

	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
		return -EINVAL;

	if (index < num_gpes) {
		status = acpi_get_gpe_device(index, handle);
		if (ACPI_FAILURE(status)) {
			pr_warn("Invalid GPE 0x%x", index);
			return -ENXIO;
		}
		status = acpi_get_gpe_status(*handle, index, ret);
	} else {
		status = acpi_get_event_status(index - num_gpes, ret);
	}
	if (ACPI_FAILURE(status))
		return -EIO;

	return 0;
}

static ssize_t counter_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	int index = attr - counter_attrs;
	int size;
	acpi_handle handle;
	acpi_event_status status;
	int result = 0;

	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
	    acpi_irq_handled;
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
	    acpi_irq_not_handled;
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
	    acpi_gpe_count;
	size = sprintf(buf, "%8u", all_counters[index].count);

	/* "gpe_all" or "sci" */
	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
		goto end;

	result = get_status(index, &status, &handle);
	if (result)
		goto end;

	if (status & ACPI_EVENT_FLAG_ENABLE_SET)
		size += sprintf(buf + size, "  EN");
	else
		size += sprintf(buf + size, "    ");
	if (status & ACPI_EVENT_FLAG_STATUS_SET)
		size += sprintf(buf + size, " STS");
	else
		size += sprintf(buf + size, "    ");

	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
		size += sprintf(buf + size, " invalid     ");
	else if (status & ACPI_EVENT_FLAG_ENABLED)
		size += sprintf(buf + size, " enabled     ");
	else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
		size += sprintf(buf + size, " wake_enabled");
	else
		size += sprintf(buf + size, " disabled    ");
	if (status & ACPI_EVENT_FLAG_MASKED)
		size += sprintf(buf + size, " masked  ");
	else
		size += sprintf(buf + size, " unmasked");

end:
	size += sprintf(buf + size, "\n");
	return result ? result : size;
}

/*
 * counter_set() sets the specified counter.  Writing any value to the
 * aggregate "sci" file clears all of the counters.  It also lets user
 * space enable, disable or clear a GPE/fixed event.
 */
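
/*
 * Illustrative examples (the GPE number is a placeholder):
 *
 *   echo disable > /sys/firmware/acpi/interrupts/gpe1A
 *   echo enable  > /sys/firmware/acpi/interrupts/gpe1A
 *   echo clear   > /sys/firmware/acpi/interrupts/gpe1A
 *   echo mask    > /sys/firmware/acpi/interrupts/gpe1A
 *   echo unmask  > /sys/firmware/acpi/interrupts/gpe1A
 *   echo 0       > /sys/firmware/acpi/interrupts/sci    # clears all counters
 */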
static ssize_t counter_set(struct kobject *kobj,
			   struct kobj_attribute *attr, const char *buf,
			   size_t size)
{
	int index = attr - counter_attrs;
	acpi_event_status status;
	acpi_handle handle;
	int result = 0;
	unsigned long tmp;

	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
		int i;
		for (i = 0; i < num_counters; ++i)
			all_counters[i].count = 0;
		acpi_gpe_count = 0;
		acpi_irq_handled = 0;
		acpi_irq_not_handled = 0;
		goto end;
	}

	/* show the event status for both GPEs and Fixed Events */
	result = get_status(index, &status, &handle);
	if (result)
		goto end;

	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
		pr_warn("Cannot change an invalid GPE/fixed event status\n");
		return -EINVAL;
	}

	if (index < num_gpes) {
		if (!strcmp(buf, "disable\n") &&
		    (status & ACPI_EVENT_FLAG_ENABLED))
			result = acpi_disable_gpe(handle, index);
		else if (!strcmp(buf, "enable\n") &&
			 !(status & ACPI_EVENT_FLAG_ENABLED))
			result = acpi_enable_gpe(handle, index);
		else if (!strcmp(buf, "clear\n") &&
			 (status & ACPI_EVENT_FLAG_STATUS_SET))
			result = acpi_clear_gpe(handle, index);
		else if (!strcmp(buf, "mask\n"))
			result = acpi_mask_gpe(handle, index, TRUE);
		else if (!strcmp(buf, "unmask\n"))
			result = acpi_mask_gpe(handle, index, FALSE);
		else if (!kstrtoul(buf, 0, &tmp))
			all_counters[index].count = tmp;
		else
			result = -EINVAL;
	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
		int event = index - num_gpes;
		if (!strcmp(buf, "disable\n") &&
		    (status & ACPI_EVENT_FLAG_ENABLE_SET))
			result = acpi_disable_event(event, ACPI_NOT_ISR);
		else if (!strcmp(buf, "enable\n") &&
			 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
			result = acpi_enable_event(event, ACPI_NOT_ISR);
		else if (!strcmp(buf, "clear\n") &&
			 (status & ACPI_EVENT_FLAG_STATUS_SET))
			result = acpi_clear_event(event);
		else if (!kstrtoul(buf, 0, &tmp))
			all_counters[index].count = tmp;
		else
			result = -EINVAL;
	} else
		all_counters[index].count = strtoul(buf, NULL, 0);

	if (ACPI_FAILURE(result))
		result = -EINVAL;
end:
	return result ? result : size;
}

/*
 * A quirk mechanism for GPE flooding prevention:
 *
 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
 * flooding typically cannot be detected and prevented automatically by
 * the ACPI_GPE_DISPATCH_NONE check, because a _Lxx/_Exx handler is
 * present in the AML tables. This normally indicates a feature gap in
 * Linux, so instead of maintaining endless quirk tables we provide a
 * boot parameter for users who need the quirk. For example, to prevent
 * GPE flooding on GPE 00, specify the following boot parameter:
 *   acpi_mask_gpe=0x00
 * The masking status can then be changed at runtime via:
 *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
 */
#define ACPI_MASKABLE_GPE_MAX	0x100
static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;

static int __init acpi_gpe_set_masked_gpes(char *val)
{
	u8 gpe;

	if (kstrtou8(val, 0, &gpe))
		return -EINVAL;
	set_bit(gpe, acpi_masked_gpes_map);

	return 1;
}
__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);

void __init acpi_gpe_apply_masked_gpes(void)
{
	acpi_handle handle;
	acpi_status status;
	u16 gpe;

	for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
		status = acpi_get_gpe_device(gpe, &handle);
		if (ACPI_SUCCESS(status)) {
			pr_info("Masking GPE 0x%x.\n", gpe);
			(void)acpi_mask_gpe(handle, gpe, TRUE);
		}
	}
}

void acpi_irq_stats_init(void)
{
	acpi_status status;
	int i;

	if (all_counters)
		return;

	num_gpes = acpi_current_gpe_count;
	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;

	all_attrs = kcalloc(num_counters + 1, sizeof(struct attribute *),
			    GFP_KERNEL);
	if (all_attrs == NULL)
		return;

	all_counters = kcalloc(num_counters, sizeof(struct event_counter),
			       GFP_KERNEL);
	if (all_counters == NULL)
		goto fail;

	status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
	if (ACPI_FAILURE(status))
		goto fail;

	counter_attrs = kcalloc(num_counters, sizeof(struct kobj_attribute),
				GFP_KERNEL);
	if (counter_attrs == NULL)
		goto fail;

	for (i = 0; i < num_counters; ++i) {
		char buffer[12];
		char *name;

		if (i < num_gpes)
			sprintf(buffer, "gpe%02X", i);
		else if (i == num_gpes + ACPI_EVENT_PMTIMER)
			sprintf(buffer, "ff_pmtimer");
		else if (i == num_gpes + ACPI_EVENT_GLOBAL)
			sprintf(buffer, "ff_gbl_lock");
		else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
			sprintf(buffer, "ff_pwr_btn");
		else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
			sprintf(buffer, "ff_slp_btn");
		else if (i == num_gpes + ACPI_EVENT_RTC)
			sprintf(buffer, "ff_rt_clk");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
			sprintf(buffer, "gpe_all");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
			sprintf(buffer, "sci");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
			sprintf(buffer, "sci_not");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
			sprintf(buffer, "error");
		else
			sprintf(buffer, "bug%02X", i);

		name = kstrdup(buffer, GFP_KERNEL);
		if (name == NULL)
			goto fail;

		sysfs_attr_init(&counter_attrs[i].attr);
		counter_attrs[i].attr.name = name;
		counter_attrs[i].attr.mode = 0644;
		counter_attrs[i].show = counter_show;
		counter_attrs[i].store = counter_set;

		all_attrs[i] = &counter_attrs[i].attr;
	}

	interrupt_stats_attr_group.attrs = all_attrs;
	if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
		return;

fail:
	delete_gpe_attr_array();
	return;
}

static void __exit interrupt_stats_exit(void)
{
	sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);

	delete_gpe_attr_array();

	return;
}

static ssize_t
acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
		  char *buf)
{
	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}

static const struct kobj_attribute pm_profile_attr =
	__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
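
/*
 * The value exposed in /sys/firmware/acpi/pm_profile is the raw FADT
 * Preferred_PM_Profile field.  For reference, the ACPI specification
 * enumerates (listed here as an editorial aid): 0 Unspecified, 1 Desktop,
 * 2 Mobile, 3 Workstation, 4 Enterprise Server, 5 SOHO Server,
 * 6 Appliance PC, 7 Performance Server, 8 Tablet.
 */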

static ssize_t hotplug_enabled_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);

	return sprintf(buf, "%d\n", hotplug->enabled);
}

static ssize_t hotplug_enabled_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t size)
{
	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
	unsigned int val;

	if (kstrtouint(buf, 10, &val) || val > 1)
		return -EINVAL;

	acpi_scan_hotplug_enabled(hotplug, val);
	return size;
}

static struct kobj_attribute hotplug_enabled_attr =
	__ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
		hotplug_enabled_store);

static struct attribute *hotplug_profile_attrs[] = {
	&hotplug_enabled_attr.attr,
	NULL
};

static struct kobj_type acpi_hotplug_profile_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = hotplug_profile_attrs,
};

void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
				    const char *name)
{
	int error;

	if (!hotplug_kobj)
		goto err_out;

	error = kobject_init_and_add(&hotplug->kobj,
		&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
	if (error) {
		kobject_put(&hotplug->kobj);
		goto err_out;
	}

	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
	return;

 err_out:
	pr_err("Unable to add hotplug profile '%s'\n", name);
}

static ssize_t force_remove_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t force_remove_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t size)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	if (val) {
		pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
		return -EINVAL;
	}
	return size;
}

static const struct kobj_attribute force_remove_attr =
	__ATTR(force_remove, S_IRUGO | S_IWUSR, force_remove_show,
	       force_remove_store);

int __init acpi_sysfs_init(void)
{
	int result;

	result = acpi_tables_sysfs_init();
	if (result)
		return result;

	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
	if (!hotplug_kobj)
		return -ENOMEM;

	result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
	if (result)
		return result;

	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
	return result;
}
1036