// SPDX-License-Identifier: GPL-2.0
/*
 * sysfs.c - ACPI sysfs interface to userspace.
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>

#include "internal.h"

#ifdef CONFIG_ACPI_DEBUG
/*
 * ACPI debug sysfs I/F, including:
 * /sys/module/acpi/parameters/debug_layer
 * /sys/module/acpi/parameters/debug_level
 * /sys/module/acpi/parameters/trace_method_name
 * /sys/module/acpi/parameters/trace_state
 * /sys/module/acpi/parameters/trace_debug_layer
 * /sys/module/acpi/parameters/trace_debug_level
 */

struct acpi_dlayer {
	const char *name;
	unsigned long value;
};
struct acpi_dlevel {
	const char *name;
	unsigned long value;
};
#define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }

static const struct acpi_dlayer acpi_debug_layers[] = {
	ACPI_DEBUG_INIT(ACPI_UTILITIES),
	ACPI_DEBUG_INIT(ACPI_HARDWARE),
	ACPI_DEBUG_INIT(ACPI_EVENTS),
	ACPI_DEBUG_INIT(ACPI_TABLES),
	ACPI_DEBUG_INIT(ACPI_NAMESPACE),
	ACPI_DEBUG_INIT(ACPI_PARSER),
	ACPI_DEBUG_INIT(ACPI_DISPATCHER),
	ACPI_DEBUG_INIT(ACPI_EXECUTER),
	ACPI_DEBUG_INIT(ACPI_RESOURCES),
	ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
	ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
	ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
	ACPI_DEBUG_INIT(ACPI_COMPILER),
	ACPI_DEBUG_INIT(ACPI_TOOLS),
};

static const struct acpi_dlevel acpi_debug_levels[] = {
	ACPI_DEBUG_INIT(ACPI_LV_INIT),
	ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
	ACPI_DEBUG_INIT(ACPI_LV_INFO),
	ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
	ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),

	ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
	ACPI_DEBUG_INIT(ACPI_LV_PARSE),
	ACPI_DEBUG_INIT(ACPI_LV_LOAD),
	ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
	ACPI_DEBUG_INIT(ACPI_LV_EXEC),
	ACPI_DEBUG_INIT(ACPI_LV_NAMES),
	ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
	ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
	ACPI_DEBUG_INIT(ACPI_LV_TABLES),
	ACPI_DEBUG_INIT(ACPI_LV_VALUES),
	ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
	ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
	ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
	ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),

	ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
	ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
	ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),

	ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
	ACPI_DEBUG_INIT(ACPI_LV_THREADS),
	ACPI_DEBUG_INIT(ACPI_LV_IO),
	ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),

	ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
	ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
	ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
	ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
};

static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
{
	int result = 0;
	int i;

	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");

	for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
				  acpi_debug_layers[i].name,
				  acpi_debug_layers[i].value,
				  (acpi_dbg_layer & acpi_debug_layers[i].value)
				  ? '*' : ' ');
	}
	result +=
	    sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
		    ACPI_ALL_DRIVERS,
		    (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
		    ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
		    == 0 ? ' ' : '-');
	result +=
	    sprintf(buffer + result,
		    "--\ndebug_layer = 0x%08X ( * = enabled)\n",
		    acpi_dbg_layer);

	return result;
}

static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
{
	int result = 0;
	int i;

	result = sprintf(buffer, "%-25s\tHex        SET\n", "Description");

	for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
				  acpi_debug_levels[i].name,
				  acpi_debug_levels[i].value,
				  (acpi_dbg_level & acpi_debug_levels[i].value)
				  ? '*' : ' ');
	}
	result +=
	    sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
		    acpi_dbg_level);

	return result;
}

static const struct kernel_param_ops param_ops_debug_layer = {
	.set = param_set_uint,
	.get = param_get_debug_layer,
};

static const struct kernel_param_ops param_ops_debug_level = {
	.set = param_set_uint,
	.get = param_get_debug_level,
};

module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
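
/*
 * Example usage from user space (the values are illustrative):
 *   # cat /sys/module/acpi/parameters/debug_layer
 *   # echo 0x00000004 > /sys/module/acpi/parameters/debug_layer
 *   # echo 0x00000002 > /sys/module/acpi/parameters/debug_level
 * Reading either parameter lists the available layers/levels and marks
 * the ones currently set; writing takes a hex bitmask built from them.
 */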

static char trace_method_name[1024];

static int param_set_trace_method_name(const char *val,
				       const struct kernel_param *kp)
{
	u32 saved_flags = 0;
	bool is_abs_path = true;

	if (*val != '\\')
		is_abs_path = false;

	if ((is_abs_path && strlen(val) > 1023) ||
	    (!is_abs_path && strlen(val) > 1022)) {
		pr_err("%s: string parameter too long\n", kp->name);
		return -ENOSPC;
	}

	/*
	 * It's not safe to update acpi_gbl_trace_method_name without
	 * having the tracer stopped, so we save the original tracer
	 * state and disable it.
	 */
	saved_flags = acpi_gbl_trace_flags;
	(void)acpi_debug_trace(NULL,
			       acpi_gbl_trace_dbg_level,
			       acpi_gbl_trace_dbg_layer,
			       0);

	/* This is a hack. We can't kmalloc in early boot. */
	if (is_abs_path)
		strcpy(trace_method_name, val);
	else {
		trace_method_name[0] = '\\';
		strcpy(trace_method_name + 1, val);
	}

	/* Restore the original tracer state */
	(void)acpi_debug_trace(trace_method_name,
			       acpi_gbl_trace_dbg_level,
			       acpi_gbl_trace_dbg_layer,
			       saved_flags);

	return 0;
}

static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
{
	return scnprintf(buffer, PAGE_SIZE, "%s\n", acpi_gbl_trace_method_name);
}

static const struct kernel_param_ops param_ops_trace_method = {
	.set = param_set_trace_method_name,
	.get = param_get_trace_method_name,
};

static const struct kernel_param_ops param_ops_trace_attrib = {
	.set = param_set_uint,
	.get = param_get_uint,
};

module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);

static int param_set_trace_state(const char *val,
				 const struct kernel_param *kp)
{
	acpi_status status;
	const char *method = trace_method_name;
	u32 flags = 0;

	/* Note: the "xxx-once" comparisons must come before the "xxx" ones */
#define acpi_compare_param(val, key)	\
	strncmp((val), (key), sizeof(key) - 1)

	if (!acpi_compare_param(val, "enable")) {
		method = NULL;
		flags = ACPI_TRACE_ENABLED;
	} else if (!acpi_compare_param(val, "disable"))
		method = NULL;
	else if (!acpi_compare_param(val, "method-once"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
	else if (!acpi_compare_param(val, "method"))
		flags = ACPI_TRACE_ENABLED;
	else if (!acpi_compare_param(val, "opcode-once"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
	else if (!acpi_compare_param(val, "opcode"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
	else
		return -EINVAL;

	status = acpi_debug_trace(method,
				  acpi_gbl_trace_dbg_level,
				  acpi_gbl_trace_dbg_layer,
				  flags);
	if (ACPI_FAILURE(status))
		return -EBUSY;

	return 0;
}

static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
{
	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
		return sprintf(buffer, "disable\n");
	if (!acpi_gbl_trace_method_name)
		return sprintf(buffer, "enable\n");
	if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
		return sprintf(buffer, "method-once\n");
	else
		return sprintf(buffer, "method\n");
}

module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
		  NULL, 0644);
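
/*
 * Example AML method tracing setup (the method path is illustrative):
 *   # echo '\_SB.PCI0._INI' > /sys/module/acpi/parameters/trace_method_name
 *   # echo method-once > /sys/module/acpi/parameters/trace_state
 * Accepted trace_state values are "enable", "disable", "method",
 * "method-once", "opcode" and "opcode-once"; see param_set_trace_state().
 */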

#endif /* CONFIG_ACPI_DEBUG */

/* /sys/module/acpi/parameters/aml_debug_output */

module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
		   byte, 0644);
MODULE_PARM_DESC(aml_debug_output,
		 "To enable/disable the ACPI Debug Object output.");

/* /sys/module/acpi/parameters/acpica_version */
static int param_get_acpica_version(char *buffer,
				    const struct kernel_param *kp)
{
	int result;

	result = sprintf(buffer, "%x\n", ACPI_CA_VERSION);

	return result;
}

module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);

/*
 * ACPI table sysfs I/F:
 * /sys/firmware/acpi/tables/
 * /sys/firmware/acpi/tables/data/
 * /sys/firmware/acpi/tables/dynamic/
 */

static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
static struct kobject *tables_data_kobj;
static struct kobject *dynamic_tables_kobj;
static struct kobject *hotplug_kobj;

#define ACPI_MAX_TABLE_INSTANCES	999
#define ACPI_INST_SIZE			4 /* including trailing 0 */

struct acpi_table_attr {
	struct bin_attribute attr;
	char name[ACPI_NAMESEG_SIZE];
	int instance;
	char filename[ACPI_NAMESEG_SIZE + ACPI_INST_SIZE];
	struct list_head node;
};

struct acpi_data_attr {
	struct bin_attribute attr;
	u64 addr;
};

static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t offset, size_t count)
{
	struct acpi_table_attr *table_attr =
	    container_of(bin_attr, struct acpi_table_attr, attr);
	struct acpi_table_header *table_header = NULL;
	acpi_status status;
	ssize_t rc;

	status = acpi_get_table(table_attr->name, table_attr->instance,
				&table_header);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	rc = memory_read_from_buffer(buf, count, &offset, table_header,
				     table_header->length);
	acpi_put_table(table_header);
	return rc;
}

static int acpi_table_attr_init(struct kobject *tables_obj,
				struct acpi_table_attr *table_attr,
				struct acpi_table_header *table_header)
{
	struct acpi_table_header *header = NULL;
	struct acpi_table_attr *attr = NULL;
	char instance_str[ACPI_INST_SIZE];

	sysfs_attr_init(&table_attr->attr.attr);
	ACPI_COPY_NAMESEG(table_attr->name, table_header->signature);

	list_for_each_entry(attr, &acpi_table_attr_list, node) {
		if (ACPI_COMPARE_NAMESEG(table_attr->name, attr->name))
			if (table_attr->instance < attr->instance)
				table_attr->instance = attr->instance;
	}
	table_attr->instance++;
	if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
		pr_warn("%4.4s: too many table instances\n", table_attr->name);
		return -ERANGE;
	}

	ACPI_COPY_NAMESEG(table_attr->filename, table_header->signature);
	table_attr->filename[ACPI_NAMESEG_SIZE] = '\0';
	if (table_attr->instance > 1 || (table_attr->instance == 1 &&
					 !acpi_get_table
					 (table_header->signature, 2, &header))) {
		snprintf(instance_str, sizeof(instance_str), "%u",
			 table_attr->instance);
		strcat(table_attr->filename, instance_str);
	}

	table_attr->attr.size = table_header->length;
	table_attr->attr.read = acpi_table_show;
	table_attr->attr.attr.name = table_attr->filename;
	table_attr->attr.attr.mode = 0400;

	return sysfs_create_bin_file(tables_obj, &table_attr->attr);
}

/* Handle ACPI table install/load/unload/uninstall events for sysfs. */
acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
{
	struct acpi_table_attr *table_attr;

	switch (event) {
	case ACPI_TABLE_EVENT_INSTALL:
		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
		if (!table_attr)
			return AE_NO_MEMORY;

		if (acpi_table_attr_init(dynamic_tables_kobj,
					 table_attr, table)) {
			kfree(table_attr);
			return AE_ERROR;
		}
		list_add_tail(&table_attr->node, &acpi_table_attr_list);
		break;
	case ACPI_TABLE_EVENT_LOAD:
	case ACPI_TABLE_EVENT_UNLOAD:
	case ACPI_TABLE_EVENT_UNINSTALL:
		/*
		 * We do not need to do anything right now because the
		 * table is not deleted from the global table list when
		 * it is unloaded.
		 */
		break;
	default:
		return AE_BAD_PARAMETER;
	}
	return AE_OK;
}

static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr, char *buf,
			      loff_t offset, size_t count)
{
	struct acpi_data_attr *data_attr;
	void *base;
	ssize_t rc;

	data_attr = container_of(bin_attr, struct acpi_data_attr, attr);

	base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
	if (!base)
		return -ENOMEM;
	rc = memory_read_from_buffer(buf, count, &offset, base,
				     data_attr->attr.size);
	acpi_os_unmap_memory(base, data_attr->attr.size);

	return rc;
}

static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
{
	struct acpi_table_bert *bert = th;

	if (bert->header.length < sizeof(struct acpi_table_bert) ||
	    bert->region_length < sizeof(struct acpi_hest_generic_status)) {
		kfree(data_attr);
		return -EINVAL;
	}
	data_attr->addr = bert->address;
	data_attr->attr.size = bert->region_length;
	data_attr->attr.attr.name = "BERT";

	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
}

static struct acpi_data_obj {
	char *name;
	int (*fn)(void *, struct acpi_data_attr *);
} acpi_data_objs[] = {
	{ ACPI_SIG_BERT, acpi_bert_data_init },
};

#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)

static int acpi_table_data_init(struct acpi_table_header *th)
{
	struct acpi_data_attr *data_attr;
	int i;

	for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
		if (ACPI_COMPARE_NAMESEG(th->signature, acpi_data_objs[i].name)) {
			data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
			if (!data_attr)
				return -ENOMEM;
			sysfs_attr_init(&data_attr->attr.attr);
			data_attr->attr.read = acpi_data_show;
			data_attr->attr.attr.mode = 0400;
			return acpi_data_objs[i].fn(th, data_attr);
		}
	}
	return 0;
}

/*
 * Create /sys/firmware/acpi/tables/ (plus its data/ and dynamic/
 * subdirectories) and add one read-only binary file per installed table.
 */
static int acpi_tables_sysfs_init(void)
{
	struct acpi_table_attr *table_attr;
	struct acpi_table_header *table_header = NULL;
	int table_index;
	acpi_status status;
	int ret;

	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
	if (!tables_kobj)
		goto err;

	tables_data_kobj = kobject_create_and_add("data", tables_kobj);
	if (!tables_data_kobj)
		goto err_tables_data;

	dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
	if (!dynamic_tables_kobj)
		goto err_dynamic_tables;

	for (table_index = 0;; table_index++) {
		status = acpi_get_table_by_index(table_index, &table_header);

		if (status == AE_BAD_PARAMETER)
			break;

		if (ACPI_FAILURE(status))
			continue;

		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
		if (!table_attr)
			return -ENOMEM;

		ret = acpi_table_attr_init(tables_kobj,
					   table_attr, table_header);
		if (ret) {
			kfree(table_attr);
			return ret;
		}
		list_add_tail(&table_attr->node, &acpi_table_attr_list);
		acpi_table_data_init(table_header);
	}

	kobject_uevent(tables_kobj, KOBJ_ADD);
	kobject_uevent(tables_data_kobj, KOBJ_ADD);
	kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);

	return 0;
err_dynamic_tables:
	kobject_put(tables_data_kobj);
err_tables_data:
	kobject_put(tables_kobj);
err:
	return -ENOMEM;
}

/*
 * Detailed ACPI IRQ counters:
 * /sys/firmware/acpi/interrupts/
 */
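
/*
 * Example usage (the GPE number is illustrative):
 *   # cat /sys/firmware/acpi/interrupts/sci
 *   # echo disable > /sys/firmware/acpi/interrupts/gpe1A
 * Every GPE and fixed event gets its own counter file.  Writing "enable",
 * "disable" or "clear" controls a GPE or fixed event, "mask"/"unmask"
 * apply to GPEs only, writing a number sets the counter, and writing any
 * value to "sci" clears all counters (see counter_set() below).
 */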

u32 acpi_irq_handled;
u32 acpi_irq_not_handled;

#define COUNT_GPE 0
#define COUNT_SCI 1	/* acpi_irq_handled */
#define COUNT_SCI_NOT 2	/* acpi_irq_not_handled */
#define COUNT_ERROR 3	/* other */
#define NUM_COUNTERS_EXTRA 4

struct event_counter {
	u32 count;
	u32 flags;
};

static struct event_counter *all_counters;
static u32 num_gpes;
static u32 num_counters;
static struct attribute **all_attrs;
static u32 acpi_gpe_count;

static struct attribute_group interrupt_stats_attr_group = {
	.name = "interrupts",
};

static struct kobj_attribute *counter_attrs;

static void delete_gpe_attr_array(void)
{
	struct event_counter *tmp = all_counters;

	all_counters = NULL;
	kfree(tmp);

	if (counter_attrs) {
		int i;

		for (i = 0; i < num_gpes; i++)
			kfree(counter_attrs[i].attr.name);

		kfree(counter_attrs);
	}
	kfree(all_attrs);
}

static void gpe_count(u32 gpe_number)
{
	acpi_gpe_count++;

	if (!all_counters)
		return;

	if (gpe_number < num_gpes)
		all_counters[gpe_number].count++;
	else
		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
			     COUNT_ERROR].count++;
}

static void fixed_event_count(u32 event_number)
{
	if (!all_counters)
		return;

	if (event_number < ACPI_NUM_FIXED_EVENTS)
		all_counters[num_gpes + event_number].count++;
	else
		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
			     COUNT_ERROR].count++;
}

static void acpi_global_event_handler(u32 event_type, acpi_handle device,
				      u32 event_number, void *context)
{
	if (event_type == ACPI_EVENT_TYPE_GPE) {
		gpe_count(event_number);
		pr_debug("GPE event 0x%02x\n", event_number);
	} else if (event_type == ACPI_EVENT_TYPE_FIXED) {
		fixed_event_count(event_number);
		pr_debug("Fixed event 0x%02x\n", event_number);
	} else {
		pr_debug("Other event 0x%02x\n", event_number);
	}
}

static int get_status(u32 index, acpi_event_status *ret,
		      acpi_handle *handle)
{
	acpi_status status;

	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
		return -EINVAL;

	if (index < num_gpes) {
		status = acpi_get_gpe_device(index, handle);
		if (ACPI_FAILURE(status)) {
			pr_warn("Invalid GPE 0x%x", index);
			return -ENXIO;
		}
		status = acpi_get_gpe_status(*handle, index, ret);
	} else {
		status = acpi_get_event_status(index - num_gpes, ret);
	}
	if (ACPI_FAILURE(status))
		return -EIO;

	return 0;
}

static ssize_t counter_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	int index = attr - counter_attrs;
	int size;
	acpi_handle handle;
	acpi_event_status status;
	int result = 0;

	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
	    acpi_irq_handled;
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
	    acpi_irq_not_handled;
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
	    acpi_gpe_count;
	size = sprintf(buf, "%8u", all_counters[index].count);

	/* "gpe_all" or "sci" */
	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
		goto end;

	result = get_status(index, &status, &handle);
	if (result)
		goto end;

	if (status & ACPI_EVENT_FLAG_ENABLE_SET)
		size += sprintf(buf + size, "  EN");
	else
		size += sprintf(buf + size, "    ");
	if (status & ACPI_EVENT_FLAG_STATUS_SET)
		size += sprintf(buf + size, " STS");
	else
		size += sprintf(buf + size, "    ");

	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
		size += sprintf(buf + size, " invalid     ");
	else if (status & ACPI_EVENT_FLAG_ENABLED)
		size += sprintf(buf + size, " enabled     ");
	else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
		size += sprintf(buf + size, " wake_enabled");
	else
		size += sprintf(buf + size, " disabled    ");
	if (status & ACPI_EVENT_FLAG_MASKED)
		size += sprintf(buf + size, " masked  ");
	else
		size += sprintf(buf + size, " unmasked");

end:
	size += sprintf(buf + size, "\n");
	return result ? result : size;
}

/*
 * counter_set() sets the specified counter.
 * Writing any value to the total "sci" file clears all counters.
 * It also allows a GPE or fixed event to be enabled, disabled or
 * cleared from user space.
 */
static ssize_t counter_set(struct kobject *kobj,
			   struct kobj_attribute *attr, const char *buf,
			   size_t size)
{
	int index = attr - counter_attrs;
	acpi_event_status status;
	acpi_handle handle;
	int result = 0;
	unsigned long tmp;

	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
		int i;

		for (i = 0; i < num_counters; ++i)
			all_counters[i].count = 0;
		acpi_gpe_count = 0;
		acpi_irq_handled = 0;
		acpi_irq_not_handled = 0;
		goto end;
	}

	/* show the event status for both GPEs and Fixed Events */
	result = get_status(index, &status, &handle);
	if (result)
		goto end;

	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
		pr_warn("Can not change Invalid GPE/Fixed Event status\n");
		return -EINVAL;
	}

	if (index < num_gpes) {
		if (!strcmp(buf, "disable\n") &&
		    (status & ACPI_EVENT_FLAG_ENABLED))
			result = acpi_disable_gpe(handle, index);
		else if (!strcmp(buf, "enable\n") &&
			 !(status & ACPI_EVENT_FLAG_ENABLED))
			result = acpi_enable_gpe(handle, index);
		else if (!strcmp(buf, "clear\n") &&
			 (status & ACPI_EVENT_FLAG_STATUS_SET))
			result = acpi_clear_gpe(handle, index);
		else if (!strcmp(buf, "mask\n"))
			result = acpi_mask_gpe(handle, index, TRUE);
		else if (!strcmp(buf, "unmask\n"))
			result = acpi_mask_gpe(handle, index, FALSE);
		else if (!kstrtoul(buf, 0, &tmp))
			all_counters[index].count = tmp;
		else
			result = -EINVAL;
	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
		int event = index - num_gpes;

		if (!strcmp(buf, "disable\n") &&
		    (status & ACPI_EVENT_FLAG_ENABLE_SET))
			result = acpi_disable_event(event, ACPI_NOT_ISR);
		else if (!strcmp(buf, "enable\n") &&
			 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
			result = acpi_enable_event(event, ACPI_NOT_ISR);
		else if (!strcmp(buf, "clear\n") &&
			 (status & ACPI_EVENT_FLAG_STATUS_SET))
			result = acpi_clear_event(event);
		else if (!kstrtoul(buf, 0, &tmp))
			all_counters[index].count = tmp;
		else
			result = -EINVAL;
	} else
		all_counters[index].count = strtoul(buf, NULL, 0);

	if (ACPI_FAILURE(result))
		result = -EINVAL;
end:
	return result ? result : size;
}

/*
 * A Quirk Mechanism for GPE Flooding Prevention:
 *
 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
 * flooding typically cannot be detected and automatically prevented by
 * the ACPI_GPE_DISPATCH_NONE check because a _Lxx/_Exx handler is present
 * in the AML tables. This normally indicates a feature gap in Linux, so
 * instead of providing endless quirk tables, we provide a boot parameter
 * for those who need this quirk. For example, to prevent GPE flooding on
 * GPE 00, specify the following boot parameter:
 *   acpi_mask_gpe=0x00
 * Note that the parameter can be a list (see bitmap_parselist() for the
 * details).
 * The masking status can be modified at runtime via:
 *   echo unmask > /sys/firmware/acpi/interrupts/gpe00
 */
#define ACPI_MASKABLE_GPE_MAX	0x100
static DECLARE_BITMAP(acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) __initdata;

static int __init acpi_gpe_set_masked_gpes(char *val)
{
	int ret;
	u8 gpe;

	ret = kstrtou8(val, 0, &gpe);
	if (ret) {
		ret = bitmap_parselist(val, acpi_masked_gpes_map,
				       ACPI_MASKABLE_GPE_MAX);
		if (ret)
			return ret;
	} else
		set_bit(gpe, acpi_masked_gpes_map);

	return 1;
}
__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
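
/*
 * Apply the GPE masks collected from the acpi_mask_gpe= boot parameter
 * once the GPE devices can be resolved; a masked GPE can still be
 * unmasked later through /sys/firmware/acpi/interrupts/gpeXX.
 */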

void __init acpi_gpe_apply_masked_gpes(void)
{
	acpi_handle handle;
	acpi_status status;
	u16 gpe;

	for_each_set_bit(gpe, acpi_masked_gpes_map, ACPI_MASKABLE_GPE_MAX) {
		status = acpi_get_gpe_device(gpe, &handle);
		if (ACPI_SUCCESS(status)) {
			pr_info("Masking GPE 0x%x.\n", gpe);
			(void)acpi_mask_gpe(handle, gpe, TRUE);
		}
	}
}
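
/*
 * Allocate one event_counter and one sysfs attribute per GPE, per fixed
 * event and per summary counter ("gpe_all", "sci", "sci_not", "error"),
 * install the global event handler and create the "interrupts" attribute
 * group under /sys/firmware/acpi/.  On any failure the partially built
 * arrays are freed again via delete_gpe_attr_array().
 */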

void acpi_irq_stats_init(void)
{
	acpi_status status;
	int i;

	if (all_counters)
		return;

	num_gpes = acpi_current_gpe_count;
	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;

	all_attrs = kcalloc(num_counters + 1, sizeof(*all_attrs), GFP_KERNEL);
	if (all_attrs == NULL)
		return;

	all_counters = kcalloc(num_counters, sizeof(*all_counters), GFP_KERNEL);
	if (all_counters == NULL)
		goto fail;

	status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
	if (ACPI_FAILURE(status))
		goto fail;

	counter_attrs = kcalloc(num_counters, sizeof(*counter_attrs), GFP_KERNEL);
	if (counter_attrs == NULL)
		goto fail;

	for (i = 0; i < num_counters; ++i) {
		char buffer[12];
		char *name;

		if (i < num_gpes)
			sprintf(buffer, "gpe%02X", i);
		else if (i == num_gpes + ACPI_EVENT_PMTIMER)
			sprintf(buffer, "ff_pmtimer");
		else if (i == num_gpes + ACPI_EVENT_GLOBAL)
			sprintf(buffer, "ff_gbl_lock");
		else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
			sprintf(buffer, "ff_pwr_btn");
		else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
			sprintf(buffer, "ff_slp_btn");
		else if (i == num_gpes + ACPI_EVENT_RTC)
			sprintf(buffer, "ff_rt_clk");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
			sprintf(buffer, "gpe_all");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
			sprintf(buffer, "sci");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
			sprintf(buffer, "sci_not");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
			sprintf(buffer, "error");
		else
			sprintf(buffer, "bug%02X", i);

		name = kstrdup(buffer, GFP_KERNEL);
		if (name == NULL)
			goto fail;

		sysfs_attr_init(&counter_attrs[i].attr);
		counter_attrs[i].attr.name = name;
		counter_attrs[i].attr.mode = 0644;
		counter_attrs[i].show = counter_show;
		counter_attrs[i].store = counter_set;

		all_attrs[i] = &counter_attrs[i].attr;
	}

	interrupt_stats_attr_group.attrs = all_attrs;
	if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
		return;

fail:
	delete_gpe_attr_array();
}

static void __exit interrupt_stats_exit(void)
{
	sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);

	delete_gpe_attr_array();
}

static ssize_t pm_profile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}

static const struct kobj_attribute pm_profile_attr = __ATTR_RO(pm_profile);

static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);

	return sprintf(buf, "%d\n", hotplug->enabled);
}

static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t size)
{
	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
	unsigned int val;

	if (kstrtouint(buf, 10, &val) || val > 1)
		return -EINVAL;

	acpi_scan_hotplug_enabled(hotplug, val);
	return size;
}

static struct kobj_attribute hotplug_enabled_attr = __ATTR_RW(enabled);

static struct attribute *hotplug_profile_attrs[] = {
	&hotplug_enabled_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(hotplug_profile);

static struct kobj_type acpi_hotplug_profile_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = hotplug_profile_groups,
};

void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
				    const char *name)
{
	int error;

	if (!hotplug_kobj)
		goto err_out;

	error = kobject_init_and_add(&hotplug->kobj,
				     &acpi_hotplug_profile_ktype,
				     hotplug_kobj, "%s", name);
	if (error) {
		kobject_put(&hotplug->kobj);
		goto err_out;
	}

	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
	return;

err_out:
	pr_err("Unable to add hotplug profile '%s'\n", name);
}

static ssize_t force_remove_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t force_remove_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t size)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	if (val) {
		pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
		return -EINVAL;
	}
	return size;
}

static const struct kobj_attribute force_remove_attr = __ATTR_RW(force_remove);

int __init acpi_sysfs_init(void)
{
	int result;

	result = acpi_tables_sysfs_init();
	if (result)
		return result;

	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
	if (!hotplug_kobj)
		return -ENOMEM;

	result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
	if (result)
		return result;

	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
	return result;
}