/*
 * sysfs.c - ACPI sysfs interface to userspace.
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/acpi.h>

#include "internal.h"

#define _COMPONENT		ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("sysfs");

#ifdef CONFIG_ACPI_DEBUG
/*
 * ACPI debug sysfs I/F, including:
 * /sys/module/acpi/parameters/debug_layer
 * /sys/module/acpi/parameters/debug_level
 * /sys/module/acpi/parameters/trace_method_name
 * /sys/module/acpi/parameters/trace_state
 * /sys/module/acpi/parameters/trace_debug_layer
 * /sys/module/acpi/parameters/trace_debug_level
 */

struct acpi_dlayer {
	const char *name;
	unsigned long value;
};
struct acpi_dlevel {
	const char *name;
	unsigned long value;
};
#define ACPI_DEBUG_INIT(v)	{ .name = #v, .value = v }

static const struct acpi_dlayer acpi_debug_layers[] = {
	ACPI_DEBUG_INIT(ACPI_UTILITIES),
	ACPI_DEBUG_INIT(ACPI_HARDWARE),
	ACPI_DEBUG_INIT(ACPI_EVENTS),
	ACPI_DEBUG_INIT(ACPI_TABLES),
	ACPI_DEBUG_INIT(ACPI_NAMESPACE),
	ACPI_DEBUG_INIT(ACPI_PARSER),
	ACPI_DEBUG_INIT(ACPI_DISPATCHER),
	ACPI_DEBUG_INIT(ACPI_EXECUTER),
	ACPI_DEBUG_INIT(ACPI_RESOURCES),
	ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
	ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
	ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
	ACPI_DEBUG_INIT(ACPI_COMPILER),
	ACPI_DEBUG_INIT(ACPI_TOOLS),

	ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
	ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
};

static const struct acpi_dlevel acpi_debug_levels[] = {
	ACPI_DEBUG_INIT(ACPI_LV_INIT),
	ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
	ACPI_DEBUG_INIT(ACPI_LV_INFO),
	ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
	ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),

	ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
	ACPI_DEBUG_INIT(ACPI_LV_PARSE),
	ACPI_DEBUG_INIT(ACPI_LV_LOAD),
	ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
	ACPI_DEBUG_INIT(ACPI_LV_EXEC),
	ACPI_DEBUG_INIT(ACPI_LV_NAMES),
	ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
	ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
	ACPI_DEBUG_INIT(ACPI_LV_TABLES),
	ACPI_DEBUG_INIT(ACPI_LV_VALUES),
	ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
	ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
	ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
	ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),

	ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
	ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
	ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),

	ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
	ACPI_DEBUG_INIT(ACPI_LV_THREADS),
	ACPI_DEBUG_INIT(ACPI_LV_IO),
	ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),

	ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
	ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
	ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
	ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
};

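/*
 * The two "get" handlers below list every known layer/level together with
 * its hex value and mark the bits currently set in acpi_dbg_layer and
 * acpi_dbg_level with '*'.
 */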
static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
{
	int result = 0;
	int i;

	result = sprintf(buffer, "%-25s\tHex SET\n", "Description");

	for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
				  acpi_debug_layers[i].name,
				  acpi_debug_layers[i].value,
				  (acpi_dbg_layer & acpi_debug_layers[i].value)
				  ? '*' : ' ');
	}
	result +=
	    sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
		    ACPI_ALL_DRIVERS,
		    (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
		    ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
		    == 0 ? ' ' : '-');
	result +=
	    sprintf(buffer + result,
		    "--\ndebug_layer = 0x%08X ( * = enabled)\n",
		    acpi_dbg_layer);

	return result;
}

static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
{
	int result = 0;
	int i;

	result = sprintf(buffer, "%-25s\tHex SET\n", "Description");

	for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
		result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
				  acpi_debug_levels[i].name,
				  acpi_debug_levels[i].value,
				  (acpi_dbg_level & acpi_debug_levels[i].value)
				  ? '*' : ' ');
	}
	result +=
	    sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
		    acpi_dbg_level);

	return result;
}

static const struct kernel_param_ops param_ops_debug_layer = {
	.set = param_set_uint,
	.get = param_get_debug_layer,
};

static const struct kernel_param_ops param_ops_debug_level = {
	.set = param_set_uint,
	.get = param_get_debug_level,
};

module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);

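/*
 * Illustrative usage (the values are examples only):
 *   # cat /sys/module/acpi/parameters/debug_layer
 *   # echo 0x2 > /sys/module/acpi/parameters/debug_level
 * Reading either file prints the table rendered above; writing stores a new
 * hex mask.
 */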
static char trace_method_name[1024];

int param_set_trace_method_name(const char *val, const struct kernel_param *kp)
{
	u32 saved_flags = 0;
	bool is_abs_path = true;

	if (*val != '\\')
		is_abs_path = false;

	if ((is_abs_path && strlen(val) > 1023) ||
	    (!is_abs_path && strlen(val) > 1022)) {
		pr_err("%s: string parameter too long\n", kp->name);
		return -ENOSPC;
	}

	/*
	 * It's not safe to update acpi_gbl_trace_method_name without
	 * having the tracer stopped, so we save the original tracer
	 * state and disable it.
	 */
	saved_flags = acpi_gbl_trace_flags;
	(void)acpi_debug_trace(NULL,
			       acpi_gbl_trace_dbg_level,
			       acpi_gbl_trace_dbg_layer,
			       0);

	/* This is a hack. We can't kmalloc in early boot. */
	if (is_abs_path)
		strcpy(trace_method_name, val);
	else {
		trace_method_name[0] = '\\';
		strcpy(trace_method_name + 1, val);
	}

	/* Restore the original tracer state */
	(void)acpi_debug_trace(trace_method_name,
			       acpi_gbl_trace_dbg_level,
			       acpi_gbl_trace_dbg_layer,
			       saved_flags);

	return 0;
}

static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
{
	return scnprintf(buffer, PAGE_SIZE, "%s", acpi_gbl_trace_method_name);
}

static const struct kernel_param_ops param_ops_trace_method = {
	.set = param_set_trace_method_name,
	.get = param_get_trace_method_name,
};

static const struct kernel_param_ops param_ops_trace_attrib = {
	.set = param_set_uint,
	.get = param_get_uint,
};

module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);

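/*
 * Illustrative usage of the tracing parameters (the method name is an
 * example only):
 *   # echo "\_SB.PCI0._INI" > /sys/module/acpi/parameters/trace_method_name
 *   # echo method-once > /sys/module/acpi/parameters/trace_state
 * Writing "disable" to trace_state stops the trace again.
 */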
static int param_set_trace_state(const char *val,
				 const struct kernel_param *kp)
{
	acpi_status status;
	const char *method = trace_method_name;
	u32 flags = 0;

/* "xxx-once" must be compared before the plain "xxx" keyword */
#define acpi_compare_param(val, key)	\
	strncmp((val), (key), sizeof(key) - 1)

	if (!acpi_compare_param(val, "enable")) {
		method = NULL;
		flags = ACPI_TRACE_ENABLED;
	} else if (!acpi_compare_param(val, "disable"))
		method = NULL;
	else if (!acpi_compare_param(val, "method-once"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
	else if (!acpi_compare_param(val, "method"))
		flags = ACPI_TRACE_ENABLED;
	else if (!acpi_compare_param(val, "opcode-once"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
	else if (!acpi_compare_param(val, "opcode"))
		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
	else
		return -EINVAL;

	status = acpi_debug_trace(method,
				  acpi_gbl_trace_dbg_level,
				  acpi_gbl_trace_dbg_layer,
				  flags);
	if (ACPI_FAILURE(status))
		return -EBUSY;

	return 0;
}

static int param_get_trace_state(char *buffer, const struct kernel_param *kp)
{
	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
		return sprintf(buffer, "disable");
	else {
		if (acpi_gbl_trace_method_name) {
			if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
				return sprintf(buffer, "method-once");
			else
				return sprintf(buffer, "method");
		} else
			return sprintf(buffer, "enable");
	}
	return 0;
}

module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
		  NULL, 0644);
#endif /* CONFIG_ACPI_DEBUG */


/* /sys/module/acpi/parameters/aml_debug_output */

module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
		   byte, 0644);
MODULE_PARM_DESC(aml_debug_output,
		 "Enable/disable the ACPI Debug Object output.");

/* /sys/module/acpi/parameters/acpica_version */
static int param_get_acpica_version(char *buffer,
				    const struct kernel_param *kp)
{
	int result;

	result = sprintf(buffer, "%x", ACPI_CA_VERSION);

	return result;
}

module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);

/*
 * ACPI table sysfs I/F:
 * /sys/firmware/acpi/tables/
 * /sys/firmware/acpi/tables/data/
 * /sys/firmware/acpi/tables/dynamic/
 */

static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
static struct kobject *tables_data_kobj;
static struct kobject *dynamic_tables_kobj;
static struct kobject *hotplug_kobj;

#define ACPI_MAX_TABLE_INSTANCES	999
#define ACPI_INST_SIZE			4 /* including trailing 0 */

struct acpi_table_attr {
	struct bin_attribute attr;
	char name[ACPI_NAME_SIZE];
	int instance;
	char filename[ACPI_NAME_SIZE + ACPI_INST_SIZE];
	struct list_head node;
};

struct acpi_data_attr {
	struct bin_attribute attr;
	u64 addr;
};

static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t offset, size_t count)
{
	struct acpi_table_attr *table_attr =
	    container_of(bin_attr, struct acpi_table_attr, attr);
	struct acpi_table_header *table_header = NULL;
	acpi_status status;
	ssize_t rc;

	status = acpi_get_table(table_attr->name, table_attr->instance,
				&table_header);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	rc = memory_read_from_buffer(buf, count, &offset, table_header,
				     table_header->length);
	acpi_put_table(table_header);
	return rc;
}

static int acpi_table_attr_init(struct kobject *tables_obj,
				struct acpi_table_attr *table_attr,
				struct acpi_table_header *table_header)
{
	struct acpi_table_header *header = NULL;
	struct acpi_table_attr *attr = NULL;
	char instance_str[ACPI_INST_SIZE];

	sysfs_attr_init(&table_attr->attr.attr);
	ACPI_MOVE_NAME(table_attr->name, table_header->signature);

	list_for_each_entry(attr, &acpi_table_attr_list, node) {
		if (ACPI_COMPARE_NAME(table_attr->name, attr->name))
			if (table_attr->instance < attr->instance)
				table_attr->instance = attr->instance;
	}
	table_attr->instance++;
	if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
		pr_warn("%4.4s: too many table instances\n", table_attr->name);
		return -ERANGE;
	}

	ACPI_MOVE_NAME(table_attr->filename, table_header->signature);
	table_attr->filename[ACPI_NAME_SIZE] = '\0';
	if (table_attr->instance > 1 || (table_attr->instance == 1 &&
					 !acpi_get_table
					 (table_header->signature, 2, &header))) {
		snprintf(instance_str, sizeof(instance_str), "%u",
			 table_attr->instance);
		strcat(table_attr->filename, instance_str);
	}

	table_attr->attr.size = table_header->length;
	table_attr->attr.read = acpi_table_show;
	table_attr->attr.attr.name = table_attr->filename;
	table_attr->attr.attr.mode = 0400;

	return sysfs_create_bin_file(tables_obj, &table_attr->attr);
}

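/*
 * Handle ACPICA table events: a newly installed table gets a file under
 * /sys/firmware/acpi/tables/dynamic/; load/unload/uninstall events need no
 * action because the table is never removed from the global table list.
 */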
acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
{
	struct acpi_table_attr *table_attr;

	switch (event) {
	case ACPI_TABLE_EVENT_INSTALL:
		table_attr =
		    kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
		if (!table_attr)
			return AE_NO_MEMORY;

		if (acpi_table_attr_init(dynamic_tables_kobj,
					 table_attr, table)) {
			kfree(table_attr);
			return AE_ERROR;
		}
		list_add_tail(&table_attr->node, &acpi_table_attr_list);
		break;
	case ACPI_TABLE_EVENT_LOAD:
	case ACPI_TABLE_EVENT_UNLOAD:
	case ACPI_TABLE_EVENT_UNINSTALL:
		/*
		 * we do not need to do anything right now
		 * because the table is not deleted from the
		 * global table list when unloading it.
		 */
		break;
	default:
		return AE_BAD_PARAMETER;
	}
	return AE_OK;
}

static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr, char *buf,
			      loff_t offset, size_t count)
{
	struct acpi_data_attr *data_attr;
	void __iomem *base;
	ssize_t rc;

	data_attr = container_of(bin_attr, struct acpi_data_attr, attr);

	base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
	if (!base)
		return -ENOMEM;
	rc = memory_read_from_buffer(buf, count, &offset, base,
				     data_attr->attr.size);
	acpi_os_unmap_memory(base, data_attr->attr.size);

	return rc;
}

static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
{
	struct acpi_table_bert *bert = th;

	if (bert->header.length < sizeof(struct acpi_table_bert) ||
	    bert->region_length < sizeof(struct acpi_hest_generic_status)) {
		kfree(data_attr);
		return -EINVAL;
	}
	data_attr->addr = bert->address;
	data_attr->attr.size = bert->region_length;
	data_attr->attr.attr.name = "BERT";

	return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
}

static struct acpi_data_obj {
	char *name;
	int (*fn)(void *, struct acpi_data_attr *);
} acpi_data_objs[] = {
	{ ACPI_SIG_BERT, acpi_bert_data_init },
};

#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)

static int acpi_table_data_init(struct acpi_table_header *th)
{
	struct acpi_data_attr *data_attr;
	int i;

	for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
		if (ACPI_COMPARE_NAME(th->signature, acpi_data_objs[i].name)) {
			data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
			if (!data_attr)
				return -ENOMEM;
			sysfs_attr_init(&data_attr->attr.attr);
			data_attr->attr.read = acpi_data_show;
			data_attr->attr.attr.mode = 0400;
			return acpi_data_objs[i].fn(th, data_attr);
		}
	}
	return 0;
}

static int acpi_tables_sysfs_init(void)
{
	struct acpi_table_attr *table_attr;
	struct acpi_table_header *table_header = NULL;
	int table_index;
	acpi_status status;
	int ret;

	tables_kobj = kobject_create_and_add("tables", acpi_kobj);
	if (!tables_kobj)
		goto err;

	tables_data_kobj = kobject_create_and_add("data", tables_kobj);
	if (!tables_data_kobj)
		goto err_tables_data;

	dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
	if (!dynamic_tables_kobj)
		goto err_dynamic_tables;

	for (table_index = 0;; table_index++) {
		status = acpi_get_table_by_index(table_index, &table_header);

		if (status == AE_BAD_PARAMETER)
			break;

		if (ACPI_FAILURE(status))
			continue;

		table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
		if (!table_attr)
			return -ENOMEM;

		ret = acpi_table_attr_init(tables_kobj,
					   table_attr, table_header);
		if (ret) {
			kfree(table_attr);
			return ret;
		}
		list_add_tail(&table_attr->node, &acpi_table_attr_list);
		acpi_table_data_init(table_header);
	}

	kobject_uevent(tables_kobj, KOBJ_ADD);
	kobject_uevent(tables_data_kobj, KOBJ_ADD);
	kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);

	return 0;
err_dynamic_tables:
	kobject_put(tables_data_kobj);
err_tables_data:
	kobject_put(tables_kobj);
err:
	return -ENOMEM;
}

/*
 * Detailed ACPI IRQ counters:
 * /sys/firmware/acpi/interrupts/
 */

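/*
 * Illustrative usage (the file names depend on the platform's GPEs):
 *   # cat /sys/firmware/acpi/interrupts/sci
 *   # echo clear > /sys/firmware/acpi/interrupts/gpe10
 *   # echo disable > /sys/firmware/acpi/interrupts/ff_pwr_btn
 * See counter_show()/counter_set() below for the full set of commands.
 */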
u32 acpi_irq_handled;
u32 acpi_irq_not_handled;

#define COUNT_GPE 0
#define COUNT_SCI 1		/* acpi_irq_handled */
#define COUNT_SCI_NOT 2		/* acpi_irq_not_handled */
#define COUNT_ERROR 3		/* other */
#define NUM_COUNTERS_EXTRA 4

struct event_counter {
	u32 count;
	u32 flags;
};

static struct event_counter *all_counters;
static u32 num_gpes;
static u32 num_counters;
static struct attribute **all_attrs;
static u32 acpi_gpe_count;

static struct attribute_group interrupt_stats_attr_group = {
	.name = "interrupts",
};

static struct kobj_attribute *counter_attrs;

static void delete_gpe_attr_array(void)
{
	struct event_counter *tmp = all_counters;

	all_counters = NULL;
	kfree(tmp);

	if (counter_attrs) {
		int i;

		for (i = 0; i < num_gpes; i++)
			kfree(counter_attrs[i].attr.name);

		kfree(counter_attrs);
	}
	kfree(all_attrs);

	return;
}

static void gpe_count(u32 gpe_number)
{
	acpi_gpe_count++;

	if (!all_counters)
		return;

	if (gpe_number < num_gpes)
		all_counters[gpe_number].count++;
	else
		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
			     COUNT_ERROR].count++;

	return;
}

static void fixed_event_count(u32 event_number)
{
	if (!all_counters)
		return;

	if (event_number < ACPI_NUM_FIXED_EVENTS)
		all_counters[num_gpes + event_number].count++;
	else
		all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
			     COUNT_ERROR].count++;

	return;
}

static void acpi_global_event_handler(u32 event_type, acpi_handle device,
				      u32 event_number, void *context)
{
	if (event_type == ACPI_EVENT_TYPE_GPE) {
		gpe_count(event_number);
		pr_debug("GPE event 0x%02x\n", event_number);
	} else if (event_type == ACPI_EVENT_TYPE_FIXED) {
		fixed_event_count(event_number);
		pr_debug("Fixed event 0x%02x\n", event_number);
	} else {
		pr_debug("Other event 0x%02x\n", event_number);
	}
}

static int get_status(u32 index, acpi_event_status *status,
		      acpi_handle *handle)
{
	int result;

	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
		return -EINVAL;

	if (index < num_gpes) {
		result = acpi_get_gpe_device(index, handle);
		if (result) {
			ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
					"Invalid GPE 0x%x", index));
			return result;
		}
		result = acpi_get_gpe_status(*handle, index, status);
	} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
		result = acpi_get_event_status(index - num_gpes, status);

	return result;
}

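/*
 * counter_show() prints the event count and, for individual GPEs and fixed
 * events, a decoded status: the EN/STS bits, one of enabled/disabled/
 * wake_enabled/invalid, and masked/unmasked.
 */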
static ssize_t counter_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	int index = attr - counter_attrs;
	int size;
	acpi_handle handle;
	acpi_event_status status;
	int result = 0;

	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
	    acpi_irq_handled;
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
	    acpi_irq_not_handled;
	all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
	    acpi_gpe_count;
	size = sprintf(buf, "%8u", all_counters[index].count);

	/* "gpe_all" or "sci" */
	if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
		goto end;

	result = get_status(index, &status, &handle);
	if (result)
		goto end;

	if (status & ACPI_EVENT_FLAG_ENABLE_SET)
		size += sprintf(buf + size, "  EN");
	else
		size += sprintf(buf + size, "    ");
	if (status & ACPI_EVENT_FLAG_STATUS_SET)
		size += sprintf(buf + size, " STS");
	else
		size += sprintf(buf + size, "    ");

	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
		size += sprintf(buf + size, " invalid     ");
	else if (status & ACPI_EVENT_FLAG_ENABLED)
		size += sprintf(buf + size, " enabled     ");
	else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
		size += sprintf(buf + size, " wake_enabled");
	else
		size += sprintf(buf + size, " disabled    ");
	if (status & ACPI_EVENT_FLAG_MASKED)
		size += sprintf(buf + size, " masked  ");
	else
		size += sprintf(buf + size, " unmasked");

end:
	size += sprintf(buf + size, "\n");
	return result ? result : size;
}

/*
 * counter_set() sets the specified counter.
 * Writing any value to the total "sci" file clears all counters.
 * It also lets user space enable/disable/clear a GPE or fixed event.
 */
static ssize_t counter_set(struct kobject *kobj,
			   struct kobj_attribute *attr, const char *buf,
			   size_t size)
{
	int index = attr - counter_attrs;
	acpi_event_status status;
	acpi_handle handle;
	int result = 0;
	unsigned long tmp;

	if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
		int i;
		for (i = 0; i < num_counters; ++i)
			all_counters[i].count = 0;
		acpi_gpe_count = 0;
		acpi_irq_handled = 0;
		acpi_irq_not_handled = 0;
		goto end;
	}

	/* show the event status for both GPEs and Fixed Events */
	result = get_status(index, &status, &handle);
	if (result)
		goto end;

	if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
		printk(KERN_WARNING PREFIX
		       "Can not change Invalid GPE/Fixed Event status\n");
		return -EINVAL;
	}

	if (index < num_gpes) {
		if (!strcmp(buf, "disable\n") &&
		    (status & ACPI_EVENT_FLAG_ENABLED))
			result = acpi_disable_gpe(handle, index);
		else if (!strcmp(buf, "enable\n") &&
			 !(status & ACPI_EVENT_FLAG_ENABLED))
			result = acpi_enable_gpe(handle, index);
		else if (!strcmp(buf, "clear\n") &&
			 (status & ACPI_EVENT_FLAG_STATUS_SET))
			result = acpi_clear_gpe(handle, index);
		else if (!strcmp(buf, "mask\n"))
			result = acpi_mask_gpe(handle, index, TRUE);
		else if (!strcmp(buf, "unmask\n"))
			result = acpi_mask_gpe(handle, index, FALSE);
		else if (!kstrtoul(buf, 0, &tmp))
			all_counters[index].count = tmp;
		else
			result = -EINVAL;
	} else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
		int event = index - num_gpes;
		if (!strcmp(buf, "disable\n") &&
		    (status & ACPI_EVENT_FLAG_ENABLE_SET))
			result = acpi_disable_event(event, ACPI_NOT_ISR);
		else if (!strcmp(buf, "enable\n") &&
			 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
			result = acpi_enable_event(event, ACPI_NOT_ISR);
		else if (!strcmp(buf, "clear\n") &&
			 (status & ACPI_EVENT_FLAG_STATUS_SET))
			result = acpi_clear_event(event);
		else if (!kstrtoul(buf, 0, &tmp))
			all_counters[index].count = tmp;
		else
			result = -EINVAL;
	} else
		all_counters[index].count = strtoul(buf, NULL, 0);

	if (ACPI_FAILURE(result))
		result = -EINVAL;
end:
	return result ? result : size;
}

/*
 * A quirk mechanism for GPE flooding prevention:
 *
 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
 * flooding typically cannot be detected and prevented automatically by the
 * ACPI_GPE_DISPATCH_NONE check, because a _Lxx/_Exx handler is present in
 * the AML tables. This normally indicates a feature gap in Linux, so
 * instead of maintaining endless quirk tables we provide a boot parameter
 * for those who need the quirk. For example, to prevent GPE flooding on
 * GPE 00, specify the following boot parameter:
 *	acpi_mask_gpe=0x00
 * The masking status can be changed at runtime through this interface:
 *	echo unmask > /sys/firmware/acpi/interrupts/gpe00
 */

/*
 * Currently, GPE flooding prevention only supports masking GPEs numbered
 * from 00 to 7f.
 */
#define ACPI_MASKABLE_GPE_MAX 0x80

static u64 __initdata acpi_masked_gpes;

static int __init acpi_gpe_set_masked_gpes(char *val)
{
	u8 gpe;

	if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
		return -EINVAL;
	acpi_masked_gpes |= ((u64)1 << gpe);

	return 1;
}
__setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);

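/*
 * Apply the acpi_mask_gpe= selections made on the command line. The caller
 * lives outside this file; since the loop relies on acpi_current_gpe_count,
 * it is meant to run only after the GPE blocks have been enumerated.
 */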
void __init acpi_gpe_apply_masked_gpes(void)
{
	acpi_handle handle;
	acpi_status status;
	u8 gpe;

	for (gpe = 0;
	     gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
	     gpe++) {
		if (acpi_masked_gpes & ((u64)1 << gpe)) {
			status = acpi_get_gpe_device(gpe, &handle);
			if (ACPI_SUCCESS(status)) {
				pr_info("Masking GPE 0x%x.\n", gpe);
				(void)acpi_mask_gpe(handle, gpe, TRUE);
			}
		}
	}
}

void acpi_irq_stats_init(void)
{
	acpi_status status;
	int i;

	if (all_counters)
		return;

	num_gpes = acpi_current_gpe_count;
	num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;

	all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
			    GFP_KERNEL);
	if (all_attrs == NULL)
		return;

	all_counters = kzalloc(sizeof(struct event_counter) * (num_counters),
			       GFP_KERNEL);
	if (all_counters == NULL)
		goto fail;

	status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
	if (ACPI_FAILURE(status))
		goto fail;

	counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
				GFP_KERNEL);
	if (counter_attrs == NULL)
		goto fail;

	for (i = 0; i < num_counters; ++i) {
		char buffer[12];
		char *name;

		if (i < num_gpes)
			sprintf(buffer, "gpe%02X", i);
		else if (i == num_gpes + ACPI_EVENT_PMTIMER)
			sprintf(buffer, "ff_pmtimer");
		else if (i == num_gpes + ACPI_EVENT_GLOBAL)
			sprintf(buffer, "ff_gbl_lock");
		else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
			sprintf(buffer, "ff_pwr_btn");
		else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
			sprintf(buffer, "ff_slp_btn");
		else if (i == num_gpes + ACPI_EVENT_RTC)
			sprintf(buffer, "ff_rt_clk");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
			sprintf(buffer, "gpe_all");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
			sprintf(buffer, "sci");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
			sprintf(buffer, "sci_not");
		else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
			sprintf(buffer, "error");
		else
			sprintf(buffer, "bug%02X", i);

		name = kstrdup(buffer, GFP_KERNEL);
		if (name == NULL)
			goto fail;

		sysfs_attr_init(&counter_attrs[i].attr);
		counter_attrs[i].attr.name = name;
		counter_attrs[i].attr.mode = 0644;
		counter_attrs[i].show = counter_show;
		counter_attrs[i].store = counter_set;

		all_attrs[i] = &counter_attrs[i].attr;
	}

	interrupt_stats_attr_group.attrs = all_attrs;
	if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
		return;

fail:
	delete_gpe_attr_array();
	return;
}

static void __exit interrupt_stats_exit(void)
{
	sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);

	delete_gpe_attr_array();

	return;
}

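/*
 * pm_profile reports the FADT Preferred_PM_Profile field verbatim; see the
 * ACPI specification for the meaning of each numeric value.
 */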
static ssize_t
acpi_show_profile(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}

static const struct device_attribute pm_profile_attr =
	__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);

static ssize_t hotplug_enabled_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);

	return sprintf(buf, "%d\n", hotplug->enabled);
}

static ssize_t hotplug_enabled_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t size)
{
	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
	unsigned int val;

	if (kstrtouint(buf, 10, &val) || val > 1)
		return -EINVAL;

	acpi_scan_hotplug_enabled(hotplug, val);
	return size;
}

static struct kobj_attribute hotplug_enabled_attr =
	__ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
	       hotplug_enabled_store);

static struct attribute *hotplug_profile_attrs[] = {
	&hotplug_enabled_attr.attr,
	NULL
};

static struct kobj_type acpi_hotplug_profile_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = hotplug_profile_attrs,
};

void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
				    const char *name)
{
	int error;

	if (!hotplug_kobj)
		goto err_out;

	error = kobject_init_and_add(&hotplug->kobj,
		&acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
	if (error)
		goto err_out;

	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
	return;

err_out:
	pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
}

static ssize_t force_remove_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t force_remove_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t size)
{
	bool val;
	int ret;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	if (val) {
		pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
		return -EINVAL;
	}
	return size;
}

static const struct kobj_attribute force_remove_attr =
	__ATTR(force_remove, S_IRUGO | S_IWUSR, force_remove_show,
	       force_remove_store);

int __init acpi_sysfs_init(void)
{
	int result;

	result = acpi_tables_sysfs_init();
	if (result)
		return result;

	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
	if (!hotplug_kobj)
		return -ENOMEM;

	result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
	if (result)
		return result;

	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
	return result;
}