Lines matching the full-text search terms "wakeup", "event" and "action" (query: "wakeup-event-action")
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * ec.c - ACPI Embedded Controller Driver (v3)
5 * Copyright (C) 2001-2015 Intel Corporation
43 #define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
53 * host should re-check SCI_EVT after the first time the SCI_EVT
56 * at any later time could indicate another event. Normally such
57 * kind of EC firmware has implemented an event queue and will
58 * return 0x00 to indicate "no outstanding event".
61 * event value in the data register (EC_DATA), the target can safely
63 * event is being handled by the host. The host then should check
64 * SCI_EVT right after reading the event response from the data
66 * EVENT: After seeing the event response read from the data register
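To make the clearing-timing variants above concrete, here is a minimal illustrative sketch of one QR_EC query round trip (not code from ec.c; it reuses the file's register helpers and omits the IBF/OBF waits, locking and guarding that the real transaction machinery provides):

/* Illustrative only: one QR_EC query round trip. */
static int query_event_sketch(struct acpi_ec *ec, u8 *event)
{
	if (!(acpi_ec_read_status(ec) & ACPI_EC_FLAG_SCI))
		return -ENODATA;			/* no event pending */

	acpi_ec_write_cmd(ec, ACPI_EC_COMMAND_QUERY);	/* QR_EC (0x84) */
	*event = acpi_ec_read_data(ec);			/* _Qxx number */

	/*
	 * Depending on the firmware, SCI_EVT is cleared at STATUS, QUERY or
	 * EVENT time, so the host must re-check it after this point; 0x00
	 * means "no outstanding event" for queue-capable firmware.
	 */
	return *event ? 0 : -ENODATA;
}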
95 EC_FLAGS_EVENT_HANDLER_INSTALLED, /* Event handler installed */
137 MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
141 MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
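Since ec.c is normally built into the ACPI core, these knobs are usually given on the kernel command line with the module prefix; the exact prefix below is an assumption, so verify it against /sys/module/.../parameters on the target kernel:

acpi.ec_freeze_events=1 acpi.ec_no_wakeup=1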
187 /* --------------------------------------------------------------------------
188 * Logging/Debugging
189 * -------------------------------------------------------------------------- */
229 ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
231 /* --------------------------------------------------------------------------
232 * Device Flags
233 * -------------------------------------------------------------------------- */
237 return test_bit(EC_FLAGS_STARTED, &ec->flags) && in acpi_ec_started()
238 !test_bit(EC_FLAGS_STOPPED, &ec->flags); in acpi_ec_started()
245 * (boot/resume), OSPMs shouldn't enable the event handling, only in acpi_ec_event_enabled()
248 if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) in acpi_ec_event_enabled()
251 * However, disabling the event handling is experimental for late in acpi_ec_event_enabled()
254 * 1. true: The EC event handling is disabled before entering in acpi_ec_event_enabled()
256 * 2. false: The EC event handling is automatically disabled as in acpi_ec_event_enabled()
262 return test_bit(EC_FLAGS_STARTED, &ec->flags); in acpi_ec_event_enabled()
267 return ec->reference_count == 1; in acpi_ec_flushed()
270 /* --------------------------------------------------------------------------
271 * EC Registers
272 * -------------------------------------------------------------------------- */
276 u8 x = inb(ec->command_addr); in acpi_ec_read_status()
291 u8 x = inb(ec->data_addr); in acpi_ec_read_data()
293 ec->timestamp = jiffies; in acpi_ec_read_data()
301 outb(command, ec->command_addr); in acpi_ec_write_cmd()
302 ec->timestamp = jiffies; in acpi_ec_write_cmd()
308 outb(data, ec->data_addr); in acpi_ec_write_data()
309 ec->timestamp = jiffies; in acpi_ec_write_data()
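For orientation, these four accessors are the primitives behind every transaction; a plain ACPI-style EC byte read built directly on them would look roughly like the sketch below (illustrative only, since the driver actually drives this through struct transaction and advance_transaction() with proper waiting and timeouts):

/* Illustrative only: a blocking RD_EC (0x80) read that bypasses the
 * transaction machinery; real ec.c code never busy-waits like this.
 */
static u8 ec_read_byte_sketch(struct acpi_ec *ec, u8 address)
{
	acpi_ec_write_cmd(ec, ACPI_EC_COMMAND_READ);	/* EC_CMD <- 0x80 */
	while (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
		cpu_relax();				/* input buffer must drain */
	acpi_ec_write_data(ec, address);		/* EC_DATA <- EC offset */
	while (!(acpi_ec_read_status(ec) & ACPI_EC_FLAG_OBF))
		cpu_relax();				/* wait for the result byte */
	return acpi_ec_read_data(ec);			/* EC_DATA -> value */
}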
333 /* --------------------------------------------------------------------------
334 * GPE Registers
335 * -------------------------------------------------------------------------- */
341 (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status); in acpi_ec_gpe_status_set()
348 acpi_enable_gpe(NULL, ec->gpe); in acpi_ec_enable_gpe()
350 BUG_ON(ec->reference_count < 1); in acpi_ec_enable_gpe()
351 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); in acpi_ec_enable_gpe()
356 * software need to manually trigger a pseudo GPE event on in acpi_ec_enable_gpe()
367 acpi_disable_gpe(NULL, ec->gpe); in acpi_ec_disable_gpe()
369 BUG_ON(ec->reference_count < 1); in acpi_ec_disable_gpe()
370 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); in acpi_ec_disable_gpe()
374 /* --------------------------------------------------------------------------
375 * Transaction Management
376 * -------------------------------------------------------------------------- */
380 ec->reference_count++; in acpi_ec_submit_request()
381 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) && in acpi_ec_submit_request()
382 ec->gpe >= 0 && ec->reference_count == 1) in acpi_ec_submit_request()
390 ec->reference_count--; in acpi_ec_complete_request()
391 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) && in acpi_ec_complete_request()
392 ec->gpe >= 0 && ec->reference_count == 0) in acpi_ec_complete_request()
396 wake_up(&ec->wait); in acpi_ec_complete_request()
401 if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) { in acpi_ec_mask_events()
402 if (ec->gpe >= 0) in acpi_ec_mask_events()
405 disable_irq_nosync(ec->irq); in acpi_ec_mask_events()
408 set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags); in acpi_ec_mask_events()
414 if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) { in acpi_ec_unmask_events()
415 clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags); in acpi_ec_unmask_events()
416 if (ec->gpe >= 0) in acpi_ec_unmask_events()
419 enable_irq(ec->irq); in acpi_ec_unmask_events()
426 * acpi_ec_submit_flushable_request() - Increase the reference count unless
431 * This function must be used before taking a new action that should hold
432 * the reference count. If this function returns false, then the action
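The intended usage pattern is roughly the bracket below (an illustrative sketch, not a verbatim caller from ec.c):

if (!acpi_ec_submit_flushable_request(ec))
	return false;			/* driver stopped: refuse new work */

/* ... perform the action that must keep the EC reference held ... */

acpi_ec_complete_request(ec);		/* drop the count; may wake ec->wait */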
454 if (ec->event_state != EC_EVENT_READY) in acpi_ec_submit_event()
460 ec->event_state = EC_EVENT_IN_PROGRESS; in acpi_ec_submit_event()
465 * more, so it is not necessary to queue up the event work to start the in acpi_ec_submit_event()
468 if (ec->events_to_process++ > 0) in acpi_ec_submit_event()
471 ec->events_in_progress++; in acpi_ec_submit_event()
472 queue_work(ec_wq, &ec->work); in acpi_ec_submit_event()
477 if (ec->event_state == EC_EVENT_IN_PROGRESS) in acpi_ec_complete_event()
478 ec->event_state = EC_EVENT_COMPLETE; in acpi_ec_complete_event()
483 if (ec->event_state != EC_EVENT_READY) in acpi_ec_close_event()
487 ec->event_state = EC_EVENT_READY; in acpi_ec_close_event()
493 if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) in __acpi_ec_enable_event()
494 ec_log_drv("event unblocked"); in __acpi_ec_enable_event()
496 * Unconditionally invoke this once after enabling the event in __acpi_ec_enable_event()
504 if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags)) in __acpi_ec_disable_event()
505 ec_log_drv("event blocked"); in __acpi_ec_disable_event()
530 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_enable_event()
533 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_enable_event()
543 flush_workqueue(ec_wq); /* flush ec->work */ in __acpi_ec_flush_work()
551 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_disable_event()
553 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_disable_event()
577 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_guard_event()
579 * If firmware SCI_EVT clearing timing is "event", we actually in acpi_ec_guard_event()
581 * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an in acpi_ec_guard_event()
584 * The guarding period is applicable if the event state is not in acpi_ec_guard_event()
591 ec->event_state != EC_EVENT_READY && in acpi_ec_guard_event()
592 (!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY); in acpi_ec_guard_event()
593 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_guard_event()
602 spin_lock_irqsave(&ec->lock, flags); in ec_transaction_polled()
603 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL)) in ec_transaction_polled()
605 spin_unlock_irqrestore(&ec->lock, flags); in ec_transaction_polled()
614 spin_lock_irqsave(&ec->lock, flags); in ec_transaction_completed()
615 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) in ec_transaction_completed()
617 spin_unlock_irqrestore(&ec->lock, flags); in ec_transaction_completed()
623 ec->curr->flags |= flag; in ec_transaction_transition()
625 if (ec->curr->command != ACPI_EC_COMMAND_QUERY) in ec_transaction_transition()
649 if (t->irq_count < ec_storm_threshold) in acpi_ec_spurious_interrupt()
650 ++t->irq_count; in acpi_ec_spurious_interrupt()
653 if (t->irq_count == ec_storm_threshold) in acpi_ec_spurious_interrupt()
659 struct transaction *t = ec->curr; in advance_transaction()
660 bool wakeup = false; in advance_transaction()
671 if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) { in advance_transaction()
673 ec->event_state == EC_EVENT_COMPLETE) in advance_transaction()
680 if (t->flags & ACPI_EC_COMMAND_POLL) { in advance_transaction()
681 if (t->wlen > t->wi) { in advance_transaction()
683 acpi_ec_write_data(ec, t->wdata[t->wi++]); in advance_transaction()
686 } else if (t->rlen > t->ri) { in advance_transaction()
688 t->rdata[t->ri++] = acpi_ec_read_data(ec); in advance_transaction()
689 if (t->rlen == t->ri) { in advance_transaction()
691 wakeup = true; in advance_transaction()
692 if (t->command == ACPI_EC_COMMAND_QUERY) in advance_transaction()
699 } else if (t->wlen == t->wi && !(status & ACPI_EC_FLAG_IBF)) { in advance_transaction()
701 wakeup = true; in advance_transaction()
704 acpi_ec_write_cmd(ec, t->command); in advance_transaction()
712 if (wakeup && interrupt) in advance_transaction()
713 wake_up(&ec->wait); in advance_transaction()
718 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; in start_transaction()
719 ec->curr->flags = 0; in start_transaction()
724 unsigned long guard = usecs_to_jiffies(ec->polling_guard); in ec_guard()
725 unsigned long timeout = ec->timestamp + guard; in ec_guard()
729 if (ec->busy_polling) { in ec_guard()
741 * for event clearing mode "event" before the in ec_guard()
748 if (wait_event_timeout(ec->wait, in ec_guard()
754 return -ETIME; in ec_guard()
762 while (repeat--) { in ec_poll()
768 spin_lock_irqsave(&ec->lock, flags); in ec_poll()
770 spin_unlock_irqrestore(&ec->lock, flags); in ec_poll()
773 spin_lock_irqsave(&ec->lock, flags); in ec_poll()
775 spin_unlock_irqrestore(&ec->lock, flags); in ec_poll()
777 return -ETIME; in ec_poll()
786 if (t->rdata) in acpi_ec_transaction_unlocked()
787 memset(t->rdata, 0, t->rlen); in acpi_ec_transaction_unlocked()
790 spin_lock_irqsave(&ec->lock, tmp); in acpi_ec_transaction_unlocked()
793 ret = -EINVAL; in acpi_ec_transaction_unlocked()
798 ec->curr = t; in acpi_ec_transaction_unlocked()
799 ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command)); in acpi_ec_transaction_unlocked()
801 spin_unlock_irqrestore(&ec->lock, tmp); in acpi_ec_transaction_unlocked()
805 spin_lock_irqsave(&ec->lock, tmp); in acpi_ec_transaction_unlocked()
806 if (t->irq_count == ec_storm_threshold) in acpi_ec_transaction_unlocked()
808 ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command)); in acpi_ec_transaction_unlocked()
809 ec->curr = NULL; in acpi_ec_transaction_unlocked()
814 spin_unlock_irqrestore(&ec->lock, tmp); in acpi_ec_transaction_unlocked()
823 if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata)) in acpi_ec_transaction()
824 return -EINVAL; in acpi_ec_transaction()
826 mutex_lock(&ec->mutex); in acpi_ec_transaction()
827 if (ec->global_lock) { in acpi_ec_transaction()
830 status = -ENODEV; in acpi_ec_transaction()
837 if (ec->global_lock) in acpi_ec_transaction()
840 mutex_unlock(&ec->mutex); in acpi_ec_transaction()
916 return -ENODEV; in ec_read()
931 return -ENODEV; in ec_write()
946 return -ENODEV; in ec_transaction()
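The -ENODEV returns above belong to the exported helpers ec_read(), ec_write() and ec_transaction(), which other drivers use for raw EC address-space access and which fail when no EC has been enumerated yet. A minimal usage sketch, assuming the declarations come in via <linux/acpi.h> and using a made-up offset 0x50:

#include <linux/acpi.h>

static int read_one_ec_byte(void)
{
	u8 val;
	int ret = ec_read(0x50, &val);	/* -ENODEV if no EC is available yet */

	if (ret)
		return ret;

	return ec_write(0x50, val | 0x01);
}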
957 return first_ec->handle; in ec_get_handle()
965 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_start()
966 if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) { in acpi_ec_start()
968 /* Enable GPE for event processing (SCI_EVT=1) */ in acpi_ec_start()
975 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_start()
983 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_stopped()
985 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_stopped()
993 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_stop()
996 set_bit(EC_FLAGS_STOPPED, &ec->flags); in acpi_ec_stop()
997 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_stop()
998 wait_event(ec->wait, acpi_ec_stopped(ec)); in acpi_ec_stop()
999 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_stop()
1000 /* Disable GPE for event processing (SCI_EVT=1) */ in acpi_ec_stop()
1006 clear_bit(EC_FLAGS_STARTED, &ec->flags); in acpi_ec_stop()
1007 clear_bit(EC_FLAGS_STOPPED, &ec->flags); in acpi_ec_stop()
1010 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_stop()
1017 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_enter_noirq()
1018 ec->busy_polling = true; in acpi_ec_enter_noirq()
1019 ec->polling_guard = 0; in acpi_ec_enter_noirq()
1021 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_enter_noirq()
1028 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_leave_noirq()
1029 ec->busy_polling = ec_busy_polling; in acpi_ec_leave_noirq()
1030 ec->polling_guard = ec_polling_guard; in acpi_ec_leave_noirq()
1032 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_leave_noirq()
1042 mutex_lock(&ec->mutex); in acpi_ec_block_transactions()
1045 mutex_unlock(&ec->mutex); in acpi_ec_block_transactions()
1052 * atomic context during wakeup, so we don't need to acquire the mutex). in acpi_ec_unblock_transactions()
1058 /* --------------------------------------------------------------------------
1059 Event Management
1060 -------------------------------------------------------------------------- */
1066 mutex_lock(&ec->mutex); in acpi_ec_get_query_handler_by_value()
1067 list_for_each_entry(handler, &ec->list, node) { in acpi_ec_get_query_handler_by_value()
1068 if (value == handler->query_bit) { in acpi_ec_get_query_handler_by_value()
1069 kref_get(&handler->kref); in acpi_ec_get_query_handler_by_value()
1070 mutex_unlock(&ec->mutex); in acpi_ec_get_query_handler_by_value()
1074 mutex_unlock(&ec->mutex); in acpi_ec_get_query_handler_by_value()
1088 kref_put(&handler->kref, acpi_ec_query_handler_release); in acpi_ec_put_query_handler()
1098 return -EINVAL; in acpi_ec_add_query_handler()
1102 return -ENOMEM; in acpi_ec_add_query_handler()
1104 handler->query_bit = query_bit; in acpi_ec_add_query_handler()
1105 handler->handle = handle; in acpi_ec_add_query_handler()
1106 handler->func = func; in acpi_ec_add_query_handler()
1107 handler->data = data; in acpi_ec_add_query_handler()
1108 mutex_lock(&ec->mutex); in acpi_ec_add_query_handler()
1109 kref_init(&handler->kref); in acpi_ec_add_query_handler()
1110 list_add(&handler->node, &ec->list); in acpi_ec_add_query_handler()
1111 mutex_unlock(&ec->mutex); in acpi_ec_add_query_handler()
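For context, acpi_ec_add_query_handler() is how other EC consumers (for example the ACPI SMBus host controller) hook a _Qxx query value; a hedged usage sketch with an invented query number:

static int example_query_notify(void *data)
{
	/* Called from the EC query workqueue when the EC raises this event. */
	pr_info("EC query 0x42 received\n");
	return 0;
}

static int example_register(struct acpi_ec *ec)
{
	/* 0x42 is an invented query value, purely for illustration. */
	return acpi_ec_add_query_handler(ec, 0x42, NULL,
					 example_query_notify, NULL);
}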
1123 mutex_lock(&ec->mutex); in acpi_ec_remove_query_handlers()
1124 list_for_each_entry_safe(handler, tmp, &ec->list, node) { in acpi_ec_remove_query_handlers()
1127 * which have handler->func set. This is done to preserve query in acpi_ec_remove_query_handlers()
1131 if (remove_all || (handler->func && handler->query_bit == query_bit)) { in acpi_ec_remove_query_handlers()
1132 list_del_init(&handler->node); in acpi_ec_remove_query_handlers()
1133 list_add(&handler->node, &free_list); in acpi_ec_remove_query_handlers()
1137 mutex_unlock(&ec->mutex); in acpi_ec_remove_query_handlers()
1152 struct acpi_ec_query_handler *handler = q->handler; in acpi_ec_event_processor()
1153 struct acpi_ec *ec = q->ec; in acpi_ec_event_processor()
1155 ec_dbg_evt("Query(0x%02x) started", handler->query_bit); in acpi_ec_event_processor()
1157 if (handler->func) in acpi_ec_event_processor()
1158 handler->func(handler->data); in acpi_ec_event_processor()
1159 else if (handler->handle) in acpi_ec_event_processor()
1160 acpi_evaluate_object(handler->handle, NULL, NULL, NULL); in acpi_ec_event_processor()
1162 ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit); in acpi_ec_event_processor()
1164 spin_lock_irq(&ec->lock); in acpi_ec_event_processor()
1165 ec->queries_in_progress--; in acpi_ec_event_processor()
1166 spin_unlock_irq(&ec->lock); in acpi_ec_event_processor()
1181 INIT_WORK(&q->work, acpi_ec_event_processor); in acpi_ec_create_query()
1182 t = &q->transaction; in acpi_ec_create_query()
1183 t->command = ACPI_EC_COMMAND_QUERY; in acpi_ec_create_query()
1184 t->rdata = pval; in acpi_ec_create_query()
1185 t->rlen = 1; in acpi_ec_create_query()
1186 q->ec = ec; in acpi_ec_create_query()
1198 return -ENOMEM; in acpi_ec_submit_query()
1205 result = acpi_ec_transaction(ec, &q->transaction); in acpi_ec_submit_query()
1210 result = -ENODATA; in acpi_ec_submit_query()
1214 q->handler = acpi_ec_get_query_handler_by_value(ec, value); in acpi_ec_submit_query()
1215 if (!q->handler) { in acpi_ec_submit_query()
1216 result = -ENODATA; in acpi_ec_submit_query()
1229 spin_lock_irq(&ec->lock); in acpi_ec_submit_query()
1231 ec->queries_in_progress++; in acpi_ec_submit_query()
1232 queue_work(ec_query_wq, &q->work); in acpi_ec_submit_query()
1234 spin_unlock_irq(&ec->lock); in acpi_ec_submit_query()
1248 ec_dbg_evt("Event started"); in acpi_ec_event_handler()
1250 spin_lock_irq(&ec->lock); in acpi_ec_event_handler()
1252 while (ec->events_to_process) { in acpi_ec_event_handler()
1253 spin_unlock_irq(&ec->lock); in acpi_ec_event_handler()
1257 spin_lock_irq(&ec->lock); in acpi_ec_event_handler()
1259 ec->events_to_process--; in acpi_ec_event_handler()
1264 * event handling work again regardless of whether or not the query in acpi_ec_event_handler()
1272 ec_dbg_evt("Event stopped"); in acpi_ec_event_handler()
1274 spin_unlock_irq(&ec->lock); in acpi_ec_event_handler()
1278 spin_lock_irq(&ec->lock); in acpi_ec_event_handler()
1281 if (guard_timeout && !ec->curr) in acpi_ec_event_handler()
1286 ec_dbg_evt("Event stopped"); in acpi_ec_event_handler()
1289 ec->events_in_progress--; in acpi_ec_event_handler()
1291 spin_unlock_irq(&ec->lock); in acpi_ec_event_handler()
1297 * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1 in clear_gpe_and_advance_transaction()
1308 if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec)) in clear_gpe_and_advance_transaction()
1309 acpi_clear_gpe(NULL, ec->gpe); in clear_gpe_and_advance_transaction()
1318 spin_lock_irqsave(&ec->lock, flags); in acpi_ec_handle_interrupt()
1322 spin_unlock_irqrestore(&ec->lock, flags); in acpi_ec_handle_interrupt()
1338 /* --------------------------------------------------------------------------
1339 * Address Space Management
1340 * -------------------------------------------------------------------------- */
1358 mutex_lock(&ec->mutex); in acpi_ec_space_handler()
1360 if (ec->global_lock) { in acpi_ec_space_handler()
1365 result = -ENODEV; in acpi_ec_space_handler()
1370 if (ec->busy_polling || bits > 8) in acpi_ec_space_handler()
1381 if (ec->busy_polling || bits > 8) in acpi_ec_space_handler()
1384 if (ec->global_lock) in acpi_ec_space_handler()
1388 mutex_unlock(&ec->mutex); in acpi_ec_space_handler()
1391 case -EINVAL: in acpi_ec_space_handler()
1393 case -ENODEV: in acpi_ec_space_handler()
1395 case -ETIME: in acpi_ec_space_handler()
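The truncated switch above translates the transaction result into an ACPI status for the address-space handler; from the surrounding code it reads, in essence, as follows (a reconstruction, so double-check against the full source):

switch (result) {
case -EINVAL:
	return AE_BAD_PARAMETER;
case -ENODEV:
	return AE_NOT_FOUND;
case -ETIME:
	return AE_TIME;
default:
	return AE_OK;
}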
1404 /* --------------------------------------------------------------------------
1405 * Driver Interface
1406 * -------------------------------------------------------------------------- */
1426 mutex_init(&ec->mutex); in acpi_ec_alloc()
1427 init_waitqueue_head(&ec->wait); in acpi_ec_alloc()
1428 INIT_LIST_HEAD(&ec->list); in acpi_ec_alloc()
1429 spin_lock_init(&ec->lock); in acpi_ec_alloc()
1430 INIT_WORK(&ec->work, acpi_ec_event_handler); in acpi_ec_alloc()
1431 ec->timestamp = jiffies; in acpi_ec_alloc()
1432 ec->busy_polling = true; in acpi_ec_alloc()
1433 ec->polling_guard = 0; in acpi_ec_alloc()
1434 ec->gpe = -1; in acpi_ec_alloc()
1435 ec->irq = -1; in acpi_ec_alloc()
1464 ec->command_addr = ec->data_addr = 0; in ec_parse_device()
1470 if (ec->data_addr == 0 || ec->command_addr == 0) in ec_parse_device()
1477 ec->gpe = tmp; in ec_parse_device()
1479 * Errors are non-fatal, allowing for ACPI Reduced Hardware in ec_parse_device()
1486 ec->global_lock = tmp; in ec_parse_device()
1487 ec->handle = handle; in ec_parse_device()
1495 status = acpi_install_gpe_raw_handler(NULL, ec->gpe, in install_gpe_event_handler()
1501 if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1) in install_gpe_event_handler()
1509 return request_irq(ec->irq, acpi_ec_irq_handler, IRQF_SHARED, in install_gpio_irq_event_handler()
1514 * ec_install_handlers - Install service callbacks and register query methods.
1521 * namespace and register them, and install an event (either GPE or GPIO IRQ)
1525 * -ENODEV if the address space handler cannot be installed, which means
1527 * -EPROBE_DEFER if GPIO IRQ acquisition needs to be deferred,
1537 if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { in ec_install_handlers()
1538 acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle; in ec_install_handlers()
1547 return -ENODEV; in ec_install_handlers()
1549 set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); in ec_install_handlers()
1552 if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) { in ec_install_handlers()
1553 acpi_execute_reg_methods(ec->handle, ACPI_UINT32_MAX, ACPI_ADR_SPACE_EC); in ec_install_handlers()
1554 set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags); in ec_install_handlers()
1560 if (ec->gpe < 0) { in ec_install_handlers()
1567 if (irq == -EPROBE_DEFER) in ec_install_handlers()
1568 return -EPROBE_DEFER; in ec_install_handlers()
1570 ec->irq = irq; in ec_install_handlers()
1573 if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) { in ec_install_handlers()
1575 acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1, in ec_install_handlers()
1578 set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags); in ec_install_handlers()
1580 if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { in ec_install_handlers()
1583 if (ec->gpe >= 0) in ec_install_handlers()
1585 else if (ec->irq >= 0) in ec_install_handlers()
1589 set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags); in ec_install_handlers()
1593 * Failures to install an event handler are not fatal, because in ec_install_handlers()
1605 acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle; in ec_remove_handlers()
1607 if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) { in ec_remove_handlers()
1613 clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags); in ec_remove_handlers()
1629 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { in ec_remove_handlers()
1630 if (ec->gpe >= 0 && in ec_remove_handlers()
1631 ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe, in ec_remove_handlers()
1635 if (ec->irq >= 0) in ec_remove_handlers()
1636 free_irq(ec->irq, ec); in ec_remove_handlers()
1638 clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags); in ec_remove_handlers()
1640 if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) { in ec_remove_handlers()
1642 clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags); in ec_remove_handlers()
1662 pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr, in acpi_ec_setup()
1663 ec->data_addr); in acpi_ec_setup()
1665 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) { in acpi_ec_setup()
1666 if (ec->gpe >= 0) in acpi_ec_setup()
1667 pr_info("GPE=0x%x\n", ec->gpe); in acpi_ec_setup()
1669 pr_info("IRQ=%d\n", ec->irq); in acpi_ec_setup()
1683 if (boot_ec && (boot_ec->handle == device->handle || in acpi_ec_add()
1692 return -ENOMEM; in acpi_ec_add()
1694 status = ec_parse_device(device->handle, 0, ec, NULL); in acpi_ec_add()
1696 ret = -EINVAL; in acpi_ec_add()
1700 if (boot_ec && ec->command_addr == boot_ec->command_addr && in acpi_ec_add()
1701 ec->data_addr == boot_ec->data_addr) { in acpi_ec_add()
1705 * quirks. So do not change boot_ec->gpe to ec->gpe, in acpi_ec_add()
1708 boot_ec->handle = ec->handle; in acpi_ec_add()
1711 boot_ec->gpe = ec->gpe; in acpi_ec_add()
1713 acpi_handle_debug(ec->handle, "duplicated.\n"); in acpi_ec_add()
1724 acpi_handle_info(boot_ec->handle, in acpi_ec_add()
1728 acpi_handle_info(ec->handle, in acpi_ec_add()
1731 device->driver_data = ec; in acpi_ec_add()
1733 ret = !!request_region(ec->data_addr, 1, "EC data"); in acpi_ec_add()
1734 WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr); in acpi_ec_add()
1735 ret = !!request_region(ec->command_addr, 1, "EC cmd"); in acpi_ec_add()
1736 WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr); in acpi_ec_add()
1741 acpi_handle_debug(ec->handle, "enumerated.\n"); in acpi_ec_add()
1759 release_region(ec->data_addr, 1); in acpi_ec_remove()
1760 release_region(ec->command_addr, 1); in acpi_ec_remove()
1761 device->driver_data = NULL; in acpi_ec_remove()
1770 if (first_ec && first_ec->handle != adev->handle) in acpi_ec_register_opregions()
1771 acpi_execute_reg_methods(adev->handle, 1, ACPI_ADR_SPACE_EC); in acpi_ec_register_opregions()
1779 if (resource->type != ACPI_RESOURCE_TYPE_IO) in ec_parse_io_ports()
1787 if (ec->data_addr == 0) in ec_parse_io_ports()
1788 ec->data_addr = resource->data.io.minimum; in ec_parse_io_ports()
1789 else if (ec->command_addr == 0) in ec_parse_io_ports()
1790 ec->command_addr = resource->data.io.minimum; in ec_parse_io_ports()
1804 * This function is not Windows-compatible as Windows never enumerates the
1832 if (ACPI_FAILURE(status) || !ec->handle) { in acpi_ec_dsdt_probe()
1838 * When the DSDT EC is available, always re-configure boot EC to in acpi_ec_dsdt_probe()
1852 acpi_handle_info(ec->handle, in acpi_ec_dsdt_probe()
1857 * acpi_ec_ecdt_start - Finalize the boot ECDT EC initialization.
1876 if (!boot_ec || boot_ec->handle != ACPI_ROOT_OBJECT) in acpi_ec_ecdt_start()
1885 status = acpi_get_handle(NULL, ecdt_ptr->id, &handle); in acpi_ec_ecdt_start()
1887 boot_ec->handle = handle; in acpi_ec_ecdt_start()
1923 * MSI MS-171F
1948 * MSI MS-171F
1953 DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
1954 DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),
1959 * HP Pavilion Gaming Laptop 15-cx0xxx
1965 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-cx0xxx"),
1970 * HP Pavilion Gaming Laptop 15-cx0041ur
1975 DMI_MATCH(DMI_PRODUCT_NAME, "HP 15-cx0041ur"),
1980 * HP Pavilion Gaming Laptop 15-dk1xxx
1986 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
2026 if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) { in acpi_ec_ecdt_probe()
2039 ec->command_addr = ecdt_ptr->data.address; in acpi_ec_ecdt_probe()
2040 ec->data_addr = ecdt_ptr->control.address; in acpi_ec_ecdt_probe()
2042 ec->command_addr = ecdt_ptr->control.address; in acpi_ec_ecdt_probe()
2043 ec->data_addr = ecdt_ptr->data.address; in acpi_ec_ecdt_probe()
2051 ec->gpe = ecdt_ptr->gpe; in acpi_ec_ecdt_probe()
2053 ec->handle = ACPI_ROOT_OBJECT; in acpi_ec_ecdt_probe()
2093 if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && in acpi_ec_suspend_noirq()
2094 ec->gpe >= 0 && ec->reference_count >= 1) in acpi_ec_suspend_noirq()
2095 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE); in acpi_ec_suspend_noirq()
2108 if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) && in acpi_ec_resume_noirq()
2109 ec->gpe >= 0 && ec->reference_count >= 1) in acpi_ec_resume_noirq()
2110 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE); in acpi_ec_resume_noirq()
2127 acpi_mark_gpe_for_wake(NULL, first_ec->gpe); in acpi_ec_mark_gpe_for_wake()
2131 void acpi_ec_set_gpe_wake_mask(u8 action) in acpi_ec_set_gpe_wake_mask()
2134 acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action); in acpi_ec_set_gpe_wake_mask()
2139 return ec->events_in_progress + ec->queries_in_progress > 0; in acpi_ec_work_in_progress()
2150 * Report wakeup if the status bit is set for any enabled GPE other in acpi_ec_dispatch_gpe()
2153 if (acpi_any_gpe_status_set(first_ec->gpe)) in acpi_ec_dispatch_gpe()
2157 * Cancel the SCI wakeup and process all pending events in case there in acpi_ec_dispatch_gpe()
2158 * are any wakeup ones in there. in acpi_ec_dispatch_gpe()
2160 * Note that if any non-EC GPEs are active at this point, the SCI will in acpi_ec_dispatch_gpe()
2162 * should be missed by canceling the wakeup here. in acpi_ec_dispatch_gpe()
2167 * Dispatch the EC GPE in-band, but do not report wakeup in any case in acpi_ec_dispatch_gpe()
2170 spin_lock_irq(&first_ec->lock); in acpi_ec_dispatch_gpe()
2179 spin_unlock_irq(&first_ec->lock); in acpi_ec_dispatch_gpe()
2192 spin_lock_irq(&first_ec->lock); in acpi_ec_dispatch_gpe()
2196 spin_unlock_irq(&first_ec->lock); in acpi_ec_dispatch_gpe()
2213 if (!strncmp(val, "status", sizeof("status") - 1)) { in param_set_event_clearing()
2216 } else if (!strncmp(val, "query", sizeof("query") - 1)) { in param_set_event_clearing()
2219 } else if (!strncmp(val, "event", sizeof("event") - 1)) { in param_set_event_clearing()
2221 pr_info("Assuming SCI_EVT clearing on event reads\n"); in param_set_event_clearing()
2223 result = -EINVAL; in param_set_event_clearing()
2236 return sprintf(buffer, "event\n"); in param_get_event_clearing()
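The parameter accepts "status", "query" or "event" (the getter above reports "event"); assuming the usual built-in module prefix, it would be selected at boot roughly like this:

acpi.ec_event_clearing=query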
2280 return -ENODEV; in acpi_ec_init_workqueues()
2316 * Disable EC wakeup on following systems to prevent periodic in acpi_ec_init()
2317 * wakeup from EC GPE. in acpi_ec_init()
2321 pr_debug("Disabling EC wakeup on suspend-to-idle\n"); in acpi_ec_init()