// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 * Copyright (C) 2000 Andrew Henroid
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 */

#define pr_fmt(fmt) "ACPI: OSL: " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>
#include <linux/security.h>

#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "acpica/accommon.h"
#include "acpica/acnamesp.h"
#include "internal.h"

/* Definitions for ACPI_DEBUG_PRINT() */
#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/* ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
				      u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
bool acpi_permanent_mmap = false;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
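 *
 * Each mapping is reference-counted while in use; when the last reference
 * is dropped (see acpi_os_drop_map_ref() below), the entry is unlinked from
 * the list and its unmap/free is deferred through a queued rcu_work
 * (acpi_os_map_remove()), so that lockless RCU readers remain safe.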
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	union {
		unsigned long refcount;
		struct rcu_work rwork;
	} track;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
#define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map)

static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#else
	if (acpi_debugger_write_log(buffer) < 0) {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
	acpi_physical_address pa;

#ifdef CONFIG_KEXEC
	/*
	 * We may have been provided with an RSDP on the command line,
	 * but if a malicious user has done so they may be pointing us
	 * at modified ACPI tables that could alter kernel behaviour -
	 * so, we check the lockdown status before making use of
	 * it. If we trust it then also stash it in an architecture
	 * specific location (if appropriate) so it can be carried
	 * over further kexec()s.
	 */
	if (acpi_rsdp && !security_locked_down(LOCKDOWN_ACPI_TABLES)) {
		acpi_arch_set_root_pointer(acpi_rsdp);
		return acpi_rsdp;
	}
#endif
	pa = acpi_arch_get_root_pointer();
	if (pa)
		return pa;

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		pr_err("System description tables not found\n");
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_find_root_pointer(&pa);
	}

	return pa;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->track.refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)	0
#else
#define should_use_kmap(pfn)	page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings. If found, get a reference to it and return a pointer to it (its
 * virtual address). If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem __ref
*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		pr_err("Cannot map memory that high: 0x%llx\n", phys);
		return NULL;
	}

	if (!acpi_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->track.refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(phys, size);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK);
	map->phys = pg_off;
	map->size = pg_sz;
	map->track.refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_map_remove(struct work_struct *work)
{
	struct acpi_ioremap *map = container_of(to_rcu_work(work),
						struct acpi_ioremap,
						track.rwork);

	acpi_unmap(map->phys, map->virt);
	kfree(map);
}

/* Must be called with mutex_lock(&acpi_ioremap_lock) */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (--map->track.refcount)
		return;

	list_del_rcu(&map->list);

	INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
	queue_rcu_work(system_wq, &map->track.rwork);
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and if there are no more active references
 * to it, queue it up for later removal.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done. Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);

	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, "ACPI: %s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

/**
 * acpi_os_unmap_memory - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 */
void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return NULL;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return NULL;

	return acpi_os_map_iomem(addr, gas->bit_width / 8);
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);

	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		pr_info("Overriding _OS definition to '%s'\n", acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		pr_info("Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		pr_err("SCI (ACPI GSI %d) not registered\n", gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		pr_err("SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running,
 * monotonically increasing timer with 100ns granularity. Do not use
 * ktime_get() to implement this function because this function may get
 * called after timekeeping has been suspended. Note: calling this function
 * after timekeeping has been suspended may lead to unexpected results
 * because when timekeeping is suspended the jiffies counter is not
 * incremented. See also timekeeping_suspend().
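 *
 * The implementation below scales the jiffies delta since boot by
 * (ACPI_100NSEC_PER_SEC / HZ), i.e. the number of 100 ns ticks per jiffy,
 * to produce the 100 ns resolution value the AML Timer operand expects.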
 */
u64 acpi_os_get_timer(void)
{
	return (get_jiffies_64() - INITIAL_JIFFIES) *
		(ACPI_100NSEC_PER_SEC / HZ);
}

acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
	u32 dummy;

	if (value)
		*value = 0;
	else
		value = &dummy;

	if (width <= 8) {
		*value = inb(port);
	} else if (width <= 16) {
		*value = inw(port);
	} else if (width <= 32) {
		*value = inl(port);
	} else {
		pr_debug("%s: Access width %d not supported\n", __func__, width);
		return AE_BAD_PARAMETER;
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		pr_debug("%s: Access width %d not supported\n", __func__, width);
		return AE_BAD_PARAMETER;
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);

int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;
	int error;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	error = acpi_os_read_iomem(virt_addr, value, width);
	BUG_ON(error);

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

#ifdef CONFIG_PCI
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
			      PCI_DEVFN(pci_id->device, pci_id->function),
			      reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}
#endif

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{
	int ret = 0;

	mutex_lock(&acpi_debugger.lock);
	if (acpi_debugger.ops) {
		ret = -EBUSY;
		goto err_lock;
	}

	acpi_debugger.owner = owner;
	acpi_debugger.ops = ops;

err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
	mutex_lock(&acpi_debugger.lock);
	if (ops == acpi_debugger.ops) {
		acpi_debugger.ops = NULL;
		acpi_debugger.owner = NULL;
	}
	mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type            - Type of the callback
 *              Function        - Function to be executed
 *              Context         - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Kernel thread creation failed\n");
			status = AE_ERROR;
		}
		goto out_thread;
	}

	/*
	 * Allocate/initialize DPC structure. Note that this memory will be
	 * freed by the callee. The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_GPE_HANDLER) {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		pr_err("Unsupported os_execute type %d.\n", type);
		status = AE_ERROR;
	}

	if (ACPI_FAILURE(status))
		goto err_workqueue;

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		pr_err("Unable to queue work\n");
		status = AE_ERROR;
	}
err_workqueue:
	if (ACPI_FAILURE(status))
		kfree(dpc);
out_thread:
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);

void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	acpi_handle_debug(adev->handle,
			  "Scheduling hotplug event %u for deferred handling\n",
			  src);

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores? Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers. Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
	int ret;

	ret = acpi_debugger_wait_command_ready();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
	int ret;

	ret = acpi_debugger_notify_command_complete();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		pr_err("Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging. So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here. But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;

}

__setup("acpi_os_name=", acpi_os_name_setup);

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default. It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("Auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/* Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> a driver that later tries to access these resources will not load
 *   - lax              (1)
 *     -> a driver that later tries to access these resources will load, but
 *     you get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else if (res->flags & IORESOURCE_MEM)
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
	else
		return 0;

	if (!acpi_check_address_range(space_id, res->start, resource_size(res), 1))
		return 0;

	pr_info("Resource conflict; ACPI support missing from driver?\n");

	if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
		return -EBUSY;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
		pr_notice("Resource conflict: System may be unstable or behave erratically\n");

	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = DEFINE_RES_IO_NAMED(start, n, name);

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);

static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
					      void *_res, void **return_value)
{
	struct acpi_mem_space_context **mem_ctx;
	union acpi_operand_object *handler_obj;
	union acpi_operand_object *region_obj2;
	union acpi_operand_object *region_obj;
	struct resource *res = _res;
	acpi_status status;

	region_obj = acpi_ns_get_attached_object(handle);
	if (!region_obj)
		return AE_OK;

	handler_obj = region_obj->region.handler;
	if (!handler_obj)
		return AE_OK;

	if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return AE_OK;

	if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
		return AE_OK;

	region_obj2 = acpi_ns_get_secondary_object(region_obj);
	if (!region_obj2)
		return AE_OK;

	mem_ctx = (void *)&region_obj2->extra.region_context;

	if (!(mem_ctx[0]->address >= res->start &&
	      mem_ctx[0]->address < res->end))
		return AE_OK;

	status = handler_obj->address_space.setup(region_obj,
						  ACPI_REGION_DEACTIVATE,
						  NULL, (void **)mem_ctx);
	if (ACPI_SUCCESS(status))
		region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);

	return status;
}

/**
 * acpi_release_memory - Release any mappings done to a memory region
 * @handle: Handle to namespace node
 * @res: Memory resource
 * @level: A level that terminates the search
 *
 * Walks through @handle and unmaps all SystemMemory Operation Regions that
 * overlap with @res and that have already been activated (mapped).
 *
 * This is a helper that allows drivers to place special requirements on memory
 * region that may overlap with operation regions, primarily allowing them to
 * safely map the region as non-cached memory.
 *
 * The unmapped Operation Regions will be automatically remapped next time they
 * are called, so the drivers do not need to do anything else.
 */
acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
				u32 level)
{
	acpi_status status;

	if (!(res->flags & IORESOURCE_MEM))
		return AE_TYPE;

	status = acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
				     acpi_deactivate_mem_region, NULL,
				     res, NULL);
	if (ACPI_FAILURE(status))
		return status;

	/*
	 * Wait for all of the mappings queued up for removal by
	 * acpi_deactivate_mem_region() to actually go away.
	 */
	synchronize_rcu();
	rcu_barrier();
	flush_scheduled_work();

	return AE_OK;
}
EXPORT_SYMBOL_GPL(acpi_release_memory);

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
	__acquires(lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
	__releases(lockp)
{
	spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name       - Ascii name for the cache
 *              size       - Size of each cached object
 *              depth      - Maximum depth of the cache (in objects) <ignored>
 *              cache      - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache. If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("Static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	pr_notice("Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);

	acpi_gbl_xgpe0_block_logical_address =
		(unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_gbl_xgpe1_block_logical_address =
		(unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);

	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		void *rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug("%s: Reset register mapping %s\n", __func__,
			 rv ? "successful" : "failed");
"successful" : "failed"); 1750 } 1751 acpi_os_initialized = true; 1752 1753 return AE_OK; 1754 } 1755 1756 acpi_status __init acpi_os_initialize1(void) 1757 { 1758 kacpid_wq = alloc_workqueue("kacpid", 0, 1); 1759 kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); 1760 kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0); 1761 BUG_ON(!kacpid_wq); 1762 BUG_ON(!kacpi_notify_wq); 1763 BUG_ON(!kacpi_hotplug_wq); 1764 acpi_osi_init(); 1765 return AE_OK; 1766 } 1767 1768 acpi_status acpi_os_terminate(void) 1769 { 1770 if (acpi_irq_handler) { 1771 acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt, 1772 acpi_irq_handler); 1773 } 1774 1775 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block); 1776 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block); 1777 acpi_gbl_xgpe0_block_logical_address = 0UL; 1778 acpi_gbl_xgpe1_block_logical_address = 0UL; 1779 1780 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block); 1781 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block); 1782 1783 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) 1784 acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register); 1785 1786 destroy_workqueue(kacpid_wq); 1787 destroy_workqueue(kacpi_notify_wq); 1788 destroy_workqueue(kacpi_hotplug_wq); 1789 1790 return AE_OK; 1791 } 1792 1793 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control, 1794 u32 pm1b_control) 1795 { 1796 int rc = 0; 1797 if (__acpi_os_prepare_sleep) 1798 rc = __acpi_os_prepare_sleep(sleep_state, 1799 pm1a_control, pm1b_control); 1800 if (rc < 0) 1801 return AE_ERROR; 1802 else if (rc > 0) 1803 return AE_CTRL_TERMINATE; 1804 1805 return AE_OK; 1806 } 1807 1808 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, 1809 u32 pm1a_ctrl, u32 pm1b_ctrl)) 1810 { 1811 __acpi_os_prepare_sleep = func; 1812 } 1813 1814 #if (ACPI_REDUCED_HARDWARE) 1815 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, 1816 u32 val_b) 1817 { 1818 int rc = 0; 1819 if (__acpi_os_prepare_extended_sleep) 1820 rc = __acpi_os_prepare_extended_sleep(sleep_state, 1821 val_a, val_b); 1822 if (rc < 0) 1823 return AE_ERROR; 1824 else if (rc > 0) 1825 return AE_CTRL_TERMINATE; 1826 1827 return AE_OK; 1828 } 1829 #else 1830 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, 1831 u32 val_b) 1832 { 1833 return AE_OK; 1834 } 1835 #endif 1836 1837 void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, 1838 u32 val_a, u32 val_b)) 1839 { 1840 __acpi_os_prepare_extended_sleep = func; 1841 } 1842 1843 acpi_status acpi_os_enter_sleep(u8 sleep_state, 1844 u32 reg_a_value, u32 reg_b_value) 1845 { 1846 acpi_status status; 1847 1848 if (acpi_gbl_reduced_hardware) 1849 status = acpi_os_prepare_extended_sleep(sleep_state, 1850 reg_a_value, 1851 reg_b_value); 1852 else 1853 status = acpi_os_prepare_sleep(sleep_state, 1854 reg_a_value, reg_b_value); 1855 return status; 1856 } 1857