// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 * Copyright (C) 2000       Andrew Henroid
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "acpica/accommon.h"
#include "acpica/acnamesp.h"
#include "internal.h"

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/* ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
					       u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
bool acpi_permanent_mmap = false;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
#define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map)

static void __init acpi_request_region(struct acpi_generic_address *gas,
				       unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);
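/*
 * For orientation: acpi_os_vprintf() below honours a printk level prefix
 * embedded in the format string, so a hypothetical caller (not from the
 * original sources) could do
 *
 *	acpi_os_printf(KERN_WARNING "FADT field out of range\n");
 *
 * while messages without a level are emitted with KERN_CONT.
 */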
void acpi_os_printf(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#else
	if (acpi_debugger_write_log(buffer) < 0) {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
	acpi_physical_address pa;

#ifdef CONFIG_KEXEC
	if (acpi_rsdp)
		return acpi_rsdp;
#endif
	pa = acpi_arch_get_root_pointer();
	if (pa)
		return pa;

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		pr_err(PREFIX "System description tables not found\n");
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_find_root_pointer(&pa);
	}

	return pa;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
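/*
 * Usage sketch (hypothetical caller, not taken from any in-tree user):
 * acpi_os_get_iomem() never creates a new mapping, it only takes an extra
 * reference on an existing one, so it returns NULL for ranges that were
 * never passed to acpi_os_map_iomem().  The extra reference is dropped
 * later with acpi_os_unmap_iomem():
 *
 *	void __iomem *p = acpi_os_get_iomem(phys, 4);
 *
 *	if (p) {
 *		val = readl(p);
 *		acpi_os_unmap_iomem(p, 4);
 *	}
 */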
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings.  If found, get a reference to it and return a pointer to it (its
 * virtual address).  If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem __ref
*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (!--map->refcount)
		list_del_rcu(&map->list);
}

static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu_expedited();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and unmap it if there are no more active
 * references to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done.  Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
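/*
 * Typical pairing of the mapping helpers above (hypothetical driver code,
 * the names are made up): map a firmware-described range, access it, then
 * drop the reference so the mapping can eventually be torn down:
 *
 *	void __iomem *p = acpi_os_map_iomem(phys, len);
 *
 *	if (p) {
 *		u32 val = readl(p);
 *
 *		acpi_os_unmap_iomem(p, len);
 *	}
 */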
int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand.  Returns a 64-bit free-running,
 * monotonically increasing timer with 100ns granularity.  Do not use
 * ktime_get() to implement this function because this function may get
 * called after timekeeping has been suspended.  Note: calling this function
 * after timekeeping has been suspended may lead to unexpected results
 * because when timekeeping is suspended the jiffies counter is not
 * incremented.  See also timekeeping_suspend().
 */
u64 acpi_os_get_timer(void)
{
	return (get_jiffies_64() - INITIAL_JIFFIES) *
		(ACPI_100NSEC_PER_SEC / HZ);
}

acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);

int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{
	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;
	int error;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	error = acpi_os_read_iomem(virt_addr, value, width);
	BUG_ON(error);

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

#ifdef CONFIG_PCI
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
			      PCI_DEVFN(pci_id->device, pci_id->function),
			      reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}
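/*
 * Sketch of how ACPICA's PCI_Config address-space handler ends up using the
 * helper above (hypothetical values, not taken from the original sources):
 *
 *	struct acpi_pci_id id = { .segment = 0, .bus = 0,
 *				  .device = 2, .function = 0 };
 *	u64 val;
 *
 *	acpi_os_read_pci_configuration(&id, PCI_VENDOR_ID, &val, 16);
 *
 * Only 8-, 16- and 32-bit accesses are supported; anything else is AE_ERROR.
 */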
acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}
#endif

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{
	int ret = 0;

	mutex_lock(&acpi_debugger.lock);
	if (acpi_debugger.ops) {
		ret = -EBUSY;
		goto err_lock;
	}

	acpi_debugger.owner = owner;
	acpi_debugger.ops = ops;

err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
	mutex_lock(&acpi_debugger.lock);
	if (ops == acpi_debugger.ops) {
		acpi_debugger.ops = NULL;
		acpi_debugger.owner = NULL;
	}
	mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Call to kthread_create() failed.\n");
			status = AE_ERROR;
		}
		goto out_thread;
	}

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */
	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_GPE_HANDLER) {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		pr_err("Unsupported os_execute type %d.\n", type);
		status = AE_ERROR;
	}

	if (ACPI_FAILURE(status))
		goto err_workqueue;

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
	}
err_workqueue:
	if (ACPI_FAILURE(status))
		kfree(dpc);
out_thread:
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);

void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
			  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle *handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
	int ret;

	ret = acpi_debugger_wait_command_ready();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
	int ret;

	ret = acpi_debugger_notify_command_complete();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("ACPI: auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/* Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
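/*
 * Usage sketch (hypothetical driver, made-up names): a legacy driver can ask
 * whether an I/O range collides with an ACPI OperationRegion before claiming
 * it; with acpi_enforce_resources=strict a conflict is fatal for the driver:
 *
 *	if (acpi_check_region(io_base, io_len, "mydrv"))
 *		return -EBUSY;
 */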
static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
					      void *_res, void **return_value)
{
	struct acpi_mem_space_context **mem_ctx;
	union acpi_operand_object *handler_obj;
	union acpi_operand_object *region_obj2;
	union acpi_operand_object *region_obj;
	struct resource *res = _res;
	acpi_status status;

	region_obj = acpi_ns_get_attached_object(handle);
	if (!region_obj)
		return AE_OK;

	handler_obj = region_obj->region.handler;
	if (!handler_obj)
		return AE_OK;

	if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return AE_OK;

	if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
		return AE_OK;

	region_obj2 = acpi_ns_get_secondary_object(region_obj);
	if (!region_obj2)
		return AE_OK;

	mem_ctx = (void *)&region_obj2->extra.region_context;

	if (!(mem_ctx[0]->address >= res->start &&
	      mem_ctx[0]->address < res->end))
		return AE_OK;

	status = handler_obj->address_space.setup(region_obj,
						  ACPI_REGION_DEACTIVATE,
						  NULL, (void **)mem_ctx);
	if (ACPI_SUCCESS(status))
		region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);

	return status;
}

/**
 * acpi_release_memory - Release any mappings done to a memory region
 * @handle: Handle to namespace node
 * @res: Memory resource
 * @level: A level that terminates the search
 *
 * Walks through @handle and unmaps all SystemMemory Operation Regions that
 * overlap with @res and that have already been activated (mapped).
 *
 * This is a helper that allows drivers to place special requirements on memory
 * regions that may overlap with operation regions, primarily allowing them to
 * safely map the region as non-cached memory.
 *
 * The unmapped Operation Regions will be automatically remapped next time they
 * are called, so the drivers do not need to do anything else.
 */
acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
				u32 level)
{
	if (!(res->flags & IORESOURCE_MEM))
		return AE_TYPE;

	return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
				   acpi_deactivate_mem_region, NULL, res, NULL);
}
EXPORT_SYMBOL_GPL(acpi_release_memory);

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;

	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock.  See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t **cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t *cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t *cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t *cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "ACPI: Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_osi_init();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;

	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
					   u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}
#if (ACPI_REDUCED_HARDWARE)
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	int rc = 0;

	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}
#else
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	return AE_OK;
}
#endif

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
						     u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}

acpi_status acpi_os_enter_sleep(u8 sleep_state,
				u32 reg_a_value, u32 reg_b_value)
{
	acpi_status status;

	if (acpi_gbl_reduced_hardware)
		status = acpi_os_prepare_extended_sleep(sleep_state,
							reg_a_value,
							reg_b_value);
	else
		status = acpi_os_prepare_sleep(sleep_state,
					       reg_a_value, reg_b_value);
	return status;
}