1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * acpi_osl.c - OS-dependent functions ($Revision: 83 $) 4 * 5 * Copyright (C) 2000 Andrew Henroid 6 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 7 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 8 * Copyright (c) 2008 Intel Corporation 9 * Author: Matthew Wilcox <willy@linux.intel.com> 10 */ 11 12 #include <linux/module.h> 13 #include <linux/kernel.h> 14 #include <linux/slab.h> 15 #include <linux/mm.h> 16 #include <linux/highmem.h> 17 #include <linux/pci.h> 18 #include <linux/interrupt.h> 19 #include <linux/kmod.h> 20 #include <linux/delay.h> 21 #include <linux/workqueue.h> 22 #include <linux/nmi.h> 23 #include <linux/acpi.h> 24 #include <linux/efi.h> 25 #include <linux/ioport.h> 26 #include <linux/list.h> 27 #include <linux/jiffies.h> 28 #include <linux/semaphore.h> 29 30 #include <asm/io.h> 31 #include <linux/uaccess.h> 32 #include <linux/io-64-nonatomic-lo-hi.h> 33 34 #include "acpica/accommon.h" 35 #include "acpica/acnamesp.h" 36 #include "internal.h" 37 38 #define _COMPONENT ACPI_OS_SERVICES 39 ACPI_MODULE_NAME("osl"); 40 41 struct acpi_os_dpc { 42 acpi_osd_exec_callback function; 43 void *context; 44 struct work_struct work; 45 }; 46 47 #ifdef ENABLE_DEBUGGER 48 #include <linux/kdb.h> 49 50 /* stuff for debugger support */ 51 int acpi_in_debugger; 52 EXPORT_SYMBOL(acpi_in_debugger); 53 #endif /*ENABLE_DEBUGGER */ 54 55 static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl, 56 u32 pm1b_ctrl); 57 static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a, 58 u32 val_b); 59 60 static acpi_osd_handler acpi_irq_handler; 61 static void *acpi_irq_context; 62 static struct workqueue_struct *kacpid_wq; 63 static struct workqueue_struct *kacpi_notify_wq; 64 static struct workqueue_struct *kacpi_hotplug_wq; 65 static bool acpi_os_initialized; 66 unsigned int acpi_sci_irq = INVALID_ACPI_IRQ; 67 bool acpi_permanent_mmap = false; 68 69 /* 70 * This list of permanent mappings is for memory that may be accessed from 71 * interrupt context, where we can't do the ioremap(). 72 */ 73 struct acpi_ioremap { 74 struct list_head list; 75 void __iomem *virt; 76 acpi_physical_address phys; 77 acpi_size size; 78 unsigned long refcount; 79 }; 80 81 static LIST_HEAD(acpi_ioremaps); 82 static DEFINE_MUTEX(acpi_ioremap_lock); 83 84 static void __init acpi_request_region (struct acpi_generic_address *gas, 85 unsigned int length, char *desc) 86 { 87 u64 addr; 88 89 /* Handle possible alignment issues */ 90 memcpy(&addr, &gas->address, sizeof(addr)); 91 if (!addr || !length) 92 return; 93 94 /* Resources are never freed */ 95 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) 96 request_region(addr, length, desc); 97 else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 98 request_mem_region(addr, length, desc); 99 } 100 101 static int __init acpi_reserve_resources(void) 102 { 103 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length, 104 "ACPI PM1a_EVT_BLK"); 105 106 acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length, 107 "ACPI PM1b_EVT_BLK"); 108 109 acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length, 110 "ACPI PM1a_CNT_BLK"); 111 112 acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length, 113 "ACPI PM1b_CNT_BLK"); 114 115 if (acpi_gbl_FADT.pm_timer_length == 4) 116 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR"); 117 118 acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length, 119 "ACPI PM2_CNT_BLK"); 120 121 /* Length of GPE blocks must be a non-negative multiple of 2 */ 122 123 if (!(acpi_gbl_FADT.gpe0_block_length & 0x1)) 124 acpi_request_region(&acpi_gbl_FADT.xgpe0_block, 125 acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK"); 126 127 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1)) 128 acpi_request_region(&acpi_gbl_FADT.xgpe1_block, 129 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK"); 130 131 return 0; 132 } 133 fs_initcall_sync(acpi_reserve_resources); 134 135 void acpi_os_printf(const char *fmt, ...) 136 { 137 va_list args; 138 va_start(args, fmt); 139 acpi_os_vprintf(fmt, args); 140 va_end(args); 141 } 142 EXPORT_SYMBOL(acpi_os_printf); 143 144 void acpi_os_vprintf(const char *fmt, va_list args) 145 { 146 static char buffer[512]; 147 148 vsprintf(buffer, fmt, args); 149 150 #ifdef ENABLE_DEBUGGER 151 if (acpi_in_debugger) { 152 kdb_printf("%s", buffer); 153 } else { 154 if (printk_get_level(buffer)) 155 printk("%s", buffer); 156 else 157 printk(KERN_CONT "%s", buffer); 158 } 159 #else 160 if (acpi_debugger_write_log(buffer) < 0) { 161 if (printk_get_level(buffer)) 162 printk("%s", buffer); 163 else 164 printk(KERN_CONT "%s", buffer); 165 } 166 #endif 167 } 168 169 #ifdef CONFIG_KEXEC 170 static unsigned long acpi_rsdp; 171 static int __init setup_acpi_rsdp(char *arg) 172 { 173 return kstrtoul(arg, 16, &acpi_rsdp); 174 } 175 early_param("acpi_rsdp", setup_acpi_rsdp); 176 #endif 177 178 acpi_physical_address __init acpi_os_get_root_pointer(void) 179 { 180 acpi_physical_address pa; 181 182 #ifdef CONFIG_KEXEC 183 if (acpi_rsdp) 184 return acpi_rsdp; 185 #endif 186 pa = acpi_arch_get_root_pointer(); 187 if (pa) 188 return pa; 189 190 if (efi_enabled(EFI_CONFIG_TABLES)) { 191 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 192 return efi.acpi20; 193 if (efi.acpi != EFI_INVALID_TABLE_ADDR) 194 return efi.acpi; 195 pr_err(PREFIX "System description tables not found\n"); 196 } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) { 197 acpi_find_root_pointer(&pa); 198 } 199 200 return pa; 201 } 202 203 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 204 static struct acpi_ioremap * 205 acpi_map_lookup(acpi_physical_address phys, acpi_size size) 206 { 207 struct acpi_ioremap *map; 208 209 list_for_each_entry_rcu(map, &acpi_ioremaps, list) 210 if (map->phys <= phys && 211 phys + size <= map->phys + map->size) 212 return map; 213 214 return NULL; 215 } 216 217 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 218 static void __iomem * 219 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size) 220 { 221 struct acpi_ioremap *map; 222 223 map = acpi_map_lookup(phys, size); 224 if (map) 225 return map->virt + (phys - map->phys); 226 227 return NULL; 228 } 229 230 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size) 231 { 232 struct acpi_ioremap *map; 233 void __iomem *virt = NULL; 234 235 mutex_lock(&acpi_ioremap_lock); 236 map = acpi_map_lookup(phys, size); 237 if (map) { 238 virt = map->virt + (phys - map->phys); 239 map->refcount++; 240 } 241 mutex_unlock(&acpi_ioremap_lock); 242 return virt; 243 } 244 EXPORT_SYMBOL_GPL(acpi_os_get_iomem); 245 246 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 247 static struct acpi_ioremap * 248 acpi_map_lookup_virt(void __iomem *virt, acpi_size size) 249 { 250 struct acpi_ioremap *map; 251 252 list_for_each_entry_rcu(map, &acpi_ioremaps, list) 253 if (map->virt <= virt && 254 virt + size <= map->virt + map->size) 255 return map; 256 257 return NULL; 258 } 259 260 #if defined(CONFIG_IA64) || defined(CONFIG_ARM64) 261 /* ioremap will take care of cache attributes */ 262 #define should_use_kmap(pfn) 0 263 #else 264 #define should_use_kmap(pfn) page_is_ram(pfn) 265 #endif 266 267 static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz) 268 { 269 unsigned long pfn; 270 271 pfn = pg_off >> PAGE_SHIFT; 272 if (should_use_kmap(pfn)) { 273 if (pg_sz > PAGE_SIZE) 274 return NULL; 275 return (void __iomem __force *)kmap(pfn_to_page(pfn)); 276 } else 277 return acpi_os_ioremap(pg_off, pg_sz); 278 } 279 280 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) 281 { 282 unsigned long pfn; 283 284 pfn = pg_off >> PAGE_SHIFT; 285 if (should_use_kmap(pfn)) 286 kunmap(pfn_to_page(pfn)); 287 else 288 iounmap(vaddr); 289 } 290 291 /** 292 * acpi_os_map_iomem - Get a virtual address for a given physical address range. 293 * @phys: Start of the physical address range to map. 294 * @size: Size of the physical address range to map. 295 * 296 * Look up the given physical address range in the list of existing ACPI memory 297 * mappings. If found, get a reference to it and return a pointer to it (its 298 * virtual address). If not found, map it, add it to that list and return a 299 * pointer to it. 300 * 301 * During early init (when acpi_permanent_mmap has not been set yet) this 302 * routine simply calls __acpi_map_table() to get the job done. 303 */ 304 void __iomem *__ref 305 acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) 306 { 307 struct acpi_ioremap *map; 308 void __iomem *virt; 309 acpi_physical_address pg_off; 310 acpi_size pg_sz; 311 312 if (phys > ULONG_MAX) { 313 printk(KERN_ERR PREFIX "Cannot map memory that high\n"); 314 return NULL; 315 } 316 317 if (!acpi_permanent_mmap) 318 return __acpi_map_table((unsigned long)phys, size); 319 320 mutex_lock(&acpi_ioremap_lock); 321 /* Check if there's a suitable mapping already. */ 322 map = acpi_map_lookup(phys, size); 323 if (map) { 324 map->refcount++; 325 goto out; 326 } 327 328 map = kzalloc(sizeof(*map), GFP_KERNEL); 329 if (!map) { 330 mutex_unlock(&acpi_ioremap_lock); 331 return NULL; 332 } 333 334 pg_off = round_down(phys, PAGE_SIZE); 335 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; 336 virt = acpi_map(pg_off, pg_sz); 337 if (!virt) { 338 mutex_unlock(&acpi_ioremap_lock); 339 kfree(map); 340 return NULL; 341 } 342 343 INIT_LIST_HEAD(&map->list); 344 map->virt = virt; 345 map->phys = pg_off; 346 map->size = pg_sz; 347 map->refcount = 1; 348 349 list_add_tail_rcu(&map->list, &acpi_ioremaps); 350 351 out: 352 mutex_unlock(&acpi_ioremap_lock); 353 return map->virt + (phys - map->phys); 354 } 355 EXPORT_SYMBOL_GPL(acpi_os_map_iomem); 356 357 void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size) 358 { 359 return (void *)acpi_os_map_iomem(phys, size); 360 } 361 EXPORT_SYMBOL_GPL(acpi_os_map_memory); 362 363 static void acpi_os_drop_map_ref(struct acpi_ioremap *map) 364 { 365 if (!--map->refcount) 366 list_del_rcu(&map->list); 367 } 368 369 static void acpi_os_map_cleanup(struct acpi_ioremap *map) 370 { 371 if (!map->refcount) { 372 synchronize_rcu_expedited(); 373 acpi_unmap(map->phys, map->virt); 374 kfree(map); 375 } 376 } 377 378 /** 379 * acpi_os_unmap_iomem - Drop a memory mapping reference. 380 * @virt: Start of the address range to drop a reference to. 381 * @size: Size of the address range to drop a reference to. 382 * 383 * Look up the given virtual address range in the list of existing ACPI memory 384 * mappings, drop a reference to it and unmap it if there are no more active 385 * references to it. 386 * 387 * During early init (when acpi_permanent_mmap has not been set yet) this 388 * routine simply calls __acpi_unmap_table() to get the job done. Since 389 * __acpi_unmap_table() is an __init function, the __ref annotation is needed 390 * here. 391 */ 392 void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size) 393 { 394 struct acpi_ioremap *map; 395 396 if (!acpi_permanent_mmap) { 397 __acpi_unmap_table(virt, size); 398 return; 399 } 400 401 mutex_lock(&acpi_ioremap_lock); 402 map = acpi_map_lookup_virt(virt, size); 403 if (!map) { 404 mutex_unlock(&acpi_ioremap_lock); 405 WARN(true, PREFIX "%s: bad address %p\n", __func__, virt); 406 return; 407 } 408 acpi_os_drop_map_ref(map); 409 mutex_unlock(&acpi_ioremap_lock); 410 411 acpi_os_map_cleanup(map); 412 } 413 EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem); 414 415 void __ref acpi_os_unmap_memory(void *virt, acpi_size size) 416 { 417 return acpi_os_unmap_iomem((void __iomem *)virt, size); 418 } 419 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); 420 421 int acpi_os_map_generic_address(struct acpi_generic_address *gas) 422 { 423 u64 addr; 424 void __iomem *virt; 425 426 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 427 return 0; 428 429 /* Handle possible alignment issues */ 430 memcpy(&addr, &gas->address, sizeof(addr)); 431 if (!addr || !gas->bit_width) 432 return -EINVAL; 433 434 virt = acpi_os_map_iomem(addr, gas->bit_width / 8); 435 if (!virt) 436 return -EIO; 437 438 return 0; 439 } 440 EXPORT_SYMBOL(acpi_os_map_generic_address); 441 442 void acpi_os_unmap_generic_address(struct acpi_generic_address *gas) 443 { 444 u64 addr; 445 struct acpi_ioremap *map; 446 447 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 448 return; 449 450 /* Handle possible alignment issues */ 451 memcpy(&addr, &gas->address, sizeof(addr)); 452 if (!addr || !gas->bit_width) 453 return; 454 455 mutex_lock(&acpi_ioremap_lock); 456 map = acpi_map_lookup(addr, gas->bit_width / 8); 457 if (!map) { 458 mutex_unlock(&acpi_ioremap_lock); 459 return; 460 } 461 acpi_os_drop_map_ref(map); 462 mutex_unlock(&acpi_ioremap_lock); 463 464 acpi_os_map_cleanup(map); 465 } 466 EXPORT_SYMBOL(acpi_os_unmap_generic_address); 467 468 #ifdef ACPI_FUTURE_USAGE 469 acpi_status 470 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys) 471 { 472 if (!phys || !virt) 473 return AE_BAD_PARAMETER; 474 475 *phys = virt_to_phys(virt); 476 477 return AE_OK; 478 } 479 #endif 480 481 #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE 482 static bool acpi_rev_override; 483 484 int __init acpi_rev_override_setup(char *str) 485 { 486 acpi_rev_override = true; 487 return 1; 488 } 489 __setup("acpi_rev_override", acpi_rev_override_setup); 490 #else 491 #define acpi_rev_override false 492 #endif 493 494 #define ACPI_MAX_OVERRIDE_LEN 100 495 496 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN]; 497 498 acpi_status 499 acpi_os_predefined_override(const struct acpi_predefined_names *init_val, 500 acpi_string *new_val) 501 { 502 if (!init_val || !new_val) 503 return AE_BAD_PARAMETER; 504 505 *new_val = NULL; 506 if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) { 507 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n", 508 acpi_os_name); 509 *new_val = acpi_os_name; 510 } 511 512 if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) { 513 printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n"); 514 *new_val = (char *)5; 515 } 516 517 return AE_OK; 518 } 519 520 static irqreturn_t acpi_irq(int irq, void *dev_id) 521 { 522 u32 handled; 523 524 handled = (*acpi_irq_handler) (acpi_irq_context); 525 526 if (handled) { 527 acpi_irq_handled++; 528 return IRQ_HANDLED; 529 } else { 530 acpi_irq_not_handled++; 531 return IRQ_NONE; 532 } 533 } 534 535 acpi_status 536 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler, 537 void *context) 538 { 539 unsigned int irq; 540 541 acpi_irq_stats_init(); 542 543 /* 544 * ACPI interrupts different from the SCI in our copy of the FADT are 545 * not supported. 546 */ 547 if (gsi != acpi_gbl_FADT.sci_interrupt) 548 return AE_BAD_PARAMETER; 549 550 if (acpi_irq_handler) 551 return AE_ALREADY_ACQUIRED; 552 553 if (acpi_gsi_to_irq(gsi, &irq) < 0) { 554 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n", 555 gsi); 556 return AE_OK; 557 } 558 559 acpi_irq_handler = handler; 560 acpi_irq_context = context; 561 if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) { 562 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); 563 acpi_irq_handler = NULL; 564 return AE_NOT_ACQUIRED; 565 } 566 acpi_sci_irq = irq; 567 568 return AE_OK; 569 } 570 571 acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler) 572 { 573 if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid()) 574 return AE_BAD_PARAMETER; 575 576 free_irq(acpi_sci_irq, acpi_irq); 577 acpi_irq_handler = NULL; 578 acpi_sci_irq = INVALID_ACPI_IRQ; 579 580 return AE_OK; 581 } 582 583 /* 584 * Running in interpreter thread context, safe to sleep 585 */ 586 587 void acpi_os_sleep(u64 ms) 588 { 589 msleep(ms); 590 } 591 592 void acpi_os_stall(u32 us) 593 { 594 while (us) { 595 u32 delay = 1000; 596 597 if (delay > us) 598 delay = us; 599 udelay(delay); 600 touch_nmi_watchdog(); 601 us -= delay; 602 } 603 } 604 605 /* 606 * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running, 607 * monotonically increasing timer with 100ns granularity. Do not use 608 * ktime_get() to implement this function because this function may get 609 * called after timekeeping has been suspended. Note: calling this function 610 * after timekeeping has been suspended may lead to unexpected results 611 * because when timekeeping is suspended the jiffies counter is not 612 * incremented. See also timekeeping_suspend(). 613 */ 614 u64 acpi_os_get_timer(void) 615 { 616 return (get_jiffies_64() - INITIAL_JIFFIES) * 617 (ACPI_100NSEC_PER_SEC / HZ); 618 } 619 620 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width) 621 { 622 u32 dummy; 623 624 if (!value) 625 value = &dummy; 626 627 *value = 0; 628 if (width <= 8) { 629 *(u8 *) value = inb(port); 630 } else if (width <= 16) { 631 *(u16 *) value = inw(port); 632 } else if (width <= 32) { 633 *(u32 *) value = inl(port); 634 } else { 635 BUG(); 636 } 637 638 return AE_OK; 639 } 640 641 EXPORT_SYMBOL(acpi_os_read_port); 642 643 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width) 644 { 645 if (width <= 8) { 646 outb(value, port); 647 } else if (width <= 16) { 648 outw(value, port); 649 } else if (width <= 32) { 650 outl(value, port); 651 } else { 652 BUG(); 653 } 654 655 return AE_OK; 656 } 657 658 EXPORT_SYMBOL(acpi_os_write_port); 659 660 int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width) 661 { 662 663 switch (width) { 664 case 8: 665 *(u8 *) value = readb(virt_addr); 666 break; 667 case 16: 668 *(u16 *) value = readw(virt_addr); 669 break; 670 case 32: 671 *(u32 *) value = readl(virt_addr); 672 break; 673 case 64: 674 *(u64 *) value = readq(virt_addr); 675 break; 676 default: 677 return -EINVAL; 678 } 679 680 return 0; 681 } 682 683 acpi_status 684 acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width) 685 { 686 void __iomem *virt_addr; 687 unsigned int size = width / 8; 688 bool unmap = false; 689 u64 dummy; 690 int error; 691 692 rcu_read_lock(); 693 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 694 if (!virt_addr) { 695 rcu_read_unlock(); 696 virt_addr = acpi_os_ioremap(phys_addr, size); 697 if (!virt_addr) 698 return AE_BAD_ADDRESS; 699 unmap = true; 700 } 701 702 if (!value) 703 value = &dummy; 704 705 error = acpi_os_read_iomem(virt_addr, value, width); 706 BUG_ON(error); 707 708 if (unmap) 709 iounmap(virt_addr); 710 else 711 rcu_read_unlock(); 712 713 return AE_OK; 714 } 715 716 acpi_status 717 acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width) 718 { 719 void __iomem *virt_addr; 720 unsigned int size = width / 8; 721 bool unmap = false; 722 723 rcu_read_lock(); 724 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 725 if (!virt_addr) { 726 rcu_read_unlock(); 727 virt_addr = acpi_os_ioremap(phys_addr, size); 728 if (!virt_addr) 729 return AE_BAD_ADDRESS; 730 unmap = true; 731 } 732 733 switch (width) { 734 case 8: 735 writeb(value, virt_addr); 736 break; 737 case 16: 738 writew(value, virt_addr); 739 break; 740 case 32: 741 writel(value, virt_addr); 742 break; 743 case 64: 744 writeq(value, virt_addr); 745 break; 746 default: 747 BUG(); 748 } 749 750 if (unmap) 751 iounmap(virt_addr); 752 else 753 rcu_read_unlock(); 754 755 return AE_OK; 756 } 757 758 #ifdef CONFIG_PCI 759 acpi_status 760 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, 761 u64 *value, u32 width) 762 { 763 int result, size; 764 u32 value32; 765 766 if (!value) 767 return AE_BAD_PARAMETER; 768 769 switch (width) { 770 case 8: 771 size = 1; 772 break; 773 case 16: 774 size = 2; 775 break; 776 case 32: 777 size = 4; 778 break; 779 default: 780 return AE_ERROR; 781 } 782 783 result = raw_pci_read(pci_id->segment, pci_id->bus, 784 PCI_DEVFN(pci_id->device, pci_id->function), 785 reg, size, &value32); 786 *value = value32; 787 788 return (result ? AE_ERROR : AE_OK); 789 } 790 791 acpi_status 792 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, 793 u64 value, u32 width) 794 { 795 int result, size; 796 797 switch (width) { 798 case 8: 799 size = 1; 800 break; 801 case 16: 802 size = 2; 803 break; 804 case 32: 805 size = 4; 806 break; 807 default: 808 return AE_ERROR; 809 } 810 811 result = raw_pci_write(pci_id->segment, pci_id->bus, 812 PCI_DEVFN(pci_id->device, pci_id->function), 813 reg, size, value); 814 815 return (result ? AE_ERROR : AE_OK); 816 } 817 #endif 818 819 static void acpi_os_execute_deferred(struct work_struct *work) 820 { 821 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); 822 823 dpc->function(dpc->context); 824 kfree(dpc); 825 } 826 827 #ifdef CONFIG_ACPI_DEBUGGER 828 static struct acpi_debugger acpi_debugger; 829 static bool acpi_debugger_initialized; 830 831 int acpi_register_debugger(struct module *owner, 832 const struct acpi_debugger_ops *ops) 833 { 834 int ret = 0; 835 836 mutex_lock(&acpi_debugger.lock); 837 if (acpi_debugger.ops) { 838 ret = -EBUSY; 839 goto err_lock; 840 } 841 842 acpi_debugger.owner = owner; 843 acpi_debugger.ops = ops; 844 845 err_lock: 846 mutex_unlock(&acpi_debugger.lock); 847 return ret; 848 } 849 EXPORT_SYMBOL(acpi_register_debugger); 850 851 void acpi_unregister_debugger(const struct acpi_debugger_ops *ops) 852 { 853 mutex_lock(&acpi_debugger.lock); 854 if (ops == acpi_debugger.ops) { 855 acpi_debugger.ops = NULL; 856 acpi_debugger.owner = NULL; 857 } 858 mutex_unlock(&acpi_debugger.lock); 859 } 860 EXPORT_SYMBOL(acpi_unregister_debugger); 861 862 int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context) 863 { 864 int ret; 865 int (*func)(acpi_osd_exec_callback, void *); 866 struct module *owner; 867 868 if (!acpi_debugger_initialized) 869 return -ENODEV; 870 mutex_lock(&acpi_debugger.lock); 871 if (!acpi_debugger.ops) { 872 ret = -ENODEV; 873 goto err_lock; 874 } 875 if (!try_module_get(acpi_debugger.owner)) { 876 ret = -ENODEV; 877 goto err_lock; 878 } 879 func = acpi_debugger.ops->create_thread; 880 owner = acpi_debugger.owner; 881 mutex_unlock(&acpi_debugger.lock); 882 883 ret = func(function, context); 884 885 mutex_lock(&acpi_debugger.lock); 886 module_put(owner); 887 err_lock: 888 mutex_unlock(&acpi_debugger.lock); 889 return ret; 890 } 891 892 ssize_t acpi_debugger_write_log(const char *msg) 893 { 894 ssize_t ret; 895 ssize_t (*func)(const char *); 896 struct module *owner; 897 898 if (!acpi_debugger_initialized) 899 return -ENODEV; 900 mutex_lock(&acpi_debugger.lock); 901 if (!acpi_debugger.ops) { 902 ret = -ENODEV; 903 goto err_lock; 904 } 905 if (!try_module_get(acpi_debugger.owner)) { 906 ret = -ENODEV; 907 goto err_lock; 908 } 909 func = acpi_debugger.ops->write_log; 910 owner = acpi_debugger.owner; 911 mutex_unlock(&acpi_debugger.lock); 912 913 ret = func(msg); 914 915 mutex_lock(&acpi_debugger.lock); 916 module_put(owner); 917 err_lock: 918 mutex_unlock(&acpi_debugger.lock); 919 return ret; 920 } 921 922 ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length) 923 { 924 ssize_t ret; 925 ssize_t (*func)(char *, size_t); 926 struct module *owner; 927 928 if (!acpi_debugger_initialized) 929 return -ENODEV; 930 mutex_lock(&acpi_debugger.lock); 931 if (!acpi_debugger.ops) { 932 ret = -ENODEV; 933 goto err_lock; 934 } 935 if (!try_module_get(acpi_debugger.owner)) { 936 ret = -ENODEV; 937 goto err_lock; 938 } 939 func = acpi_debugger.ops->read_cmd; 940 owner = acpi_debugger.owner; 941 mutex_unlock(&acpi_debugger.lock); 942 943 ret = func(buffer, buffer_length); 944 945 mutex_lock(&acpi_debugger.lock); 946 module_put(owner); 947 err_lock: 948 mutex_unlock(&acpi_debugger.lock); 949 return ret; 950 } 951 952 int acpi_debugger_wait_command_ready(void) 953 { 954 int ret; 955 int (*func)(bool, char *, size_t); 956 struct module *owner; 957 958 if (!acpi_debugger_initialized) 959 return -ENODEV; 960 mutex_lock(&acpi_debugger.lock); 961 if (!acpi_debugger.ops) { 962 ret = -ENODEV; 963 goto err_lock; 964 } 965 if (!try_module_get(acpi_debugger.owner)) { 966 ret = -ENODEV; 967 goto err_lock; 968 } 969 func = acpi_debugger.ops->wait_command_ready; 970 owner = acpi_debugger.owner; 971 mutex_unlock(&acpi_debugger.lock); 972 973 ret = func(acpi_gbl_method_executing, 974 acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE); 975 976 mutex_lock(&acpi_debugger.lock); 977 module_put(owner); 978 err_lock: 979 mutex_unlock(&acpi_debugger.lock); 980 return ret; 981 } 982 983 int acpi_debugger_notify_command_complete(void) 984 { 985 int ret; 986 int (*func)(void); 987 struct module *owner; 988 989 if (!acpi_debugger_initialized) 990 return -ENODEV; 991 mutex_lock(&acpi_debugger.lock); 992 if (!acpi_debugger.ops) { 993 ret = -ENODEV; 994 goto err_lock; 995 } 996 if (!try_module_get(acpi_debugger.owner)) { 997 ret = -ENODEV; 998 goto err_lock; 999 } 1000 func = acpi_debugger.ops->notify_command_complete; 1001 owner = acpi_debugger.owner; 1002 mutex_unlock(&acpi_debugger.lock); 1003 1004 ret = func(); 1005 1006 mutex_lock(&acpi_debugger.lock); 1007 module_put(owner); 1008 err_lock: 1009 mutex_unlock(&acpi_debugger.lock); 1010 return ret; 1011 } 1012 1013 int __init acpi_debugger_init(void) 1014 { 1015 mutex_init(&acpi_debugger.lock); 1016 acpi_debugger_initialized = true; 1017 return 0; 1018 } 1019 #endif 1020 1021 /******************************************************************************* 1022 * 1023 * FUNCTION: acpi_os_execute 1024 * 1025 * PARAMETERS: Type - Type of the callback 1026 * Function - Function to be executed 1027 * Context - Function parameters 1028 * 1029 * RETURN: Status 1030 * 1031 * DESCRIPTION: Depending on type, either queues function for deferred execution or 1032 * immediately executes function on a separate thread. 1033 * 1034 ******************************************************************************/ 1035 1036 acpi_status acpi_os_execute(acpi_execute_type type, 1037 acpi_osd_exec_callback function, void *context) 1038 { 1039 acpi_status status = AE_OK; 1040 struct acpi_os_dpc *dpc; 1041 struct workqueue_struct *queue; 1042 int ret; 1043 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 1044 "Scheduling function [%p(%p)] for deferred execution.\n", 1045 function, context)); 1046 1047 if (type == OSL_DEBUGGER_MAIN_THREAD) { 1048 ret = acpi_debugger_create_thread(function, context); 1049 if (ret) { 1050 pr_err("Call to kthread_create() failed.\n"); 1051 status = AE_ERROR; 1052 } 1053 goto out_thread; 1054 } 1055 1056 /* 1057 * Allocate/initialize DPC structure. Note that this memory will be 1058 * freed by the callee. The kernel handles the work_struct list in a 1059 * way that allows us to also free its memory inside the callee. 1060 * Because we may want to schedule several tasks with different 1061 * parameters we can't use the approach some kernel code uses of 1062 * having a static work_struct. 1063 */ 1064 1065 dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); 1066 if (!dpc) 1067 return AE_NO_MEMORY; 1068 1069 dpc->function = function; 1070 dpc->context = context; 1071 1072 /* 1073 * To prevent lockdep from complaining unnecessarily, make sure that 1074 * there is a different static lockdep key for each workqueue by using 1075 * INIT_WORK() for each of them separately. 1076 */ 1077 if (type == OSL_NOTIFY_HANDLER) { 1078 queue = kacpi_notify_wq; 1079 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 1080 } else if (type == OSL_GPE_HANDLER) { 1081 queue = kacpid_wq; 1082 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 1083 } else { 1084 pr_err("Unsupported os_execute type %d.\n", type); 1085 status = AE_ERROR; 1086 } 1087 1088 if (ACPI_FAILURE(status)) 1089 goto err_workqueue; 1090 1091 /* 1092 * On some machines, a software-initiated SMI causes corruption unless 1093 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but 1094 * typically it's done in GPE-related methods that are run via 1095 * workqueues, so we can avoid the known corruption cases by always 1096 * queueing on CPU 0. 1097 */ 1098 ret = queue_work_on(0, queue, &dpc->work); 1099 if (!ret) { 1100 printk(KERN_ERR PREFIX 1101 "Call to queue_work() failed.\n"); 1102 status = AE_ERROR; 1103 } 1104 err_workqueue: 1105 if (ACPI_FAILURE(status)) 1106 kfree(dpc); 1107 out_thread: 1108 return status; 1109 } 1110 EXPORT_SYMBOL(acpi_os_execute); 1111 1112 void acpi_os_wait_events_complete(void) 1113 { 1114 /* 1115 * Make sure the GPE handler or the fixed event handler is not used 1116 * on another CPU after removal. 1117 */ 1118 if (acpi_sci_irq_valid()) 1119 synchronize_hardirq(acpi_sci_irq); 1120 flush_workqueue(kacpid_wq); 1121 flush_workqueue(kacpi_notify_wq); 1122 } 1123 EXPORT_SYMBOL(acpi_os_wait_events_complete); 1124 1125 struct acpi_hp_work { 1126 struct work_struct work; 1127 struct acpi_device *adev; 1128 u32 src; 1129 }; 1130 1131 static void acpi_hotplug_work_fn(struct work_struct *work) 1132 { 1133 struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work); 1134 1135 acpi_os_wait_events_complete(); 1136 acpi_device_hotplug(hpw->adev, hpw->src); 1137 kfree(hpw); 1138 } 1139 1140 acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src) 1141 { 1142 struct acpi_hp_work *hpw; 1143 1144 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 1145 "Scheduling hotplug event (%p, %u) for deferred execution.\n", 1146 adev, src)); 1147 1148 hpw = kmalloc(sizeof(*hpw), GFP_KERNEL); 1149 if (!hpw) 1150 return AE_NO_MEMORY; 1151 1152 INIT_WORK(&hpw->work, acpi_hotplug_work_fn); 1153 hpw->adev = adev; 1154 hpw->src = src; 1155 /* 1156 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because 1157 * the hotplug code may call driver .remove() functions, which may 1158 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush 1159 * these workqueues. 1160 */ 1161 if (!queue_work(kacpi_hotplug_wq, &hpw->work)) { 1162 kfree(hpw); 1163 return AE_ERROR; 1164 } 1165 return AE_OK; 1166 } 1167 1168 bool acpi_queue_hotplug_work(struct work_struct *work) 1169 { 1170 return queue_work(kacpi_hotplug_wq, work); 1171 } 1172 1173 acpi_status 1174 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) 1175 { 1176 struct semaphore *sem = NULL; 1177 1178 sem = acpi_os_allocate_zeroed(sizeof(struct semaphore)); 1179 if (!sem) 1180 return AE_NO_MEMORY; 1181 1182 sema_init(sem, initial_units); 1183 1184 *handle = (acpi_handle *) sem; 1185 1186 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n", 1187 *handle, initial_units)); 1188 1189 return AE_OK; 1190 } 1191 1192 /* 1193 * TODO: A better way to delete semaphores? Linux doesn't have a 1194 * 'delete_semaphore()' function -- may result in an invalid 1195 * pointer dereference for non-synchronized consumers. Should 1196 * we at least check for blocked threads and signal/cancel them? 1197 */ 1198 1199 acpi_status acpi_os_delete_semaphore(acpi_handle handle) 1200 { 1201 struct semaphore *sem = (struct semaphore *)handle; 1202 1203 if (!sem) 1204 return AE_BAD_PARAMETER; 1205 1206 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle)); 1207 1208 BUG_ON(!list_empty(&sem->wait_list)); 1209 kfree(sem); 1210 sem = NULL; 1211 1212 return AE_OK; 1213 } 1214 1215 /* 1216 * TODO: Support for units > 1? 1217 */ 1218 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) 1219 { 1220 acpi_status status = AE_OK; 1221 struct semaphore *sem = (struct semaphore *)handle; 1222 long jiffies; 1223 int ret = 0; 1224 1225 if (!acpi_os_initialized) 1226 return AE_OK; 1227 1228 if (!sem || (units < 1)) 1229 return AE_BAD_PARAMETER; 1230 1231 if (units > 1) 1232 return AE_SUPPORT; 1233 1234 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", 1235 handle, units, timeout)); 1236 1237 if (timeout == ACPI_WAIT_FOREVER) 1238 jiffies = MAX_SCHEDULE_TIMEOUT; 1239 else 1240 jiffies = msecs_to_jiffies(timeout); 1241 1242 ret = down_timeout(sem, jiffies); 1243 if (ret) 1244 status = AE_TIME; 1245 1246 if (ACPI_FAILURE(status)) { 1247 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 1248 "Failed to acquire semaphore[%p|%d|%d], %s", 1249 handle, units, timeout, 1250 acpi_format_exception(status))); 1251 } else { 1252 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 1253 "Acquired semaphore[%p|%d|%d]", handle, 1254 units, timeout)); 1255 } 1256 1257 return status; 1258 } 1259 1260 /* 1261 * TODO: Support for units > 1? 1262 */ 1263 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) 1264 { 1265 struct semaphore *sem = (struct semaphore *)handle; 1266 1267 if (!acpi_os_initialized) 1268 return AE_OK; 1269 1270 if (!sem || (units < 1)) 1271 return AE_BAD_PARAMETER; 1272 1273 if (units > 1) 1274 return AE_SUPPORT; 1275 1276 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle, 1277 units)); 1278 1279 up(sem); 1280 1281 return AE_OK; 1282 } 1283 1284 acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read) 1285 { 1286 #ifdef ENABLE_DEBUGGER 1287 if (acpi_in_debugger) { 1288 u32 chars; 1289 1290 kdb_read(buffer, buffer_length); 1291 1292 /* remove the CR kdb includes */ 1293 chars = strlen(buffer) - 1; 1294 buffer[chars] = '\0'; 1295 } 1296 #else 1297 int ret; 1298 1299 ret = acpi_debugger_read_cmd(buffer, buffer_length); 1300 if (ret < 0) 1301 return AE_ERROR; 1302 if (bytes_read) 1303 *bytes_read = ret; 1304 #endif 1305 1306 return AE_OK; 1307 } 1308 EXPORT_SYMBOL(acpi_os_get_line); 1309 1310 acpi_status acpi_os_wait_command_ready(void) 1311 { 1312 int ret; 1313 1314 ret = acpi_debugger_wait_command_ready(); 1315 if (ret < 0) 1316 return AE_ERROR; 1317 return AE_OK; 1318 } 1319 1320 acpi_status acpi_os_notify_command_complete(void) 1321 { 1322 int ret; 1323 1324 ret = acpi_debugger_notify_command_complete(); 1325 if (ret < 0) 1326 return AE_ERROR; 1327 return AE_OK; 1328 } 1329 1330 acpi_status acpi_os_signal(u32 function, void *info) 1331 { 1332 switch (function) { 1333 case ACPI_SIGNAL_FATAL: 1334 printk(KERN_ERR PREFIX "Fatal opcode executed\n"); 1335 break; 1336 case ACPI_SIGNAL_BREAKPOINT: 1337 /* 1338 * AML Breakpoint 1339 * ACPI spec. says to treat it as a NOP unless 1340 * you are debugging. So if/when we integrate 1341 * AML debugger into the kernel debugger its 1342 * hook will go here. But until then it is 1343 * not useful to print anything on breakpoints. 1344 */ 1345 break; 1346 default: 1347 break; 1348 } 1349 1350 return AE_OK; 1351 } 1352 1353 static int __init acpi_os_name_setup(char *str) 1354 { 1355 char *p = acpi_os_name; 1356 int count = ACPI_MAX_OVERRIDE_LEN - 1; 1357 1358 if (!str || !*str) 1359 return 0; 1360 1361 for (; count-- && *str; str++) { 1362 if (isalnum(*str) || *str == ' ' || *str == ':') 1363 *p++ = *str; 1364 else if (*str == '\'' || *str == '"') 1365 continue; 1366 else 1367 break; 1368 } 1369 *p = 0; 1370 1371 return 1; 1372 1373 } 1374 1375 __setup("acpi_os_name=", acpi_os_name_setup); 1376 1377 /* 1378 * Disable the auto-serialization of named objects creation methods. 1379 * 1380 * This feature is enabled by default. It marks the AML control methods 1381 * that contain the opcodes to create named objects as "Serialized". 1382 */ 1383 static int __init acpi_no_auto_serialize_setup(char *str) 1384 { 1385 acpi_gbl_auto_serialize_methods = FALSE; 1386 pr_info("ACPI: auto-serialization disabled\n"); 1387 1388 return 1; 1389 } 1390 1391 __setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup); 1392 1393 /* Check of resource interference between native drivers and ACPI 1394 * OperationRegions (SystemIO and System Memory only). 1395 * IO ports and memory declared in ACPI might be used by the ACPI subsystem 1396 * in arbitrary AML code and can interfere with legacy drivers. 1397 * acpi_enforce_resources= can be set to: 1398 * 1399 * - strict (default) (2) 1400 * -> further driver trying to access the resources will not load 1401 * - lax (1) 1402 * -> further driver trying to access the resources will load, but you 1403 * get a system message that something might go wrong... 1404 * 1405 * - no (0) 1406 * -> ACPI Operation Region resources will not be registered 1407 * 1408 */ 1409 #define ENFORCE_RESOURCES_STRICT 2 1410 #define ENFORCE_RESOURCES_LAX 1 1411 #define ENFORCE_RESOURCES_NO 0 1412 1413 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT; 1414 1415 static int __init acpi_enforce_resources_setup(char *str) 1416 { 1417 if (str == NULL || *str == '\0') 1418 return 0; 1419 1420 if (!strcmp("strict", str)) 1421 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT; 1422 else if (!strcmp("lax", str)) 1423 acpi_enforce_resources = ENFORCE_RESOURCES_LAX; 1424 else if (!strcmp("no", str)) 1425 acpi_enforce_resources = ENFORCE_RESOURCES_NO; 1426 1427 return 1; 1428 } 1429 1430 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup); 1431 1432 /* Check for resource conflicts between ACPI OperationRegions and native 1433 * drivers */ 1434 int acpi_check_resource_conflict(const struct resource *res) 1435 { 1436 acpi_adr_space_type space_id; 1437 acpi_size length; 1438 u8 warn = 0; 1439 int clash = 0; 1440 1441 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) 1442 return 0; 1443 if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM)) 1444 return 0; 1445 1446 if (res->flags & IORESOURCE_IO) 1447 space_id = ACPI_ADR_SPACE_SYSTEM_IO; 1448 else 1449 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY; 1450 1451 length = resource_size(res); 1452 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) 1453 warn = 1; 1454 clash = acpi_check_address_range(space_id, res->start, length, warn); 1455 1456 if (clash) { 1457 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { 1458 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) 1459 printk(KERN_NOTICE "ACPI: This conflict may" 1460 " cause random problems and system" 1461 " instability\n"); 1462 printk(KERN_INFO "ACPI: If an ACPI driver is available" 1463 " for this device, you should use it instead of" 1464 " the native driver\n"); 1465 } 1466 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT) 1467 return -EBUSY; 1468 } 1469 return 0; 1470 } 1471 EXPORT_SYMBOL(acpi_check_resource_conflict); 1472 1473 int acpi_check_region(resource_size_t start, resource_size_t n, 1474 const char *name) 1475 { 1476 struct resource res = { 1477 .start = start, 1478 .end = start + n - 1, 1479 .name = name, 1480 .flags = IORESOURCE_IO, 1481 }; 1482 1483 return acpi_check_resource_conflict(&res); 1484 } 1485 EXPORT_SYMBOL(acpi_check_region); 1486 1487 static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level, 1488 void *_res, void **return_value) 1489 { 1490 struct acpi_mem_space_context **mem_ctx; 1491 union acpi_operand_object *handler_obj; 1492 union acpi_operand_object *region_obj2; 1493 union acpi_operand_object *region_obj; 1494 struct resource *res = _res; 1495 acpi_status status; 1496 1497 region_obj = acpi_ns_get_attached_object(handle); 1498 if (!region_obj) 1499 return AE_OK; 1500 1501 handler_obj = region_obj->region.handler; 1502 if (!handler_obj) 1503 return AE_OK; 1504 1505 if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 1506 return AE_OK; 1507 1508 if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) 1509 return AE_OK; 1510 1511 region_obj2 = acpi_ns_get_secondary_object(region_obj); 1512 if (!region_obj2) 1513 return AE_OK; 1514 1515 mem_ctx = (void *)®ion_obj2->extra.region_context; 1516 1517 if (!(mem_ctx[0]->address >= res->start && 1518 mem_ctx[0]->address < res->end)) 1519 return AE_OK; 1520 1521 status = handler_obj->address_space.setup(region_obj, 1522 ACPI_REGION_DEACTIVATE, 1523 NULL, (void **)mem_ctx); 1524 if (ACPI_SUCCESS(status)) 1525 region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE); 1526 1527 return status; 1528 } 1529 1530 /** 1531 * acpi_release_memory - Release any mappings done to a memory region 1532 * @handle: Handle to namespace node 1533 * @res: Memory resource 1534 * @level: A level that terminates the search 1535 * 1536 * Walks through @handle and unmaps all SystemMemory Operation Regions that 1537 * overlap with @res and that have already been activated (mapped). 1538 * 1539 * This is a helper that allows drivers to place special requirements on memory 1540 * region that may overlap with operation regions, primarily allowing them to 1541 * safely map the region as non-cached memory. 1542 * 1543 * The unmapped Operation Regions will be automatically remapped next time they 1544 * are called, so the drivers do not need to do anything else. 1545 */ 1546 acpi_status acpi_release_memory(acpi_handle handle, struct resource *res, 1547 u32 level) 1548 { 1549 if (!(res->flags & IORESOURCE_MEM)) 1550 return AE_TYPE; 1551 1552 return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level, 1553 acpi_deactivate_mem_region, NULL, res, NULL); 1554 } 1555 EXPORT_SYMBOL_GPL(acpi_release_memory); 1556 1557 /* 1558 * Let drivers know whether the resource checks are effective 1559 */ 1560 int acpi_resources_are_enforced(void) 1561 { 1562 return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT; 1563 } 1564 EXPORT_SYMBOL(acpi_resources_are_enforced); 1565 1566 /* 1567 * Deallocate the memory for a spinlock. 1568 */ 1569 void acpi_os_delete_lock(acpi_spinlock handle) 1570 { 1571 ACPI_FREE(handle); 1572 } 1573 1574 /* 1575 * Acquire a spinlock. 1576 * 1577 * handle is a pointer to the spinlock_t. 1578 */ 1579 1580 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp) 1581 { 1582 acpi_cpu_flags flags; 1583 spin_lock_irqsave(lockp, flags); 1584 return flags; 1585 } 1586 1587 /* 1588 * Release a spinlock. See above. 1589 */ 1590 1591 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags) 1592 { 1593 spin_unlock_irqrestore(lockp, flags); 1594 } 1595 1596 #ifndef ACPI_USE_LOCAL_CACHE 1597 1598 /******************************************************************************* 1599 * 1600 * FUNCTION: acpi_os_create_cache 1601 * 1602 * PARAMETERS: name - Ascii name for the cache 1603 * size - Size of each cached object 1604 * depth - Maximum depth of the cache (in objects) <ignored> 1605 * cache - Where the new cache object is returned 1606 * 1607 * RETURN: status 1608 * 1609 * DESCRIPTION: Create a cache object 1610 * 1611 ******************************************************************************/ 1612 1613 acpi_status 1614 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache) 1615 { 1616 *cache = kmem_cache_create(name, size, 0, 0, NULL); 1617 if (*cache == NULL) 1618 return AE_ERROR; 1619 else 1620 return AE_OK; 1621 } 1622 1623 /******************************************************************************* 1624 * 1625 * FUNCTION: acpi_os_purge_cache 1626 * 1627 * PARAMETERS: Cache - Handle to cache object 1628 * 1629 * RETURN: Status 1630 * 1631 * DESCRIPTION: Free all objects within the requested cache. 1632 * 1633 ******************************************************************************/ 1634 1635 acpi_status acpi_os_purge_cache(acpi_cache_t * cache) 1636 { 1637 kmem_cache_shrink(cache); 1638 return (AE_OK); 1639 } 1640 1641 /******************************************************************************* 1642 * 1643 * FUNCTION: acpi_os_delete_cache 1644 * 1645 * PARAMETERS: Cache - Handle to cache object 1646 * 1647 * RETURN: Status 1648 * 1649 * DESCRIPTION: Free all objects within the requested cache and delete the 1650 * cache object. 1651 * 1652 ******************************************************************************/ 1653 1654 acpi_status acpi_os_delete_cache(acpi_cache_t * cache) 1655 { 1656 kmem_cache_destroy(cache); 1657 return (AE_OK); 1658 } 1659 1660 /******************************************************************************* 1661 * 1662 * FUNCTION: acpi_os_release_object 1663 * 1664 * PARAMETERS: Cache - Handle to cache object 1665 * Object - The object to be released 1666 * 1667 * RETURN: None 1668 * 1669 * DESCRIPTION: Release an object to the specified cache. If cache is full, 1670 * the object is deleted. 1671 * 1672 ******************************************************************************/ 1673 1674 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object) 1675 { 1676 kmem_cache_free(cache, object); 1677 return (AE_OK); 1678 } 1679 #endif 1680 1681 static int __init acpi_no_static_ssdt_setup(char *s) 1682 { 1683 acpi_gbl_disable_ssdt_table_install = TRUE; 1684 pr_info("ACPI: static SSDT installation disabled\n"); 1685 1686 return 0; 1687 } 1688 1689 early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup); 1690 1691 static int __init acpi_disable_return_repair(char *s) 1692 { 1693 printk(KERN_NOTICE PREFIX 1694 "ACPI: Predefined validation mechanism disabled\n"); 1695 acpi_gbl_disable_auto_repair = TRUE; 1696 1697 return 1; 1698 } 1699 1700 __setup("acpica_no_return_repair", acpi_disable_return_repair); 1701 1702 acpi_status __init acpi_os_initialize(void) 1703 { 1704 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block); 1705 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block); 1706 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block); 1707 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block); 1708 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) { 1709 /* 1710 * Use acpi_os_map_generic_address to pre-map the reset 1711 * register if it's in system memory. 1712 */ 1713 int rv; 1714 1715 rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register); 1716 pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv); 1717 } 1718 acpi_os_initialized = true; 1719 1720 return AE_OK; 1721 } 1722 1723 acpi_status __init acpi_os_initialize1(void) 1724 { 1725 kacpid_wq = alloc_workqueue("kacpid", 0, 1); 1726 kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); 1727 kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0); 1728 BUG_ON(!kacpid_wq); 1729 BUG_ON(!kacpi_notify_wq); 1730 BUG_ON(!kacpi_hotplug_wq); 1731 acpi_osi_init(); 1732 return AE_OK; 1733 } 1734 1735 acpi_status acpi_os_terminate(void) 1736 { 1737 if (acpi_irq_handler) { 1738 acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt, 1739 acpi_irq_handler); 1740 } 1741 1742 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block); 1743 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block); 1744 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block); 1745 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block); 1746 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) 1747 acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register); 1748 1749 destroy_workqueue(kacpid_wq); 1750 destroy_workqueue(kacpi_notify_wq); 1751 destroy_workqueue(kacpi_hotplug_wq); 1752 1753 return AE_OK; 1754 } 1755 1756 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control, 1757 u32 pm1b_control) 1758 { 1759 int rc = 0; 1760 if (__acpi_os_prepare_sleep) 1761 rc = __acpi_os_prepare_sleep(sleep_state, 1762 pm1a_control, pm1b_control); 1763 if (rc < 0) 1764 return AE_ERROR; 1765 else if (rc > 0) 1766 return AE_CTRL_TERMINATE; 1767 1768 return AE_OK; 1769 } 1770 1771 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, 1772 u32 pm1a_ctrl, u32 pm1b_ctrl)) 1773 { 1774 __acpi_os_prepare_sleep = func; 1775 } 1776 1777 #if (ACPI_REDUCED_HARDWARE) 1778 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, 1779 u32 val_b) 1780 { 1781 int rc = 0; 1782 if (__acpi_os_prepare_extended_sleep) 1783 rc = __acpi_os_prepare_extended_sleep(sleep_state, 1784 val_a, val_b); 1785 if (rc < 0) 1786 return AE_ERROR; 1787 else if (rc > 0) 1788 return AE_CTRL_TERMINATE; 1789 1790 return AE_OK; 1791 } 1792 #else 1793 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, 1794 u32 val_b) 1795 { 1796 return AE_OK; 1797 } 1798 #endif 1799 1800 void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, 1801 u32 val_a, u32 val_b)) 1802 { 1803 __acpi_os_prepare_extended_sleep = func; 1804 } 1805 1806 acpi_status acpi_os_enter_sleep(u8 sleep_state, 1807 u32 reg_a_value, u32 reg_b_value) 1808 { 1809 acpi_status status; 1810 1811 if (acpi_gbl_reduced_hardware) 1812 status = acpi_os_prepare_extended_sleep(sleep_state, 1813 reg_a_value, 1814 reg_b_value); 1815 else 1816 status = acpi_os_prepare_sleep(sleep_state, 1817 reg_a_value, reg_b_value); 1818 return status; 1819 } 1820