// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>
#include <linux/security.h>

#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "acpica/accommon.h"
#include "acpica/acnamesp.h"
#include "internal.h"

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/*ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
				      u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
bool acpi_permanent_mmap = false;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	union {
		unsigned long refcount;
		struct rcu_work rwork;
	} track;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
#define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map)

static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#else
	if (acpi_debugger_write_log(buffer) < 0) {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif
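
/*
 * Illustrative note (not part of the original file): on a kexec'd kernel that
 * cannot rediscover the tables via EFI or the legacy BIOS area, the physical
 * address of the RSDP can be handed over on the command line, roughly like
 * this (the address below is a made-up placeholder):
 *
 *	kexec -l vmlinuz --append="... acpi_rsdp=0x7fe386a0"
 *
 * acpi_os_get_root_pointer() below only honours this value when the kernel is
 * not locked down, since a forged RSDP could point at attacker-modified
 * tables.
 */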

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
	acpi_physical_address pa;

#ifdef CONFIG_KEXEC
	/*
	 * We may have been provided with an RSDP on the command line,
	 * but if a malicious user has done so they may be pointing us
	 * at modified ACPI tables that could alter kernel behaviour -
	 * so, we check the lockdown status before making use of
	 * it. If we trust it then also stash it in an architecture
	 * specific location (if appropriate) so it can be carried
	 * over further kexec()s.
	 */
	if (acpi_rsdp && !security_locked_down(LOCKDOWN_ACPI_TABLES)) {
		acpi_arch_set_root_pointer(acpi_rsdp);
		return acpi_rsdp;
	}
#endif
	pa = acpi_arch_get_root_pointer();
	if (pa)
		return pa;

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		pr_err(PREFIX "System description tables not found\n");
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_find_root_pointer(&pa);
	}

	return pa;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->track.refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings. If found, get a reference to it and return a pointer to it (its
 * virtual address). If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem __ref
*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->track.refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(phys, size);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK);
	map->phys = pg_off;
	map->size = pg_sz;
	map->track.refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
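
/*
 * Illustrative note (not part of the original file): a caller that needs a
 * kernel virtual address for a firmware-described physical range would
 * typically pair the helpers above and below roughly like this ("table_pa"
 * and "table_len" are made-up placeholders):
 *
 *	void __iomem *base = acpi_os_map_iomem(table_pa, table_len);
 *
 *	if (base) {
 *		u32 sig = readl(base);
 *		...
 *		acpi_os_unmap_iomem(base, table_len);
 *	}
 *
 * Mapping the same range twice only bumps the refcount of the existing entry;
 * the underlying mapping is torn down lazily via RCU work once the last
 * reference is dropped (see acpi_os_drop_map_ref() below).
 */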

static void acpi_os_map_remove(struct work_struct *work)
{
	struct acpi_ioremap *map = container_of(to_rcu_work(work),
						struct acpi_ioremap,
						track.rwork);

	acpi_unmap(map->phys, map->virt);
	kfree(map);
}

/* Must be called with mutex_lock(&acpi_ioremap_lock) */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (--map->track.refcount)
		return;

	list_del_rcu(&map->list);

	INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
	queue_rcu_work(system_wq, &map->track.rwork);
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and if there are no more active references
 * to it, queue it up for later removal.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done. Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);

	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

/**
 * acpi_os_unmap_memory - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 */
void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return NULL;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return NULL;

	return acpi_os_map_iomem(addr, gas->bit_width / 8);
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);

	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running,
 * monotonically increasing timer with 100ns granularity. Do not use
 * ktime_get() to implement this function because this function may get
 * called after timekeeping has been suspended. Note: calling this function
 * after timekeeping has been suspended may lead to unexpected results
 * because when timekeeping is suspended the jiffies counter is not
 * incremented. See also timekeeping_suspend().
 */
u64 acpi_os_get_timer(void)
{
	return (get_jiffies_64() - INITIAL_JIFFIES) *
		(ACPI_100NSEC_PER_SEC / HZ);
}

acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);

int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;
	int error;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	error = acpi_os_read_iomem(virt_addr, value, width);
	BUG_ON(error);

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
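
/*
 * Illustrative note (not part of the original file): the two accessors above
 * first try acpi_map_vaddr_lookup() under rcu_read_lock(), so a range that
 * has been pre-mapped with acpi_os_map_iomem()/acpi_os_map_generic_address()
 * can be read or written without an ioremap() on every access (and therefore
 * also from contexts where ioremap() is not allowed). A range that is not in
 * the list is mapped and unmapped around the single access, e.g.:
 *
 *	u64 v;
 *
 *	acpi_os_read_memory(some_phys_addr, &v, 32);
 *
 * where "some_phys_addr" is a made-up placeholder.
 */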

#ifdef CONFIG_PCI
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}
#endif

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{
	int ret = 0;

	mutex_lock(&acpi_debugger.lock);
	if (acpi_debugger.ops) {
		ret = -EBUSY;
		goto err_lock;
	}

	acpi_debugger.owner = owner;
	acpi_debugger.ops = ops;

err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
	mutex_lock(&acpi_debugger.lock);
	if (ops == acpi_debugger.ops) {
		acpi_debugger.ops = NULL;
		acpi_debugger.owner = NULL;
	}
	mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Call to kthread_create() failed.\n");
			status = AE_ERROR;
		}
		goto out_thread;
	}

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_GPE_HANDLER) {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		pr_err("Unsupported os_execute type %d.\n", type);
		status = AE_ERROR;
	}

	if (ACPI_FAILURE(status))
		goto err_workqueue;

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
	}
err_workqueue:
	if (ACPI_FAILURE(status))
		kfree(dpc);
out_thread:
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);

void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
		  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores? Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.	Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}
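
/*
 * Illustrative note (not part of the original file): ACPICA drives these
 * semaphore callbacks roughly as follows (error handling omitted, "sem" is
 * only a placeholder name):
 *
 *	acpi_handle sem;
 *
 *	acpi_os_create_semaphore(1, 1, &sem);
 *	acpi_os_wait_semaphore(sem, 1, ACPI_WAIT_FOREVER);
 *	... critical section ...
 *	acpi_os_signal_semaphore(sem, 1);
 *	acpi_os_delete_semaphore(sem);
 *
 * A wait with a finite timeout returns AE_TIME if down_timeout() expires.
 */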

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
	int ret;

	ret = acpi_debugger_wait_command_ready();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
	int ret;

	ret = acpi_debugger_notify_command_complete();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;

}

__setup("acpi_os_name=", acpi_os_name_setup);

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("ACPI: auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/* Check of resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
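
/*
 * Illustrative note (not part of the original file): a legacy-port driver
 * typically calls acpi_check_region() before claiming its I/O range, so that
 * ranges owned by AML OperationRegions are left alone when enforcement is
 * strict ("MY_BASE" and "MY_LEN" are made-up placeholders):
 *
 *	if (acpi_check_region(MY_BASE, MY_LEN, "mydrv"))
 *		return -EBUSY;
 *
 *	if (!request_region(MY_BASE, MY_LEN, "mydrv"))
 *		return -EBUSY;
 */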

static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
					      void *_res, void **return_value)
{
	struct acpi_mem_space_context **mem_ctx;
	union acpi_operand_object *handler_obj;
	union acpi_operand_object *region_obj2;
	union acpi_operand_object *region_obj;
	struct resource *res = _res;
	acpi_status status;

	region_obj = acpi_ns_get_attached_object(handle);
	if (!region_obj)
		return AE_OK;

	handler_obj = region_obj->region.handler;
	if (!handler_obj)
		return AE_OK;

	if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return AE_OK;

	if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
		return AE_OK;

	region_obj2 = acpi_ns_get_secondary_object(region_obj);
	if (!region_obj2)
		return AE_OK;

	mem_ctx = (void *)&region_obj2->extra.region_context;

	if (!(mem_ctx[0]->address >= res->start &&
	      mem_ctx[0]->address < res->end))
		return AE_OK;

	status = handler_obj->address_space.setup(region_obj,
						  ACPI_REGION_DEACTIVATE,
						  NULL, (void **)mem_ctx);
	if (ACPI_SUCCESS(status))
		region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);

	return status;
}

/**
 * acpi_release_memory - Release any mappings done to a memory region
 * @handle: Handle to namespace node
 * @res: Memory resource
 * @level: A level that terminates the search
 *
 * Walks through @handle and unmaps all SystemMemory Operation Regions that
 * overlap with @res and that have already been activated (mapped).
 *
 * This is a helper that allows drivers to place special requirements on memory
 * region that may overlap with operation regions, primarily allowing them to
 * safely map the region as non-cached memory.
 *
 * The unmapped Operation Regions will be automatically remapped next time they
 * are called, so the drivers do not need to do anything else.
 */
acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
				u32 level)
{
	acpi_status status;

	if (!(res->flags & IORESOURCE_MEM))
		return AE_TYPE;

	status = acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
				     acpi_deactivate_mem_region, NULL,
				     res, NULL);
	if (ACPI_FAILURE(status))
		return status;

	/*
	 * Wait for all of the mappings queued up for removal by
	 * acpi_deactivate_mem_region() to actually go away.
	 */
	synchronize_rcu();
	rcu_barrier();
	flush_scheduled_work();

	return AE_OK;
}
EXPORT_SYMBOL_GPL(acpi_release_memory);

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
	__acquires(lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
	__releases(lockp)
{
	spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "ACPI: Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);

	acpi_gbl_xgpe0_block_logical_address =
		(unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_gbl_xgpe1_block_logical_address =
		(unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);

	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		void *rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg %s\n", __func__,
			 rv ? "successful" : "failed");
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_osi_init();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_gbl_xgpe0_block_logical_address = 0UL;
	acpi_gbl_xgpe1_block_logical_address = 0UL;

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);

	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;
	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
				  u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}

#if (ACPI_REDUCED_HARDWARE)
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	int rc = 0;
	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}
#else
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	return AE_OK;
}
#endif

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
			       u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}

acpi_status acpi_os_enter_sleep(u8 sleep_state,
				u32 reg_a_value, u32 reg_b_value)
{
	acpi_status status;

	if (acpi_gbl_reduced_hardware)
		status = acpi_os_prepare_extended_sleep(sleep_state,
							reg_a_value,
							reg_b_value);
	else
		status = acpi_os_prepare_sleep(sleep_state,
					       reg_a_value, reg_b_value);
	return status;
}
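
/*
 * Illustrative note (not part of the original file): a platform layer (for
 * example a hypervisor backend) can hook the sleep entry path by registering
 * a callback; a positive return value makes acpi_os_enter_sleep() return
 * AE_CTRL_TERMINATE, telling the caller to skip writing the sleep control
 * registers itself. Sketch only; the callback name is a placeholder:
 *
 *	static int my_prepare_sleep(u8 sleep_state, u32 pm1a, u32 pm1b)
 *	{
 *		// hand the request to firmware/hypervisor here
 *		return 1;	// suspend handled, terminate the normal path
 *	}
 *
 *	acpi_os_set_prepare_sleep(my_prepare_sleep);
 */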