// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000 Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>
#include <linux/security.h>

#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "acpica/accommon.h"
#include "acpica/acnamesp.h"
#include "internal.h"

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/*ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
				      u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
bool acpi_permanent_mmap = false;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
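/*
 * Editorial note on the tracking scheme used below: each mapping is tracked
 * either by a reference count (while it is in use) or, once the last
 * reference has been dropped, by an RCU work item that defers the actual
 * unmap until a grace period has elapsed, so lockless (RCU) readers walking
 * the list may still be using it safely.
 */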
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	union {
		unsigned long refcount;
		struct rcu_work rwork;
	} track;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
#define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map)

static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#else
	if (acpi_debugger_write_log(buffer) < 0) {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
	acpi_physical_address pa;

#ifdef CONFIG_KEXEC
	/*
	 * We may have been provided with an RSDP on the command line,
	 * but if a malicious user has done so they may be pointing us
	 * at modified ACPI tables that could alter kernel behaviour -
	 * so, we check the lockdown status before making use of
	 * it. If we trust it then also stash it in an architecture
	 * specific location (if appropriate) so it can be carried
	 * over further kexec()s.
	 */
	if (acpi_rsdp && !security_locked_down(LOCKDOWN_ACPI_TABLES)) {
		acpi_arch_set_root_pointer(acpi_rsdp);
		return acpi_rsdp;
	}
#endif
	pa = acpi_arch_get_root_pointer();
	if (pa)
		return pa;

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		pr_err(PREFIX "System description tables not found\n");
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_find_root_pointer(&pa);
	}

	return pa;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->track.refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings. If found, get a reference to it and return a pointer to it (its
 * virtual address). If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem __ref
*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->track.refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->track.refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_map_remove(struct work_struct *work)
{
	struct acpi_ioremap *map = container_of(to_rcu_work(work),
						struct acpi_ioremap,
						track.rwork);

	acpi_unmap(map->phys, map->virt);
	kfree(map);
}

/* Must be called with mutex_lock(&acpi_ioremap_lock) */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (--map->track.refcount)
		return;

	list_del_rcu(&map->list);

	INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
	queue_rcu_work(system_wq, &map->track.rwork);
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and if there are no more active references
 * to it, queue it up for later removal.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done. Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
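 *
 * A minimal usage sketch (illustrative only; "regs", "phys" and "len" are
 * made-up names, not taken from this file):
 *
 *	void __iomem *regs = acpi_os_map_iomem(phys, len);
 *
 *	if (regs) {
 *		u32 val = readl(regs);
 *		...
 *		acpi_os_unmap_iomem(regs, len);
 *	}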
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);

	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

/**
 * acpi_os_unmap_memory - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 */
void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);

	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running,
 * monotonically increasing timer with 100ns granularity. Do not use
 * ktime_get() to implement this function because this function may get
 * called after timekeeping has been suspended. Note: calling this function
 * after timekeeping has been suspended may lead to unexpected results
 * because when timekeeping is suspended the jiffies counter is not
 * incremented. See also timekeeping_suspend().
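 *
 * As a worked example, with HZ == 250 each jiffy accounts for
 * ACPI_100NSEC_PER_SEC / HZ == 10000000 / 250 == 40000 of these 100ns units.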
 */
u64 acpi_os_get_timer(void)
{
	return (get_jiffies_64() - INITIAL_JIFFIES) *
		(ACPI_100NSEC_PER_SEC / HZ);
}

acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);

int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;
	int error;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	error = acpi_os_read_iomem(virt_addr, value, width);
	BUG_ON(error);

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

#ifdef CONFIG_PCI
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
			      PCI_DEVFN(pci_id->device, pci_id->function),
			      reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}
#endif

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{
	int ret = 0;

	mutex_lock(&acpi_debugger.lock);
	if (acpi_debugger.ops) {
		ret = -EBUSY;
		goto err_lock;
	}

	acpi_debugger.owner = owner;
	acpi_debugger.ops = ops;

err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
	mutex_lock(&acpi_debugger.lock);
	if (ops == acpi_debugger.ops) {
		acpi_debugger.ops = NULL;
		acpi_debugger.owner = NULL;
	}
	mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Call to kthread_create() failed.\n");
			status = AE_ERROR;
		}
		goto out_thread;
	}

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
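	 *
	 * Note: the allocation below uses GFP_ATOMIC because this function
	 * may be invoked from atomic context (for instance while dispatching
	 * a GPE from the SCI interrupt handler).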
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_GPE_HANDLER) {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		pr_err("Unsupported os_execute type %d.\n", type);
		status = AE_ERROR;
	}

	if (ACPI_FAILURE(status))
		goto err_workqueue;

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
	}
err_workqueue:
	if (ACPI_FAILURE(status))
		kfree(dpc);
out_thread:
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);

void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
		  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
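	 * (If the hotplug work itself ran on one of those workqueues,
	 * flushing them from within the work item would deadlock.)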
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.	Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
	int ret;

	ret = acpi_debugger_wait_command_ready();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
	int ret;

	ret = acpi_debugger_notify_command_complete();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;

}

__setup("acpi_os_name=", acpi_os_name_setup);

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("ACPI: auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/* Check of resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *        get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);

static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
					      void *_res, void **return_value)
{
	struct acpi_mem_space_context **mem_ctx;
	union acpi_operand_object *handler_obj;
	union acpi_operand_object *region_obj2;
	union acpi_operand_object *region_obj;
	struct resource *res = _res;
	acpi_status status;

	region_obj = acpi_ns_get_attached_object(handle);
	if (!region_obj)
		return AE_OK;

	handler_obj = region_obj->region.handler;
	if (!handler_obj)
		return AE_OK;

	if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return AE_OK;

	if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
		return AE_OK;

	region_obj2 = acpi_ns_get_secondary_object(region_obj);
	if (!region_obj2)
		return AE_OK;

	mem_ctx = (void *)&region_obj2->extra.region_context;

	if (!(mem_ctx[0]->address >= res->start &&
	      mem_ctx[0]->address < res->end))
		return AE_OK;

	status = handler_obj->address_space.setup(region_obj,
						  ACPI_REGION_DEACTIVATE,
						  NULL, (void **)mem_ctx);
	if (ACPI_SUCCESS(status))
		region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);

	return status;
}

/**
 * acpi_release_memory - Release any mappings done to a memory region
 * @handle: Handle to namespace node
 * @res: Memory resource
 * @level: A level that terminates the search
 *
 * Walks through @handle and unmaps all SystemMemory Operation Regions that
 * overlap with @res and that have already been activated (mapped).
 *
 * This is a helper that allows drivers to place special requirements on memory
 * region that may overlap with operation regions, primarily allowing them to
 * safely map the region as non-cached memory.
 *
 * The unmapped Operation Regions will be automatically remapped next time they
 * are called, so the drivers do not need to do anything else.
 */
acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
				u32 level)
{
	if (!(res->flags & IORESOURCE_MEM))
		return AE_TYPE;

	return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
				   acpi_deactivate_mem_region, NULL, res, NULL);
}
EXPORT_SYMBOL_GPL(acpi_release_memory);

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
	__acquires(lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
	__releases(lockp)
{
	spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "ACPI: Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_osi_init();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;
	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}

#if (ACPI_REDUCED_HARDWARE)
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	int rc = 0;
	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}
#else
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	return AE_OK;
}
#endif

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
				        u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}

acpi_status acpi_os_enter_sleep(u8 sleep_state,
				u32 reg_a_value, u32 reg_b_value)
{
	acpi_status status;

	if (acpi_gbl_reduced_hardware)
		status = acpi_os_prepare_extended_sleep(sleep_state,
							reg_a_value,
							reg_b_value);
	else
		status = acpi_os_prepare_sleep(sleep_state,
					       reg_a_value, reg_b_value);
	return status;
}