/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "acpica/accommon.h"
#include "acpica/acnamesp.h"
#include "internal.h"

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/* ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
				      u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
bool acpi_permanent_mmap = false;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
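
/*
 * Note: entries on this list are looked up either under acpi_ioremap_lock
 * or under an RCU read-side critical section (see acpi_map_lookup() and the
 * rcu_read_lock() users below), which is what allows lookups from interrupt
 * context; insertions and removals are serialized by acpi_ioremap_lock, and
 * a mapping is only torn down after an RCU grace period.
 */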

static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#else
	if (acpi_debugger_write_log(buffer) < 0) {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
	acpi_physical_address pa;

#ifdef CONFIG_KEXEC
	if (acpi_rsdp)
		return acpi_rsdp;
#endif
	pa = acpi_arch_get_root_pointer();
	if (pa)
		return pa;

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		pr_err(PREFIX "System description tables not found\n");
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_find_root_pointer(&pa);
	}

	return pa;
}
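
/*
 * Summary of acpi_os_get_root_pointer() above: an "acpi_rsdp=" override
 * (kexec) is honoured first, then the architecture-specific hook, then the
 * EFI configuration tables (preferring the ACPI 2.0+ entry), and finally a
 * legacy memory scan via acpi_find_root_pointer() when
 * CONFIG_ACPI_LEGACY_TABLES_LOOKUP is enabled.
 */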

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}
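
/*
 * Note on acpi_map()/acpi_unmap() above: where should_use_kmap() selects
 * RAM-backed pages, the mapping is done with kmap() and is therefore
 * limited to a single page (hence the PAGE_SIZE check); everything else
 * goes through acpi_os_ioremap()/iounmap().
 */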

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings.  If found, get a reference to it and return a pointer to it (its
 * virtual address).  If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (!--map->refcount)
		list_del_rcu(&map->list);
}

static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu_expedited();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and unmap it if there are no more active
 * references to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done.  Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand.  Returns a 64-bit free-running,
 * monotonically increasing timer with 100ns granularity.  Do not use
 * ktime_get() to implement this function because this function may get
 * called after timekeeping has been suspended.  Note: calling this function
 * after timekeeping has been suspended may lead to unexpected results
 * because when timekeeping is suspended the jiffies counter is not
 * incremented.  See also timekeeping_suspend().
 */
u64 acpi_os_get_timer(void)
{
	return (get_jiffies_64() - INITIAL_JIFFIES) *
		(ACPI_100NSEC_PER_SEC / HZ);
}

acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);

int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;
	int error;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	error = acpi_os_read_iomem(virt_addr, value, width);
	BUG_ON(error);

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
			      PCI_DEVFN(pci_id->device, pci_id->function),
			      reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}
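
/*
 * Illustration (editorial, not from the original source): an AML access to
 * PCI configuration space, e.g. an 8-bit read of register 0x0e on segment 0,
 * bus 0, device 3, function 0, reaches the helpers above roughly as
 *
 *	raw_pci_read(0, 0, PCI_DEVFN(3, 0), 0x0e, 1, &value32);
 *
 * with the result zero-extended into the caller's 64-bit value.
 */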

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{
	int ret = 0;

	mutex_lock(&acpi_debugger.lock);
	if (acpi_debugger.ops) {
		ret = -EBUSY;
		goto err_lock;
	}

	acpi_debugger.owner = owner;
	acpi_debugger.ops = ops;

err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
	mutex_lock(&acpi_debugger.lock);
	if (ops == acpi_debugger.ops) {
		acpi_debugger.ops = NULL;
		acpi_debugger.owner = NULL;
	}
	mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Call to kthread_create() failed.\n");
			status = AE_ERROR;
		}
		goto out_thread;
	}

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */
	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_GPE_HANDLER) {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		pr_err("Unsupported os_execute type %d.\n", type);
		status = AE_ERROR;
	}

	if (ACPI_FAILURE(status))
		goto err_workqueue;

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
	}
err_workqueue:
	if (ACPI_FAILURE(status))
		kfree(dpc);
out_thread:
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);

void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
		  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle *handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
	int ret;

	ret = acpi_debugger_wait_command_ready();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
	int ret;

	ret = acpi_debugger_notify_command_complete();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("ACPI: auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/* Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> a driver trying to access the resources will not load
 *   - lax              (1)
 *     -> a driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
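
/*
 * Usage example (editorial): booting with "acpi_enforce_resources=lax" lets
 * a native driver bind even if its I/O or memory range overlaps an ACPI
 * OperationRegion (a warning is printed instead), whereas the default
 * "strict" mode makes acpi_check_resource_conflict() below return -EBUSY
 * for such a conflict, and "no" skips the conflict checking altogether.
 */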

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);

static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
					      void *_res, void **return_value)
{
	struct acpi_mem_space_context **mem_ctx;
	union acpi_operand_object *handler_obj;
	union acpi_operand_object *region_obj2;
	union acpi_operand_object *region_obj;
	struct resource *res = _res;
	acpi_status status;

	region_obj = acpi_ns_get_attached_object(handle);
	if (!region_obj)
		return AE_OK;

	handler_obj = region_obj->region.handler;
	if (!handler_obj)
		return AE_OK;

	if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return AE_OK;

	if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
		return AE_OK;

	region_obj2 = acpi_ns_get_secondary_object(region_obj);
	if (!region_obj2)
		return AE_OK;

	mem_ctx = (void *)&region_obj2->extra.region_context;

	if (!(mem_ctx[0]->address >= res->start &&
	      mem_ctx[0]->address < res->end))
		return AE_OK;

	status = handler_obj->address_space.setup(region_obj,
						  ACPI_REGION_DEACTIVATE,
						  NULL, (void **)mem_ctx);
	if (ACPI_SUCCESS(status))
		region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);

	return status;
}

/**
 * acpi_release_memory - Release any mappings done to a memory region
 * @handle: Handle to namespace node
 * @res: Memory resource
 * @level: A level that terminates the search
 *
 * Walks through @handle and unmaps all SystemMemory Operation Regions that
 * overlap with @res and that have already been activated (mapped).
 *
 * This is a helper that allows drivers to place special requirements on
 * memory regions that may overlap with operation regions, primarily allowing
 * them to safely map the region as non-cached memory.
 *
 * The unmapped Operation Regions will be automatically remapped the next
 * time they are used, so the drivers do not need to do anything else.
 */
acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
				u32 level)
{
	if (!(res->flags & IORESOURCE_MEM))
		return AE_TYPE;

	return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
				   acpi_deactivate_mem_region, NULL, res, NULL);
}
EXPORT_SYMBOL_GPL(acpi_release_memory);

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;

	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock.  See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t **cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t *cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t *cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t *cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "ACPI: Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_osi_init();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;

	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
					   u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}

#if (ACPI_REDUCED_HARDWARE)
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	int rc = 0;

	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}
#else
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	return AE_OK;
}
#endif

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
						     u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}

acpi_status acpi_os_enter_sleep(u8 sleep_state,
				u32 reg_a_value, u32 reg_b_value)
{
	acpi_status status;

	if (acpi_gbl_reduced_hardware)
		status = acpi_os_prepare_extended_sleep(sleep_state,
							reg_a_value,
							reg_b_value);
	else
		status = acpi_os_prepare_sleep(sleep_state,
					       reg_a_value, reg_b_value);
	return status;
}