1 /* 2 * acpi_osl.c - OS-dependent functions ($Revision: 83 $) 3 * 4 * Copyright (C) 2000 Andrew Henroid 5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 7 * Copyright (c) 2008 Intel Corporation 8 * Author: Matthew Wilcox <willy@linux.intel.com> 9 * 10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2 of the License, or 15 * (at your option) any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 * 24 */ 25 26 #include <linux/module.h> 27 #include <linux/kernel.h> 28 #include <linux/slab.h> 29 #include <linux/mm.h> 30 #include <linux/highmem.h> 31 #include <linux/pci.h> 32 #include <linux/interrupt.h> 33 #include <linux/kmod.h> 34 #include <linux/delay.h> 35 #include <linux/workqueue.h> 36 #include <linux/nmi.h> 37 #include <linux/acpi.h> 38 #include <linux/efi.h> 39 #include <linux/ioport.h> 40 #include <linux/list.h> 41 #include <linux/jiffies.h> 42 #include <linux/semaphore.h> 43 44 #include <asm/io.h> 45 #include <asm/uaccess.h> 46 #include <linux/io-64-nonatomic-lo-hi.h> 47 48 #include "internal.h" 49 50 #define _COMPONENT ACPI_OS_SERVICES 51 ACPI_MODULE_NAME("osl"); 52 53 struct acpi_os_dpc { 54 acpi_osd_exec_callback function; 55 void *context; 56 struct work_struct work; 57 }; 58 59 #ifdef ENABLE_DEBUGGER 60 #include <linux/kdb.h> 61 62 /* stuff for debugger support */ 63 int acpi_in_debugger; 64 EXPORT_SYMBOL(acpi_in_debugger); 65 #endif /*ENABLE_DEBUGGER */ 66 67 static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl, 68 u32 pm1b_ctrl); 69 static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a, 70 u32 val_b); 71 72 static acpi_osd_handler acpi_irq_handler; 73 static void *acpi_irq_context; 74 static struct workqueue_struct *kacpid_wq; 75 static struct workqueue_struct *kacpi_notify_wq; 76 static struct workqueue_struct *kacpi_hotplug_wq; 77 static bool acpi_os_initialized; 78 unsigned int acpi_sci_irq = INVALID_ACPI_IRQ; 79 80 /* 81 * This list of permanent mappings is for memory that may be accessed from 82 * interrupt context, where we can't do the ioremap(). 83 */ 84 struct acpi_ioremap { 85 struct list_head list; 86 void __iomem *virt; 87 acpi_physical_address phys; 88 acpi_size size; 89 unsigned long refcount; 90 }; 91 92 static LIST_HEAD(acpi_ioremaps); 93 static DEFINE_MUTEX(acpi_ioremap_lock); 94 95 static void __init acpi_request_region (struct acpi_generic_address *gas, 96 unsigned int length, char *desc) 97 { 98 u64 addr; 99 100 /* Handle possible alignment issues */ 101 memcpy(&addr, &gas->address, sizeof(addr)); 102 if (!addr || !length) 103 return; 104 105 /* Resources are never freed */ 106 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) 107 request_region(addr, length, desc); 108 else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 109 request_mem_region(addr, length, desc); 110 } 111 112 static int __init acpi_reserve_resources(void) 113 { 114 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length, 115 "ACPI PM1a_EVT_BLK"); 116 117 acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length, 118 "ACPI PM1b_EVT_BLK"); 119 120 acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length, 121 "ACPI PM1a_CNT_BLK"); 122 123 acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length, 124 "ACPI PM1b_CNT_BLK"); 125 126 if (acpi_gbl_FADT.pm_timer_length == 4) 127 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR"); 128 129 acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length, 130 "ACPI PM2_CNT_BLK"); 131 132 /* Length of GPE blocks must be a non-negative multiple of 2 */ 133 134 if (!(acpi_gbl_FADT.gpe0_block_length & 0x1)) 135 acpi_request_region(&acpi_gbl_FADT.xgpe0_block, 136 acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK"); 137 138 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1)) 139 acpi_request_region(&acpi_gbl_FADT.xgpe1_block, 140 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK"); 141 142 return 0; 143 } 144 fs_initcall_sync(acpi_reserve_resources); 145 146 void acpi_os_printf(const char *fmt, ...) 147 { 148 va_list args; 149 va_start(args, fmt); 150 acpi_os_vprintf(fmt, args); 151 va_end(args); 152 } 153 EXPORT_SYMBOL(acpi_os_printf); 154 155 void acpi_os_vprintf(const char *fmt, va_list args) 156 { 157 static char buffer[512]; 158 159 vsprintf(buffer, fmt, args); 160 161 #ifdef ENABLE_DEBUGGER 162 if (acpi_in_debugger) { 163 kdb_printf("%s", buffer); 164 } else { 165 if (printk_get_level(buffer)) 166 printk("%s", buffer); 167 else 168 printk(KERN_CONT "%s", buffer); 169 } 170 #else 171 if (acpi_debugger_write_log(buffer) < 0) { 172 if (printk_get_level(buffer)) 173 printk("%s", buffer); 174 else 175 printk(KERN_CONT "%s", buffer); 176 } 177 #endif 178 } 179 180 #ifdef CONFIG_KEXEC 181 static unsigned long acpi_rsdp; 182 static int __init setup_acpi_rsdp(char *arg) 183 { 184 if (kstrtoul(arg, 16, &acpi_rsdp)) 185 return -EINVAL; 186 return 0; 187 } 188 early_param("acpi_rsdp", setup_acpi_rsdp); 189 #endif 190 191 acpi_physical_address __init acpi_os_get_root_pointer(void) 192 { 193 #ifdef CONFIG_KEXEC 194 if (acpi_rsdp) 195 return acpi_rsdp; 196 #endif 197 198 if (efi_enabled(EFI_CONFIG_TABLES)) { 199 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 200 return efi.acpi20; 201 else if (efi.acpi != EFI_INVALID_TABLE_ADDR) 202 return efi.acpi; 203 else { 204 printk(KERN_ERR PREFIX 205 "System description tables not found\n"); 206 return 0; 207 } 208 } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) { 209 acpi_physical_address pa = 0; 210 211 acpi_find_root_pointer(&pa); 212 return pa; 213 } 214 215 return 0; 216 } 217 218 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 219 static struct acpi_ioremap * 220 acpi_map_lookup(acpi_physical_address phys, acpi_size size) 221 { 222 struct acpi_ioremap *map; 223 224 list_for_each_entry_rcu(map, &acpi_ioremaps, list) 225 if (map->phys <= phys && 226 phys + size <= map->phys + map->size) 227 return map; 228 229 return NULL; 230 } 231 232 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 233 static void __iomem * 234 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size) 235 { 236 struct acpi_ioremap *map; 237 238 map = acpi_map_lookup(phys, size); 239 if (map) 240 return map->virt + (phys - map->phys); 241 242 return NULL; 243 } 244 245 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size) 246 { 247 struct acpi_ioremap *map; 248 void __iomem *virt = NULL; 249 250 mutex_lock(&acpi_ioremap_lock); 251 map = acpi_map_lookup(phys, size); 252 if (map) { 253 virt = map->virt + (phys - map->phys); 254 map->refcount++; 255 } 256 mutex_unlock(&acpi_ioremap_lock); 257 return virt; 258 } 259 EXPORT_SYMBOL_GPL(acpi_os_get_iomem); 260 261 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 262 static struct acpi_ioremap * 263 acpi_map_lookup_virt(void __iomem *virt, acpi_size size) 264 { 265 struct acpi_ioremap *map; 266 267 list_for_each_entry_rcu(map, &acpi_ioremaps, list) 268 if (map->virt <= virt && 269 virt + size <= map->virt + map->size) 270 return map; 271 272 return NULL; 273 } 274 275 #if defined(CONFIG_IA64) || defined(CONFIG_ARM64) 276 /* ioremap will take care of cache attributes */ 277 #define should_use_kmap(pfn) 0 278 #else 279 #define should_use_kmap(pfn) page_is_ram(pfn) 280 #endif 281 282 static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz) 283 { 284 unsigned long pfn; 285 286 pfn = pg_off >> PAGE_SHIFT; 287 if (should_use_kmap(pfn)) { 288 if (pg_sz > PAGE_SIZE) 289 return NULL; 290 return (void __iomem __force *)kmap(pfn_to_page(pfn)); 291 } else 292 return acpi_os_ioremap(pg_off, pg_sz); 293 } 294 295 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) 296 { 297 unsigned long pfn; 298 299 pfn = pg_off >> PAGE_SHIFT; 300 if (should_use_kmap(pfn)) 301 kunmap(pfn_to_page(pfn)); 302 else 303 iounmap(vaddr); 304 } 305 306 /** 307 * acpi_os_map_iomem - Get a virtual address for a given physical address range. 308 * @phys: Start of the physical address range to map. 309 * @size: Size of the physical address range to map. 310 * 311 * Look up the given physical address range in the list of existing ACPI memory 312 * mappings. If found, get a reference to it and return a pointer to it (its 313 * virtual address). If not found, map it, add it to that list and return a 314 * pointer to it. 315 * 316 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this 317 * routine simply calls __acpi_map_table() to get the job done. 318 */ 319 void __iomem *__ref 320 acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) 321 { 322 struct acpi_ioremap *map; 323 void __iomem *virt; 324 acpi_physical_address pg_off; 325 acpi_size pg_sz; 326 327 if (phys > ULONG_MAX) { 328 printk(KERN_ERR PREFIX "Cannot map memory that high\n"); 329 return NULL; 330 } 331 332 if (!acpi_gbl_permanent_mmap) 333 return __acpi_map_table((unsigned long)phys, size); 334 335 mutex_lock(&acpi_ioremap_lock); 336 /* Check if there's a suitable mapping already. */ 337 map = acpi_map_lookup(phys, size); 338 if (map) { 339 map->refcount++; 340 goto out; 341 } 342 343 map = kzalloc(sizeof(*map), GFP_KERNEL); 344 if (!map) { 345 mutex_unlock(&acpi_ioremap_lock); 346 return NULL; 347 } 348 349 pg_off = round_down(phys, PAGE_SIZE); 350 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; 351 virt = acpi_map(pg_off, pg_sz); 352 if (!virt) { 353 mutex_unlock(&acpi_ioremap_lock); 354 kfree(map); 355 return NULL; 356 } 357 358 INIT_LIST_HEAD(&map->list); 359 map->virt = virt; 360 map->phys = pg_off; 361 map->size = pg_sz; 362 map->refcount = 1; 363 364 list_add_tail_rcu(&map->list, &acpi_ioremaps); 365 366 out: 367 mutex_unlock(&acpi_ioremap_lock); 368 return map->virt + (phys - map->phys); 369 } 370 EXPORT_SYMBOL_GPL(acpi_os_map_iomem); 371 372 void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size) 373 { 374 return (void *)acpi_os_map_iomem(phys, size); 375 } 376 EXPORT_SYMBOL_GPL(acpi_os_map_memory); 377 378 static void acpi_os_drop_map_ref(struct acpi_ioremap *map) 379 { 380 if (!--map->refcount) 381 list_del_rcu(&map->list); 382 } 383 384 static void acpi_os_map_cleanup(struct acpi_ioremap *map) 385 { 386 if (!map->refcount) { 387 synchronize_rcu_expedited(); 388 acpi_unmap(map->phys, map->virt); 389 kfree(map); 390 } 391 } 392 393 /** 394 * acpi_os_unmap_iomem - Drop a memory mapping reference. 395 * @virt: Start of the address range to drop a reference to. 396 * @size: Size of the address range to drop a reference to. 397 * 398 * Look up the given virtual address range in the list of existing ACPI memory 399 * mappings, drop a reference to it and unmap it if there are no more active 400 * references to it. 401 * 402 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this 403 * routine simply calls __acpi_unmap_table() to get the job done. Since 404 * __acpi_unmap_table() is an __init function, the __ref annotation is needed 405 * here. 406 */ 407 void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size) 408 { 409 struct acpi_ioremap *map; 410 411 if (!acpi_gbl_permanent_mmap) { 412 __acpi_unmap_table(virt, size); 413 return; 414 } 415 416 mutex_lock(&acpi_ioremap_lock); 417 map = acpi_map_lookup_virt(virt, size); 418 if (!map) { 419 mutex_unlock(&acpi_ioremap_lock); 420 WARN(true, PREFIX "%s: bad address %p\n", __func__, virt); 421 return; 422 } 423 acpi_os_drop_map_ref(map); 424 mutex_unlock(&acpi_ioremap_lock); 425 426 acpi_os_map_cleanup(map); 427 } 428 EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem); 429 430 void __ref acpi_os_unmap_memory(void *virt, acpi_size size) 431 { 432 return acpi_os_unmap_iomem((void __iomem *)virt, size); 433 } 434 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); 435 436 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size) 437 { 438 if (!acpi_gbl_permanent_mmap) 439 __acpi_unmap_table(virt, size); 440 } 441 442 int acpi_os_map_generic_address(struct acpi_generic_address *gas) 443 { 444 u64 addr; 445 void __iomem *virt; 446 447 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 448 return 0; 449 450 /* Handle possible alignment issues */ 451 memcpy(&addr, &gas->address, sizeof(addr)); 452 if (!addr || !gas->bit_width) 453 return -EINVAL; 454 455 virt = acpi_os_map_iomem(addr, gas->bit_width / 8); 456 if (!virt) 457 return -EIO; 458 459 return 0; 460 } 461 EXPORT_SYMBOL(acpi_os_map_generic_address); 462 463 void acpi_os_unmap_generic_address(struct acpi_generic_address *gas) 464 { 465 u64 addr; 466 struct acpi_ioremap *map; 467 468 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 469 return; 470 471 /* Handle possible alignment issues */ 472 memcpy(&addr, &gas->address, sizeof(addr)); 473 if (!addr || !gas->bit_width) 474 return; 475 476 mutex_lock(&acpi_ioremap_lock); 477 map = acpi_map_lookup(addr, gas->bit_width / 8); 478 if (!map) { 479 mutex_unlock(&acpi_ioremap_lock); 480 return; 481 } 482 acpi_os_drop_map_ref(map); 483 mutex_unlock(&acpi_ioremap_lock); 484 485 acpi_os_map_cleanup(map); 486 } 487 EXPORT_SYMBOL(acpi_os_unmap_generic_address); 488 489 #ifdef ACPI_FUTURE_USAGE 490 acpi_status 491 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys) 492 { 493 if (!phys || !virt) 494 return AE_BAD_PARAMETER; 495 496 *phys = virt_to_phys(virt); 497 498 return AE_OK; 499 } 500 #endif 501 502 #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE 503 static bool acpi_rev_override; 504 505 int __init acpi_rev_override_setup(char *str) 506 { 507 acpi_rev_override = true; 508 return 1; 509 } 510 __setup("acpi_rev_override", acpi_rev_override_setup); 511 #else 512 #define acpi_rev_override false 513 #endif 514 515 #define ACPI_MAX_OVERRIDE_LEN 100 516 517 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN]; 518 519 acpi_status 520 acpi_os_predefined_override(const struct acpi_predefined_names *init_val, 521 acpi_string *new_val) 522 { 523 if (!init_val || !new_val) 524 return AE_BAD_PARAMETER; 525 526 *new_val = NULL; 527 if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) { 528 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n", 529 acpi_os_name); 530 *new_val = acpi_os_name; 531 } 532 533 if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) { 534 printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n"); 535 *new_val = (char *)5; 536 } 537 538 return AE_OK; 539 } 540 541 static irqreturn_t acpi_irq(int irq, void *dev_id) 542 { 543 u32 handled; 544 545 handled = (*acpi_irq_handler) (acpi_irq_context); 546 547 if (handled) { 548 acpi_irq_handled++; 549 return IRQ_HANDLED; 550 } else { 551 acpi_irq_not_handled++; 552 return IRQ_NONE; 553 } 554 } 555 556 acpi_status 557 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler, 558 void *context) 559 { 560 unsigned int irq; 561 562 acpi_irq_stats_init(); 563 564 /* 565 * ACPI interrupts different from the SCI in our copy of the FADT are 566 * not supported. 567 */ 568 if (gsi != acpi_gbl_FADT.sci_interrupt) 569 return AE_BAD_PARAMETER; 570 571 if (acpi_irq_handler) 572 return AE_ALREADY_ACQUIRED; 573 574 if (acpi_gsi_to_irq(gsi, &irq) < 0) { 575 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n", 576 gsi); 577 return AE_OK; 578 } 579 580 acpi_irq_handler = handler; 581 acpi_irq_context = context; 582 if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) { 583 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); 584 acpi_irq_handler = NULL; 585 return AE_NOT_ACQUIRED; 586 } 587 acpi_sci_irq = irq; 588 589 return AE_OK; 590 } 591 592 acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler) 593 { 594 if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid()) 595 return AE_BAD_PARAMETER; 596 597 free_irq(acpi_sci_irq, acpi_irq); 598 acpi_irq_handler = NULL; 599 acpi_sci_irq = INVALID_ACPI_IRQ; 600 601 return AE_OK; 602 } 603 604 /* 605 * Running in interpreter thread context, safe to sleep 606 */ 607 608 void acpi_os_sleep(u64 ms) 609 { 610 msleep(ms); 611 } 612 613 void acpi_os_stall(u32 us) 614 { 615 while (us) { 616 u32 delay = 1000; 617 618 if (delay > us) 619 delay = us; 620 udelay(delay); 621 touch_nmi_watchdog(); 622 us -= delay; 623 } 624 } 625 626 /* 627 * Support ACPI 3.0 AML Timer operand 628 * Returns 64-bit free-running, monotonically increasing timer 629 * with 100ns granularity 630 */ 631 u64 acpi_os_get_timer(void) 632 { 633 u64 time_ns = ktime_to_ns(ktime_get()); 634 do_div(time_ns, 100); 635 return time_ns; 636 } 637 638 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width) 639 { 640 u32 dummy; 641 642 if (!value) 643 value = &dummy; 644 645 *value = 0; 646 if (width <= 8) { 647 *(u8 *) value = inb(port); 648 } else if (width <= 16) { 649 *(u16 *) value = inw(port); 650 } else if (width <= 32) { 651 *(u32 *) value = inl(port); 652 } else { 653 BUG(); 654 } 655 656 return AE_OK; 657 } 658 659 EXPORT_SYMBOL(acpi_os_read_port); 660 661 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width) 662 { 663 if (width <= 8) { 664 outb(value, port); 665 } else if (width <= 16) { 666 outw(value, port); 667 } else if (width <= 32) { 668 outl(value, port); 669 } else { 670 BUG(); 671 } 672 673 return AE_OK; 674 } 675 676 EXPORT_SYMBOL(acpi_os_write_port); 677 678 acpi_status 679 acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width) 680 { 681 void __iomem *virt_addr; 682 unsigned int size = width / 8; 683 bool unmap = false; 684 u64 dummy; 685 686 rcu_read_lock(); 687 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 688 if (!virt_addr) { 689 rcu_read_unlock(); 690 virt_addr = acpi_os_ioremap(phys_addr, size); 691 if (!virt_addr) 692 return AE_BAD_ADDRESS; 693 unmap = true; 694 } 695 696 if (!value) 697 value = &dummy; 698 699 switch (width) { 700 case 8: 701 *(u8 *) value = readb(virt_addr); 702 break; 703 case 16: 704 *(u16 *) value = readw(virt_addr); 705 break; 706 case 32: 707 *(u32 *) value = readl(virt_addr); 708 break; 709 case 64: 710 *(u64 *) value = readq(virt_addr); 711 break; 712 default: 713 BUG(); 714 } 715 716 if (unmap) 717 iounmap(virt_addr); 718 else 719 rcu_read_unlock(); 720 721 return AE_OK; 722 } 723 724 acpi_status 725 acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width) 726 { 727 void __iomem *virt_addr; 728 unsigned int size = width / 8; 729 bool unmap = false; 730 731 rcu_read_lock(); 732 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 733 if (!virt_addr) { 734 rcu_read_unlock(); 735 virt_addr = acpi_os_ioremap(phys_addr, size); 736 if (!virt_addr) 737 return AE_BAD_ADDRESS; 738 unmap = true; 739 } 740 741 switch (width) { 742 case 8: 743 writeb(value, virt_addr); 744 break; 745 case 16: 746 writew(value, virt_addr); 747 break; 748 case 32: 749 writel(value, virt_addr); 750 break; 751 case 64: 752 writeq(value, virt_addr); 753 break; 754 default: 755 BUG(); 756 } 757 758 if (unmap) 759 iounmap(virt_addr); 760 else 761 rcu_read_unlock(); 762 763 return AE_OK; 764 } 765 766 acpi_status 767 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, 768 u64 *value, u32 width) 769 { 770 int result, size; 771 u32 value32; 772 773 if (!value) 774 return AE_BAD_PARAMETER; 775 776 switch (width) { 777 case 8: 778 size = 1; 779 break; 780 case 16: 781 size = 2; 782 break; 783 case 32: 784 size = 4; 785 break; 786 default: 787 return AE_ERROR; 788 } 789 790 result = raw_pci_read(pci_id->segment, pci_id->bus, 791 PCI_DEVFN(pci_id->device, pci_id->function), 792 reg, size, &value32); 793 *value = value32; 794 795 return (result ? AE_ERROR : AE_OK); 796 } 797 798 acpi_status 799 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, 800 u64 value, u32 width) 801 { 802 int result, size; 803 804 switch (width) { 805 case 8: 806 size = 1; 807 break; 808 case 16: 809 size = 2; 810 break; 811 case 32: 812 size = 4; 813 break; 814 default: 815 return AE_ERROR; 816 } 817 818 result = raw_pci_write(pci_id->segment, pci_id->bus, 819 PCI_DEVFN(pci_id->device, pci_id->function), 820 reg, size, value); 821 822 return (result ? AE_ERROR : AE_OK); 823 } 824 825 static void acpi_os_execute_deferred(struct work_struct *work) 826 { 827 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); 828 829 dpc->function(dpc->context); 830 kfree(dpc); 831 } 832 833 #ifdef CONFIG_ACPI_DEBUGGER 834 static struct acpi_debugger acpi_debugger; 835 static bool acpi_debugger_initialized; 836 837 int acpi_register_debugger(struct module *owner, 838 const struct acpi_debugger_ops *ops) 839 { 840 int ret = 0; 841 842 mutex_lock(&acpi_debugger.lock); 843 if (acpi_debugger.ops) { 844 ret = -EBUSY; 845 goto err_lock; 846 } 847 848 acpi_debugger.owner = owner; 849 acpi_debugger.ops = ops; 850 851 err_lock: 852 mutex_unlock(&acpi_debugger.lock); 853 return ret; 854 } 855 EXPORT_SYMBOL(acpi_register_debugger); 856 857 void acpi_unregister_debugger(const struct acpi_debugger_ops *ops) 858 { 859 mutex_lock(&acpi_debugger.lock); 860 if (ops == acpi_debugger.ops) { 861 acpi_debugger.ops = NULL; 862 acpi_debugger.owner = NULL; 863 } 864 mutex_unlock(&acpi_debugger.lock); 865 } 866 EXPORT_SYMBOL(acpi_unregister_debugger); 867 868 int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context) 869 { 870 int ret; 871 int (*func)(acpi_osd_exec_callback, void *); 872 struct module *owner; 873 874 if (!acpi_debugger_initialized) 875 return -ENODEV; 876 mutex_lock(&acpi_debugger.lock); 877 if (!acpi_debugger.ops) { 878 ret = -ENODEV; 879 goto err_lock; 880 } 881 if (!try_module_get(acpi_debugger.owner)) { 882 ret = -ENODEV; 883 goto err_lock; 884 } 885 func = acpi_debugger.ops->create_thread; 886 owner = acpi_debugger.owner; 887 mutex_unlock(&acpi_debugger.lock); 888 889 ret = func(function, context); 890 891 mutex_lock(&acpi_debugger.lock); 892 module_put(owner); 893 err_lock: 894 mutex_unlock(&acpi_debugger.lock); 895 return ret; 896 } 897 898 ssize_t acpi_debugger_write_log(const char *msg) 899 { 900 ssize_t ret; 901 ssize_t (*func)(const char *); 902 struct module *owner; 903 904 if (!acpi_debugger_initialized) 905 return -ENODEV; 906 mutex_lock(&acpi_debugger.lock); 907 if (!acpi_debugger.ops) { 908 ret = -ENODEV; 909 goto err_lock; 910 } 911 if (!try_module_get(acpi_debugger.owner)) { 912 ret = -ENODEV; 913 goto err_lock; 914 } 915 func = acpi_debugger.ops->write_log; 916 owner = acpi_debugger.owner; 917 mutex_unlock(&acpi_debugger.lock); 918 919 ret = func(msg); 920 921 mutex_lock(&acpi_debugger.lock); 922 module_put(owner); 923 err_lock: 924 mutex_unlock(&acpi_debugger.lock); 925 return ret; 926 } 927 928 ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length) 929 { 930 ssize_t ret; 931 ssize_t (*func)(char *, size_t); 932 struct module *owner; 933 934 if (!acpi_debugger_initialized) 935 return -ENODEV; 936 mutex_lock(&acpi_debugger.lock); 937 if (!acpi_debugger.ops) { 938 ret = -ENODEV; 939 goto err_lock; 940 } 941 if (!try_module_get(acpi_debugger.owner)) { 942 ret = -ENODEV; 943 goto err_lock; 944 } 945 func = acpi_debugger.ops->read_cmd; 946 owner = acpi_debugger.owner; 947 mutex_unlock(&acpi_debugger.lock); 948 949 ret = func(buffer, buffer_length); 950 951 mutex_lock(&acpi_debugger.lock); 952 module_put(owner); 953 err_lock: 954 mutex_unlock(&acpi_debugger.lock); 955 return ret; 956 } 957 958 int acpi_debugger_wait_command_ready(void) 959 { 960 int ret; 961 int (*func)(bool, char *, size_t); 962 struct module *owner; 963 964 if (!acpi_debugger_initialized) 965 return -ENODEV; 966 mutex_lock(&acpi_debugger.lock); 967 if (!acpi_debugger.ops) { 968 ret = -ENODEV; 969 goto err_lock; 970 } 971 if (!try_module_get(acpi_debugger.owner)) { 972 ret = -ENODEV; 973 goto err_lock; 974 } 975 func = acpi_debugger.ops->wait_command_ready; 976 owner = acpi_debugger.owner; 977 mutex_unlock(&acpi_debugger.lock); 978 979 ret = func(acpi_gbl_method_executing, 980 acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE); 981 982 mutex_lock(&acpi_debugger.lock); 983 module_put(owner); 984 err_lock: 985 mutex_unlock(&acpi_debugger.lock); 986 return ret; 987 } 988 989 int acpi_debugger_notify_command_complete(void) 990 { 991 int ret; 992 int (*func)(void); 993 struct module *owner; 994 995 if (!acpi_debugger_initialized) 996 return -ENODEV; 997 mutex_lock(&acpi_debugger.lock); 998 if (!acpi_debugger.ops) { 999 ret = -ENODEV; 1000 goto err_lock; 1001 } 1002 if (!try_module_get(acpi_debugger.owner)) { 1003 ret = -ENODEV; 1004 goto err_lock; 1005 } 1006 func = acpi_debugger.ops->notify_command_complete; 1007 owner = acpi_debugger.owner; 1008 mutex_unlock(&acpi_debugger.lock); 1009 1010 ret = func(); 1011 1012 mutex_lock(&acpi_debugger.lock); 1013 module_put(owner); 1014 err_lock: 1015 mutex_unlock(&acpi_debugger.lock); 1016 return ret; 1017 } 1018 1019 int __init acpi_debugger_init(void) 1020 { 1021 mutex_init(&acpi_debugger.lock); 1022 acpi_debugger_initialized = true; 1023 return 0; 1024 } 1025 #endif 1026 1027 /******************************************************************************* 1028 * 1029 * FUNCTION: acpi_os_execute 1030 * 1031 * PARAMETERS: Type - Type of the callback 1032 * Function - Function to be executed 1033 * Context - Function parameters 1034 * 1035 * RETURN: Status 1036 * 1037 * DESCRIPTION: Depending on type, either queues function for deferred execution or 1038 * immediately executes function on a separate thread. 1039 * 1040 ******************************************************************************/ 1041 1042 acpi_status acpi_os_execute(acpi_execute_type type, 1043 acpi_osd_exec_callback function, void *context) 1044 { 1045 acpi_status status = AE_OK; 1046 struct acpi_os_dpc *dpc; 1047 struct workqueue_struct *queue; 1048 int ret; 1049 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 1050 "Scheduling function [%p(%p)] for deferred execution.\n", 1051 function, context)); 1052 1053 if (type == OSL_DEBUGGER_MAIN_THREAD) { 1054 ret = acpi_debugger_create_thread(function, context); 1055 if (ret) { 1056 pr_err("Call to kthread_create() failed.\n"); 1057 status = AE_ERROR; 1058 } 1059 goto out_thread; 1060 } 1061 1062 /* 1063 * Allocate/initialize DPC structure. Note that this memory will be 1064 * freed by the callee. The kernel handles the work_struct list in a 1065 * way that allows us to also free its memory inside the callee. 1066 * Because we may want to schedule several tasks with different 1067 * parameters we can't use the approach some kernel code uses of 1068 * having a static work_struct. 1069 */ 1070 1071 dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); 1072 if (!dpc) 1073 return AE_NO_MEMORY; 1074 1075 dpc->function = function; 1076 dpc->context = context; 1077 1078 /* 1079 * To prevent lockdep from complaining unnecessarily, make sure that 1080 * there is a different static lockdep key for each workqueue by using 1081 * INIT_WORK() for each of them separately. 1082 */ 1083 if (type == OSL_NOTIFY_HANDLER) { 1084 queue = kacpi_notify_wq; 1085 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 1086 } else if (type == OSL_GPE_HANDLER) { 1087 queue = kacpid_wq; 1088 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 1089 } else { 1090 pr_err("Unsupported os_execute type %d.\n", type); 1091 status = AE_ERROR; 1092 } 1093 1094 if (ACPI_FAILURE(status)) 1095 goto err_workqueue; 1096 1097 /* 1098 * On some machines, a software-initiated SMI causes corruption unless 1099 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but 1100 * typically it's done in GPE-related methods that are run via 1101 * workqueues, so we can avoid the known corruption cases by always 1102 * queueing on CPU 0. 1103 */ 1104 ret = queue_work_on(0, queue, &dpc->work); 1105 if (!ret) { 1106 printk(KERN_ERR PREFIX 1107 "Call to queue_work() failed.\n"); 1108 status = AE_ERROR; 1109 } 1110 err_workqueue: 1111 if (ACPI_FAILURE(status)) 1112 kfree(dpc); 1113 out_thread: 1114 return status; 1115 } 1116 EXPORT_SYMBOL(acpi_os_execute); 1117 1118 void acpi_os_wait_events_complete(void) 1119 { 1120 /* 1121 * Make sure the GPE handler or the fixed event handler is not used 1122 * on another CPU after removal. 1123 */ 1124 if (acpi_sci_irq_valid()) 1125 synchronize_hardirq(acpi_sci_irq); 1126 flush_workqueue(kacpid_wq); 1127 flush_workqueue(kacpi_notify_wq); 1128 } 1129 1130 struct acpi_hp_work { 1131 struct work_struct work; 1132 struct acpi_device *adev; 1133 u32 src; 1134 }; 1135 1136 static void acpi_hotplug_work_fn(struct work_struct *work) 1137 { 1138 struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work); 1139 1140 acpi_os_wait_events_complete(); 1141 acpi_device_hotplug(hpw->adev, hpw->src); 1142 kfree(hpw); 1143 } 1144 1145 acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src) 1146 { 1147 struct acpi_hp_work *hpw; 1148 1149 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 1150 "Scheduling hotplug event (%p, %u) for deferred execution.\n", 1151 adev, src)); 1152 1153 hpw = kmalloc(sizeof(*hpw), GFP_KERNEL); 1154 if (!hpw) 1155 return AE_NO_MEMORY; 1156 1157 INIT_WORK(&hpw->work, acpi_hotplug_work_fn); 1158 hpw->adev = adev; 1159 hpw->src = src; 1160 /* 1161 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because 1162 * the hotplug code may call driver .remove() functions, which may 1163 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush 1164 * these workqueues. 1165 */ 1166 if (!queue_work(kacpi_hotplug_wq, &hpw->work)) { 1167 kfree(hpw); 1168 return AE_ERROR; 1169 } 1170 return AE_OK; 1171 } 1172 1173 bool acpi_queue_hotplug_work(struct work_struct *work) 1174 { 1175 return queue_work(kacpi_hotplug_wq, work); 1176 } 1177 1178 acpi_status 1179 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) 1180 { 1181 struct semaphore *sem = NULL; 1182 1183 sem = acpi_os_allocate_zeroed(sizeof(struct semaphore)); 1184 if (!sem) 1185 return AE_NO_MEMORY; 1186 1187 sema_init(sem, initial_units); 1188 1189 *handle = (acpi_handle *) sem; 1190 1191 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n", 1192 *handle, initial_units)); 1193 1194 return AE_OK; 1195 } 1196 1197 /* 1198 * TODO: A better way to delete semaphores? Linux doesn't have a 1199 * 'delete_semaphore()' function -- may result in an invalid 1200 * pointer dereference for non-synchronized consumers. Should 1201 * we at least check for blocked threads and signal/cancel them? 1202 */ 1203 1204 acpi_status acpi_os_delete_semaphore(acpi_handle handle) 1205 { 1206 struct semaphore *sem = (struct semaphore *)handle; 1207 1208 if (!sem) 1209 return AE_BAD_PARAMETER; 1210 1211 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle)); 1212 1213 BUG_ON(!list_empty(&sem->wait_list)); 1214 kfree(sem); 1215 sem = NULL; 1216 1217 return AE_OK; 1218 } 1219 1220 /* 1221 * TODO: Support for units > 1? 1222 */ 1223 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) 1224 { 1225 acpi_status status = AE_OK; 1226 struct semaphore *sem = (struct semaphore *)handle; 1227 long jiffies; 1228 int ret = 0; 1229 1230 if (!acpi_os_initialized) 1231 return AE_OK; 1232 1233 if (!sem || (units < 1)) 1234 return AE_BAD_PARAMETER; 1235 1236 if (units > 1) 1237 return AE_SUPPORT; 1238 1239 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", 1240 handle, units, timeout)); 1241 1242 if (timeout == ACPI_WAIT_FOREVER) 1243 jiffies = MAX_SCHEDULE_TIMEOUT; 1244 else 1245 jiffies = msecs_to_jiffies(timeout); 1246 1247 ret = down_timeout(sem, jiffies); 1248 if (ret) 1249 status = AE_TIME; 1250 1251 if (ACPI_FAILURE(status)) { 1252 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 1253 "Failed to acquire semaphore[%p|%d|%d], %s", 1254 handle, units, timeout, 1255 acpi_format_exception(status))); 1256 } else { 1257 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 1258 "Acquired semaphore[%p|%d|%d]", handle, 1259 units, timeout)); 1260 } 1261 1262 return status; 1263 } 1264 1265 /* 1266 * TODO: Support for units > 1? 1267 */ 1268 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) 1269 { 1270 struct semaphore *sem = (struct semaphore *)handle; 1271 1272 if (!acpi_os_initialized) 1273 return AE_OK; 1274 1275 if (!sem || (units < 1)) 1276 return AE_BAD_PARAMETER; 1277 1278 if (units > 1) 1279 return AE_SUPPORT; 1280 1281 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle, 1282 units)); 1283 1284 up(sem); 1285 1286 return AE_OK; 1287 } 1288 1289 acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read) 1290 { 1291 #ifdef ENABLE_DEBUGGER 1292 if (acpi_in_debugger) { 1293 u32 chars; 1294 1295 kdb_read(buffer, buffer_length); 1296 1297 /* remove the CR kdb includes */ 1298 chars = strlen(buffer) - 1; 1299 buffer[chars] = '\0'; 1300 } 1301 #else 1302 int ret; 1303 1304 ret = acpi_debugger_read_cmd(buffer, buffer_length); 1305 if (ret < 0) 1306 return AE_ERROR; 1307 if (bytes_read) 1308 *bytes_read = ret; 1309 #endif 1310 1311 return AE_OK; 1312 } 1313 EXPORT_SYMBOL(acpi_os_get_line); 1314 1315 acpi_status acpi_os_wait_command_ready(void) 1316 { 1317 int ret; 1318 1319 ret = acpi_debugger_wait_command_ready(); 1320 if (ret < 0) 1321 return AE_ERROR; 1322 return AE_OK; 1323 } 1324 1325 acpi_status acpi_os_notify_command_complete(void) 1326 { 1327 int ret; 1328 1329 ret = acpi_debugger_notify_command_complete(); 1330 if (ret < 0) 1331 return AE_ERROR; 1332 return AE_OK; 1333 } 1334 1335 acpi_status acpi_os_signal(u32 function, void *info) 1336 { 1337 switch (function) { 1338 case ACPI_SIGNAL_FATAL: 1339 printk(KERN_ERR PREFIX "Fatal opcode executed\n"); 1340 break; 1341 case ACPI_SIGNAL_BREAKPOINT: 1342 /* 1343 * AML Breakpoint 1344 * ACPI spec. says to treat it as a NOP unless 1345 * you are debugging. So if/when we integrate 1346 * AML debugger into the kernel debugger its 1347 * hook will go here. But until then it is 1348 * not useful to print anything on breakpoints. 1349 */ 1350 break; 1351 default: 1352 break; 1353 } 1354 1355 return AE_OK; 1356 } 1357 1358 static int __init acpi_os_name_setup(char *str) 1359 { 1360 char *p = acpi_os_name; 1361 int count = ACPI_MAX_OVERRIDE_LEN - 1; 1362 1363 if (!str || !*str) 1364 return 0; 1365 1366 for (; count-- && *str; str++) { 1367 if (isalnum(*str) || *str == ' ' || *str == ':') 1368 *p++ = *str; 1369 else if (*str == '\'' || *str == '"') 1370 continue; 1371 else 1372 break; 1373 } 1374 *p = 0; 1375 1376 return 1; 1377 1378 } 1379 1380 __setup("acpi_os_name=", acpi_os_name_setup); 1381 1382 /* 1383 * Disable the auto-serialization of named objects creation methods. 1384 * 1385 * This feature is enabled by default. It marks the AML control methods 1386 * that contain the opcodes to create named objects as "Serialized". 1387 */ 1388 static int __init acpi_no_auto_serialize_setup(char *str) 1389 { 1390 acpi_gbl_auto_serialize_methods = FALSE; 1391 pr_info("ACPI: auto-serialization disabled\n"); 1392 1393 return 1; 1394 } 1395 1396 __setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup); 1397 1398 /* Check of resource interference between native drivers and ACPI 1399 * OperationRegions (SystemIO and System Memory only). 1400 * IO ports and memory declared in ACPI might be used by the ACPI subsystem 1401 * in arbitrary AML code and can interfere with legacy drivers. 1402 * acpi_enforce_resources= can be set to: 1403 * 1404 * - strict (default) (2) 1405 * -> further driver trying to access the resources will not load 1406 * - lax (1) 1407 * -> further driver trying to access the resources will load, but you 1408 * get a system message that something might go wrong... 1409 * 1410 * - no (0) 1411 * -> ACPI Operation Region resources will not be registered 1412 * 1413 */ 1414 #define ENFORCE_RESOURCES_STRICT 2 1415 #define ENFORCE_RESOURCES_LAX 1 1416 #define ENFORCE_RESOURCES_NO 0 1417 1418 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT; 1419 1420 static int __init acpi_enforce_resources_setup(char *str) 1421 { 1422 if (str == NULL || *str == '\0') 1423 return 0; 1424 1425 if (!strcmp("strict", str)) 1426 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT; 1427 else if (!strcmp("lax", str)) 1428 acpi_enforce_resources = ENFORCE_RESOURCES_LAX; 1429 else if (!strcmp("no", str)) 1430 acpi_enforce_resources = ENFORCE_RESOURCES_NO; 1431 1432 return 1; 1433 } 1434 1435 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup); 1436 1437 /* Check for resource conflicts between ACPI OperationRegions and native 1438 * drivers */ 1439 int acpi_check_resource_conflict(const struct resource *res) 1440 { 1441 acpi_adr_space_type space_id; 1442 acpi_size length; 1443 u8 warn = 0; 1444 int clash = 0; 1445 1446 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) 1447 return 0; 1448 if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM)) 1449 return 0; 1450 1451 if (res->flags & IORESOURCE_IO) 1452 space_id = ACPI_ADR_SPACE_SYSTEM_IO; 1453 else 1454 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY; 1455 1456 length = resource_size(res); 1457 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) 1458 warn = 1; 1459 clash = acpi_check_address_range(space_id, res->start, length, warn); 1460 1461 if (clash) { 1462 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { 1463 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) 1464 printk(KERN_NOTICE "ACPI: This conflict may" 1465 " cause random problems and system" 1466 " instability\n"); 1467 printk(KERN_INFO "ACPI: If an ACPI driver is available" 1468 " for this device, you should use it instead of" 1469 " the native driver\n"); 1470 } 1471 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT) 1472 return -EBUSY; 1473 } 1474 return 0; 1475 } 1476 EXPORT_SYMBOL(acpi_check_resource_conflict); 1477 1478 int acpi_check_region(resource_size_t start, resource_size_t n, 1479 const char *name) 1480 { 1481 struct resource res = { 1482 .start = start, 1483 .end = start + n - 1, 1484 .name = name, 1485 .flags = IORESOURCE_IO, 1486 }; 1487 1488 return acpi_check_resource_conflict(&res); 1489 } 1490 EXPORT_SYMBOL(acpi_check_region); 1491 1492 /* 1493 * Let drivers know whether the resource checks are effective 1494 */ 1495 int acpi_resources_are_enforced(void) 1496 { 1497 return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT; 1498 } 1499 EXPORT_SYMBOL(acpi_resources_are_enforced); 1500 1501 /* 1502 * Deallocate the memory for a spinlock. 1503 */ 1504 void acpi_os_delete_lock(acpi_spinlock handle) 1505 { 1506 ACPI_FREE(handle); 1507 } 1508 1509 /* 1510 * Acquire a spinlock. 1511 * 1512 * handle is a pointer to the spinlock_t. 1513 */ 1514 1515 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp) 1516 { 1517 acpi_cpu_flags flags; 1518 spin_lock_irqsave(lockp, flags); 1519 return flags; 1520 } 1521 1522 /* 1523 * Release a spinlock. See above. 1524 */ 1525 1526 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags) 1527 { 1528 spin_unlock_irqrestore(lockp, flags); 1529 } 1530 1531 #ifndef ACPI_USE_LOCAL_CACHE 1532 1533 /******************************************************************************* 1534 * 1535 * FUNCTION: acpi_os_create_cache 1536 * 1537 * PARAMETERS: name - Ascii name for the cache 1538 * size - Size of each cached object 1539 * depth - Maximum depth of the cache (in objects) <ignored> 1540 * cache - Where the new cache object is returned 1541 * 1542 * RETURN: status 1543 * 1544 * DESCRIPTION: Create a cache object 1545 * 1546 ******************************************************************************/ 1547 1548 acpi_status 1549 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache) 1550 { 1551 *cache = kmem_cache_create(name, size, 0, 0, NULL); 1552 if (*cache == NULL) 1553 return AE_ERROR; 1554 else 1555 return AE_OK; 1556 } 1557 1558 /******************************************************************************* 1559 * 1560 * FUNCTION: acpi_os_purge_cache 1561 * 1562 * PARAMETERS: Cache - Handle to cache object 1563 * 1564 * RETURN: Status 1565 * 1566 * DESCRIPTION: Free all objects within the requested cache. 1567 * 1568 ******************************************************************************/ 1569 1570 acpi_status acpi_os_purge_cache(acpi_cache_t * cache) 1571 { 1572 kmem_cache_shrink(cache); 1573 return (AE_OK); 1574 } 1575 1576 /******************************************************************************* 1577 * 1578 * FUNCTION: acpi_os_delete_cache 1579 * 1580 * PARAMETERS: Cache - Handle to cache object 1581 * 1582 * RETURN: Status 1583 * 1584 * DESCRIPTION: Free all objects within the requested cache and delete the 1585 * cache object. 1586 * 1587 ******************************************************************************/ 1588 1589 acpi_status acpi_os_delete_cache(acpi_cache_t * cache) 1590 { 1591 kmem_cache_destroy(cache); 1592 return (AE_OK); 1593 } 1594 1595 /******************************************************************************* 1596 * 1597 * FUNCTION: acpi_os_release_object 1598 * 1599 * PARAMETERS: Cache - Handle to cache object 1600 * Object - The object to be released 1601 * 1602 * RETURN: None 1603 * 1604 * DESCRIPTION: Release an object to the specified cache. If cache is full, 1605 * the object is deleted. 1606 * 1607 ******************************************************************************/ 1608 1609 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object) 1610 { 1611 kmem_cache_free(cache, object); 1612 return (AE_OK); 1613 } 1614 #endif 1615 1616 static int __init acpi_no_static_ssdt_setup(char *s) 1617 { 1618 acpi_gbl_disable_ssdt_table_install = TRUE; 1619 pr_info("ACPI: static SSDT installation disabled\n"); 1620 1621 return 0; 1622 } 1623 1624 early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup); 1625 1626 static int __init acpi_disable_return_repair(char *s) 1627 { 1628 printk(KERN_NOTICE PREFIX 1629 "ACPI: Predefined validation mechanism disabled\n"); 1630 acpi_gbl_disable_auto_repair = TRUE; 1631 1632 return 1; 1633 } 1634 1635 __setup("acpica_no_return_repair", acpi_disable_return_repair); 1636 1637 acpi_status __init acpi_os_initialize(void) 1638 { 1639 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block); 1640 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block); 1641 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block); 1642 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block); 1643 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) { 1644 /* 1645 * Use acpi_os_map_generic_address to pre-map the reset 1646 * register if it's in system memory. 1647 */ 1648 int rv; 1649 1650 rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register); 1651 pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv); 1652 } 1653 acpi_os_initialized = true; 1654 1655 return AE_OK; 1656 } 1657 1658 acpi_status __init acpi_os_initialize1(void) 1659 { 1660 kacpid_wq = alloc_workqueue("kacpid", 0, 1); 1661 kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); 1662 kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0); 1663 BUG_ON(!kacpid_wq); 1664 BUG_ON(!kacpi_notify_wq); 1665 BUG_ON(!kacpi_hotplug_wq); 1666 acpi_osi_init(); 1667 return AE_OK; 1668 } 1669 1670 acpi_status acpi_os_terminate(void) 1671 { 1672 if (acpi_irq_handler) { 1673 acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt, 1674 acpi_irq_handler); 1675 } 1676 1677 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block); 1678 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block); 1679 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block); 1680 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block); 1681 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) 1682 acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register); 1683 1684 destroy_workqueue(kacpid_wq); 1685 destroy_workqueue(kacpi_notify_wq); 1686 destroy_workqueue(kacpi_hotplug_wq); 1687 1688 return AE_OK; 1689 } 1690 1691 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control, 1692 u32 pm1b_control) 1693 { 1694 int rc = 0; 1695 if (__acpi_os_prepare_sleep) 1696 rc = __acpi_os_prepare_sleep(sleep_state, 1697 pm1a_control, pm1b_control); 1698 if (rc < 0) 1699 return AE_ERROR; 1700 else if (rc > 0) 1701 return AE_CTRL_SKIP; 1702 1703 return AE_OK; 1704 } 1705 1706 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, 1707 u32 pm1a_ctrl, u32 pm1b_ctrl)) 1708 { 1709 __acpi_os_prepare_sleep = func; 1710 } 1711 1712 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, 1713 u32 val_b) 1714 { 1715 int rc = 0; 1716 if (__acpi_os_prepare_extended_sleep) 1717 rc = __acpi_os_prepare_extended_sleep(sleep_state, 1718 val_a, val_b); 1719 if (rc < 0) 1720 return AE_ERROR; 1721 else if (rc > 0) 1722 return AE_CTRL_SKIP; 1723 1724 return AE_OK; 1725 } 1726 1727 void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, 1728 u32 val_a, u32 val_b)) 1729 { 1730 __acpi_os_prepare_extended_sleep = func; 1731 } 1732