1 /* 2 * acpi_osl.c - OS-dependent functions ($Revision: 83 $) 3 * 4 * Copyright (C) 2000 Andrew Henroid 5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 7 * Copyright (c) 2008 Intel Corporation 8 * Author: Matthew Wilcox <willy@linux.intel.com> 9 * 10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2 of the License, or 15 * (at your option) any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; if not, write to the Free Software 24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 * 26 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 27 * 28 */ 29 30 #include <linux/module.h> 31 #include <linux/kernel.h> 32 #include <linux/slab.h> 33 #include <linux/mm.h> 34 #include <linux/highmem.h> 35 #include <linux/pci.h> 36 #include <linux/interrupt.h> 37 #include <linux/kmod.h> 38 #include <linux/delay.h> 39 #include <linux/workqueue.h> 40 #include <linux/nmi.h> 41 #include <linux/acpi.h> 42 #include <linux/acpi_io.h> 43 #include <linux/efi.h> 44 #include <linux/ioport.h> 45 #include <linux/list.h> 46 #include <linux/jiffies.h> 47 #include <linux/semaphore.h> 48 49 #include <asm/io.h> 50 #include <asm/uaccess.h> 51 52 #include <acpi/acpi.h> 53 #include <acpi/acpi_bus.h> 54 #include <acpi/processor.h> 55 56 #define _COMPONENT ACPI_OS_SERVICES 57 ACPI_MODULE_NAME("osl"); 58 #define PREFIX "ACPI: " 59 struct acpi_os_dpc { 60 acpi_osd_exec_callback function; 61 void *context; 62 struct work_struct work; 63 int wait; 64 }; 65 66 #ifdef CONFIG_ACPI_CUSTOM_DSDT 67 #include CONFIG_ACPI_CUSTOM_DSDT_FILE 68 #endif 69 70 #ifdef ENABLE_DEBUGGER 71 #include <linux/kdb.h> 72 73 /* stuff for debugger support */ 74 int acpi_in_debugger; 75 EXPORT_SYMBOL(acpi_in_debugger); 76 77 extern char line_buf[80]; 78 #endif /*ENABLE_DEBUGGER */ 79 80 static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl, 81 u32 pm1b_ctrl); 82 83 static acpi_osd_handler acpi_irq_handler; 84 static void *acpi_irq_context; 85 static struct workqueue_struct *kacpid_wq; 86 static struct workqueue_struct *kacpi_notify_wq; 87 struct workqueue_struct *kacpi_hotplug_wq; 88 EXPORT_SYMBOL(kacpi_hotplug_wq); 89 90 /* 91 * This list of permanent mappings is for memory that may be accessed from 92 * interrupt context, where we can't do the ioremap(). 93 */ 94 struct acpi_ioremap { 95 struct list_head list; 96 void __iomem *virt; 97 acpi_physical_address phys; 98 acpi_size size; 99 unsigned long refcount; 100 }; 101 102 static LIST_HEAD(acpi_ioremaps); 103 static DEFINE_MUTEX(acpi_ioremap_lock); 104 105 static void __init acpi_osi_setup_late(void); 106 107 /* 108 * The story of _OSI(Linux) 109 * 110 * From pre-history through Linux-2.6.22, 111 * Linux responded TRUE upon a BIOS OSI(Linux) query. 112 * 113 * Unfortunately, reference BIOS writers got wind of this 114 * and put OSI(Linux) in their example code, quickly exposing 115 * this string as ill-conceived and opening the door to 116 * an un-bounded number of BIOS incompatibilities. 117 * 118 * For example, OSI(Linux) was used on resume to re-POST a 119 * video card on one system, because Linux at that time 120 * could not do a speedy restore in its native driver. 121 * But then upon gaining quick native restore capability, 122 * Linux has no way to tell the BIOS to skip the time-consuming 123 * POST -- putting Linux at a permanent performance disadvantage. 124 * On another system, the BIOS writer used OSI(Linux) 125 * to infer native OS support for IPMI! On other systems, 126 * OSI(Linux) simply got in the way of Linux claiming to 127 * be compatible with other operating systems, exposing 128 * BIOS issues such as skipped device initialization. 129 * 130 * So "Linux" turned out to be a really poor chose of 131 * OSI string, and from Linux-2.6.23 onward we respond FALSE. 132 * 133 * BIOS writers should NOT query _OSI(Linux) on future systems. 134 * Linux will complain on the console when it sees it, and return FALSE. 135 * To get Linux to return TRUE for your system will require 136 * a kernel source update to add a DMI entry, 137 * or boot with "acpi_osi=Linux" 138 */ 139 140 static struct osi_linux { 141 unsigned int enable:1; 142 unsigned int dmi:1; 143 unsigned int cmdline:1; 144 } osi_linux = {0, 0, 0}; 145 146 static u32 acpi_osi_handler(acpi_string interface, u32 supported) 147 { 148 if (!strcmp("Linux", interface)) { 149 150 printk_once(KERN_NOTICE FW_BUG PREFIX 151 "BIOS _OSI(Linux) query %s%s\n", 152 osi_linux.enable ? "honored" : "ignored", 153 osi_linux.cmdline ? " via cmdline" : 154 osi_linux.dmi ? " via DMI" : ""); 155 } 156 157 return supported; 158 } 159 160 static void __init acpi_request_region (struct acpi_generic_address *gas, 161 unsigned int length, char *desc) 162 { 163 u64 addr; 164 165 /* Handle possible alignment issues */ 166 memcpy(&addr, &gas->address, sizeof(addr)); 167 if (!addr || !length) 168 return; 169 170 /* Resources are never freed */ 171 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) 172 request_region(addr, length, desc); 173 else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 174 request_mem_region(addr, length, desc); 175 } 176 177 static int __init acpi_reserve_resources(void) 178 { 179 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length, 180 "ACPI PM1a_EVT_BLK"); 181 182 acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length, 183 "ACPI PM1b_EVT_BLK"); 184 185 acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length, 186 "ACPI PM1a_CNT_BLK"); 187 188 acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length, 189 "ACPI PM1b_CNT_BLK"); 190 191 if (acpi_gbl_FADT.pm_timer_length == 4) 192 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR"); 193 194 acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length, 195 "ACPI PM2_CNT_BLK"); 196 197 /* Length of GPE blocks must be a non-negative multiple of 2 */ 198 199 if (!(acpi_gbl_FADT.gpe0_block_length & 0x1)) 200 acpi_request_region(&acpi_gbl_FADT.xgpe0_block, 201 acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK"); 202 203 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1)) 204 acpi_request_region(&acpi_gbl_FADT.xgpe1_block, 205 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK"); 206 207 return 0; 208 } 209 device_initcall(acpi_reserve_resources); 210 211 void acpi_os_printf(const char *fmt, ...) 212 { 213 va_list args; 214 va_start(args, fmt); 215 acpi_os_vprintf(fmt, args); 216 va_end(args); 217 } 218 219 void acpi_os_vprintf(const char *fmt, va_list args) 220 { 221 static char buffer[512]; 222 223 vsprintf(buffer, fmt, args); 224 225 #ifdef ENABLE_DEBUGGER 226 if (acpi_in_debugger) { 227 kdb_printf("%s", buffer); 228 } else { 229 printk(KERN_CONT "%s", buffer); 230 } 231 #else 232 printk(KERN_CONT "%s", buffer); 233 #endif 234 } 235 236 #ifdef CONFIG_KEXEC 237 static unsigned long acpi_rsdp; 238 static int __init setup_acpi_rsdp(char *arg) 239 { 240 acpi_rsdp = simple_strtoul(arg, NULL, 16); 241 return 0; 242 } 243 early_param("acpi_rsdp", setup_acpi_rsdp); 244 #endif 245 246 acpi_physical_address __init acpi_os_get_root_pointer(void) 247 { 248 #ifdef CONFIG_KEXEC 249 if (acpi_rsdp) 250 return acpi_rsdp; 251 #endif 252 253 if (efi_enabled) { 254 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 255 return efi.acpi20; 256 else if (efi.acpi != EFI_INVALID_TABLE_ADDR) 257 return efi.acpi; 258 else { 259 printk(KERN_ERR PREFIX 260 "System description tables not found\n"); 261 return 0; 262 } 263 } else { 264 acpi_physical_address pa = 0; 265 266 acpi_find_root_pointer(&pa); 267 return pa; 268 } 269 } 270 271 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 272 static struct acpi_ioremap * 273 acpi_map_lookup(acpi_physical_address phys, acpi_size size) 274 { 275 struct acpi_ioremap *map; 276 277 list_for_each_entry_rcu(map, &acpi_ioremaps, list) 278 if (map->phys <= phys && 279 phys + size <= map->phys + map->size) 280 return map; 281 282 return NULL; 283 } 284 285 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 286 static void __iomem * 287 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size) 288 { 289 struct acpi_ioremap *map; 290 291 map = acpi_map_lookup(phys, size); 292 if (map) 293 return map->virt + (phys - map->phys); 294 295 return NULL; 296 } 297 298 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size) 299 { 300 struct acpi_ioremap *map; 301 void __iomem *virt = NULL; 302 303 mutex_lock(&acpi_ioremap_lock); 304 map = acpi_map_lookup(phys, size); 305 if (map) { 306 virt = map->virt + (phys - map->phys); 307 map->refcount++; 308 } 309 mutex_unlock(&acpi_ioremap_lock); 310 return virt; 311 } 312 EXPORT_SYMBOL_GPL(acpi_os_get_iomem); 313 314 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 315 static struct acpi_ioremap * 316 acpi_map_lookup_virt(void __iomem *virt, acpi_size size) 317 { 318 struct acpi_ioremap *map; 319 320 list_for_each_entry_rcu(map, &acpi_ioremaps, list) 321 if (map->virt <= virt && 322 virt + size <= map->virt + map->size) 323 return map; 324 325 return NULL; 326 } 327 328 #ifndef CONFIG_IA64 329 #define should_use_kmap(pfn) page_is_ram(pfn) 330 #else 331 /* ioremap will take care of cache attributes */ 332 #define should_use_kmap(pfn) 0 333 #endif 334 335 static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz) 336 { 337 unsigned long pfn; 338 339 pfn = pg_off >> PAGE_SHIFT; 340 if (should_use_kmap(pfn)) { 341 if (pg_sz > PAGE_SIZE) 342 return NULL; 343 return (void __iomem __force *)kmap(pfn_to_page(pfn)); 344 } else 345 return acpi_os_ioremap(pg_off, pg_sz); 346 } 347 348 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) 349 { 350 unsigned long pfn; 351 352 pfn = pg_off >> PAGE_SHIFT; 353 if (should_use_kmap(pfn)) 354 kunmap(pfn_to_page(pfn)); 355 else 356 iounmap(vaddr); 357 } 358 359 void __iomem *__init_refok 360 acpi_os_map_memory(acpi_physical_address phys, acpi_size size) 361 { 362 struct acpi_ioremap *map; 363 void __iomem *virt; 364 acpi_physical_address pg_off; 365 acpi_size pg_sz; 366 367 if (phys > ULONG_MAX) { 368 printk(KERN_ERR PREFIX "Cannot map memory that high\n"); 369 return NULL; 370 } 371 372 if (!acpi_gbl_permanent_mmap) 373 return __acpi_map_table((unsigned long)phys, size); 374 375 mutex_lock(&acpi_ioremap_lock); 376 /* Check if there's a suitable mapping already. */ 377 map = acpi_map_lookup(phys, size); 378 if (map) { 379 map->refcount++; 380 goto out; 381 } 382 383 map = kzalloc(sizeof(*map), GFP_KERNEL); 384 if (!map) { 385 mutex_unlock(&acpi_ioremap_lock); 386 return NULL; 387 } 388 389 pg_off = round_down(phys, PAGE_SIZE); 390 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; 391 virt = acpi_map(pg_off, pg_sz); 392 if (!virt) { 393 mutex_unlock(&acpi_ioremap_lock); 394 kfree(map); 395 return NULL; 396 } 397 398 INIT_LIST_HEAD(&map->list); 399 map->virt = virt; 400 map->phys = pg_off; 401 map->size = pg_sz; 402 map->refcount = 1; 403 404 list_add_tail_rcu(&map->list, &acpi_ioremaps); 405 406 out: 407 mutex_unlock(&acpi_ioremap_lock); 408 return map->virt + (phys - map->phys); 409 } 410 EXPORT_SYMBOL_GPL(acpi_os_map_memory); 411 412 static void acpi_os_drop_map_ref(struct acpi_ioremap *map) 413 { 414 if (!--map->refcount) 415 list_del_rcu(&map->list); 416 } 417 418 static void acpi_os_map_cleanup(struct acpi_ioremap *map) 419 { 420 if (!map->refcount) { 421 synchronize_rcu(); 422 acpi_unmap(map->phys, map->virt); 423 kfree(map); 424 } 425 } 426 427 void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size) 428 { 429 struct acpi_ioremap *map; 430 431 if (!acpi_gbl_permanent_mmap) { 432 __acpi_unmap_table(virt, size); 433 return; 434 } 435 436 mutex_lock(&acpi_ioremap_lock); 437 map = acpi_map_lookup_virt(virt, size); 438 if (!map) { 439 mutex_unlock(&acpi_ioremap_lock); 440 WARN(true, PREFIX "%s: bad address %p\n", __func__, virt); 441 return; 442 } 443 acpi_os_drop_map_ref(map); 444 mutex_unlock(&acpi_ioremap_lock); 445 446 acpi_os_map_cleanup(map); 447 } 448 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); 449 450 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size) 451 { 452 if (!acpi_gbl_permanent_mmap) 453 __acpi_unmap_table(virt, size); 454 } 455 456 int acpi_os_map_generic_address(struct acpi_generic_address *gas) 457 { 458 u64 addr; 459 void __iomem *virt; 460 461 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 462 return 0; 463 464 /* Handle possible alignment issues */ 465 memcpy(&addr, &gas->address, sizeof(addr)); 466 if (!addr || !gas->bit_width) 467 return -EINVAL; 468 469 virt = acpi_os_map_memory(addr, gas->bit_width / 8); 470 if (!virt) 471 return -EIO; 472 473 return 0; 474 } 475 EXPORT_SYMBOL(acpi_os_map_generic_address); 476 477 void acpi_os_unmap_generic_address(struct acpi_generic_address *gas) 478 { 479 u64 addr; 480 struct acpi_ioremap *map; 481 482 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 483 return; 484 485 /* Handle possible alignment issues */ 486 memcpy(&addr, &gas->address, sizeof(addr)); 487 if (!addr || !gas->bit_width) 488 return; 489 490 mutex_lock(&acpi_ioremap_lock); 491 map = acpi_map_lookup(addr, gas->bit_width / 8); 492 if (!map) { 493 mutex_unlock(&acpi_ioremap_lock); 494 return; 495 } 496 acpi_os_drop_map_ref(map); 497 mutex_unlock(&acpi_ioremap_lock); 498 499 acpi_os_map_cleanup(map); 500 } 501 EXPORT_SYMBOL(acpi_os_unmap_generic_address); 502 503 #ifdef ACPI_FUTURE_USAGE 504 acpi_status 505 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys) 506 { 507 if (!phys || !virt) 508 return AE_BAD_PARAMETER; 509 510 *phys = virt_to_phys(virt); 511 512 return AE_OK; 513 } 514 #endif 515 516 #define ACPI_MAX_OVERRIDE_LEN 100 517 518 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN]; 519 520 acpi_status 521 acpi_os_predefined_override(const struct acpi_predefined_names *init_val, 522 acpi_string * new_val) 523 { 524 if (!init_val || !new_val) 525 return AE_BAD_PARAMETER; 526 527 *new_val = NULL; 528 if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) { 529 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n", 530 acpi_os_name); 531 *new_val = acpi_os_name; 532 } 533 534 return AE_OK; 535 } 536 537 acpi_status 538 acpi_os_table_override(struct acpi_table_header * existing_table, 539 struct acpi_table_header ** new_table) 540 { 541 if (!existing_table || !new_table) 542 return AE_BAD_PARAMETER; 543 544 *new_table = NULL; 545 546 #ifdef CONFIG_ACPI_CUSTOM_DSDT 547 if (strncmp(existing_table->signature, "DSDT", 4) == 0) 548 *new_table = (struct acpi_table_header *)AmlCode; 549 #endif 550 if (*new_table != NULL) { 551 printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], " 552 "this is unsafe: tainting kernel\n", 553 existing_table->signature, 554 existing_table->oem_table_id); 555 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE); 556 } 557 return AE_OK; 558 } 559 560 acpi_status 561 acpi_os_physical_table_override(struct acpi_table_header *existing_table, 562 acpi_physical_address * new_address, 563 u32 *new_table_length) 564 { 565 return AE_SUPPORT; 566 } 567 568 569 static irqreturn_t acpi_irq(int irq, void *dev_id) 570 { 571 u32 handled; 572 573 handled = (*acpi_irq_handler) (acpi_irq_context); 574 575 if (handled) { 576 acpi_irq_handled++; 577 return IRQ_HANDLED; 578 } else { 579 acpi_irq_not_handled++; 580 return IRQ_NONE; 581 } 582 } 583 584 acpi_status 585 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler, 586 void *context) 587 { 588 unsigned int irq; 589 590 acpi_irq_stats_init(); 591 592 /* 593 * ACPI interrupts different from the SCI in our copy of the FADT are 594 * not supported. 595 */ 596 if (gsi != acpi_gbl_FADT.sci_interrupt) 597 return AE_BAD_PARAMETER; 598 599 if (acpi_irq_handler) 600 return AE_ALREADY_ACQUIRED; 601 602 if (acpi_gsi_to_irq(gsi, &irq) < 0) { 603 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n", 604 gsi); 605 return AE_OK; 606 } 607 608 acpi_irq_handler = handler; 609 acpi_irq_context = context; 610 if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) { 611 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); 612 acpi_irq_handler = NULL; 613 return AE_NOT_ACQUIRED; 614 } 615 616 return AE_OK; 617 } 618 619 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler) 620 { 621 if (irq != acpi_gbl_FADT.sci_interrupt) 622 return AE_BAD_PARAMETER; 623 624 free_irq(irq, acpi_irq); 625 acpi_irq_handler = NULL; 626 627 return AE_OK; 628 } 629 630 /* 631 * Running in interpreter thread context, safe to sleep 632 */ 633 634 void acpi_os_sleep(u64 ms) 635 { 636 schedule_timeout_interruptible(msecs_to_jiffies(ms)); 637 } 638 639 void acpi_os_stall(u32 us) 640 { 641 while (us) { 642 u32 delay = 1000; 643 644 if (delay > us) 645 delay = us; 646 udelay(delay); 647 touch_nmi_watchdog(); 648 us -= delay; 649 } 650 } 651 652 /* 653 * Support ACPI 3.0 AML Timer operand 654 * Returns 64-bit free-running, monotonically increasing timer 655 * with 100ns granularity 656 */ 657 u64 acpi_os_get_timer(void) 658 { 659 static u64 t; 660 661 #ifdef CONFIG_HPET 662 /* TBD: use HPET if available */ 663 #endif 664 665 #ifdef CONFIG_X86_PM_TIMER 666 /* TBD: default to PM timer if HPET was not available */ 667 #endif 668 if (!t) 669 printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n"); 670 671 return ++t; 672 } 673 674 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width) 675 { 676 u32 dummy; 677 678 if (!value) 679 value = &dummy; 680 681 *value = 0; 682 if (width <= 8) { 683 *(u8 *) value = inb(port); 684 } else if (width <= 16) { 685 *(u16 *) value = inw(port); 686 } else if (width <= 32) { 687 *(u32 *) value = inl(port); 688 } else { 689 BUG(); 690 } 691 692 return AE_OK; 693 } 694 695 EXPORT_SYMBOL(acpi_os_read_port); 696 697 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width) 698 { 699 if (width <= 8) { 700 outb(value, port); 701 } else if (width <= 16) { 702 outw(value, port); 703 } else if (width <= 32) { 704 outl(value, port); 705 } else { 706 BUG(); 707 } 708 709 return AE_OK; 710 } 711 712 EXPORT_SYMBOL(acpi_os_write_port); 713 714 #ifdef readq 715 static inline u64 read64(const volatile void __iomem *addr) 716 { 717 return readq(addr); 718 } 719 #else 720 static inline u64 read64(const volatile void __iomem *addr) 721 { 722 u64 l, h; 723 l = readl(addr); 724 h = readl(addr+4); 725 return l | (h << 32); 726 } 727 #endif 728 729 acpi_status 730 acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width) 731 { 732 void __iomem *virt_addr; 733 unsigned int size = width / 8; 734 bool unmap = false; 735 u64 dummy; 736 737 rcu_read_lock(); 738 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 739 if (!virt_addr) { 740 rcu_read_unlock(); 741 virt_addr = acpi_os_ioremap(phys_addr, size); 742 if (!virt_addr) 743 return AE_BAD_ADDRESS; 744 unmap = true; 745 } 746 747 if (!value) 748 value = &dummy; 749 750 switch (width) { 751 case 8: 752 *(u8 *) value = readb(virt_addr); 753 break; 754 case 16: 755 *(u16 *) value = readw(virt_addr); 756 break; 757 case 32: 758 *(u32 *) value = readl(virt_addr); 759 break; 760 case 64: 761 *(u64 *) value = read64(virt_addr); 762 break; 763 default: 764 BUG(); 765 } 766 767 if (unmap) 768 iounmap(virt_addr); 769 else 770 rcu_read_unlock(); 771 772 return AE_OK; 773 } 774 775 #ifdef writeq 776 static inline void write64(u64 val, volatile void __iomem *addr) 777 { 778 writeq(val, addr); 779 } 780 #else 781 static inline void write64(u64 val, volatile void __iomem *addr) 782 { 783 writel(val, addr); 784 writel(val>>32, addr+4); 785 } 786 #endif 787 788 acpi_status 789 acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width) 790 { 791 void __iomem *virt_addr; 792 unsigned int size = width / 8; 793 bool unmap = false; 794 795 rcu_read_lock(); 796 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 797 if (!virt_addr) { 798 rcu_read_unlock(); 799 virt_addr = acpi_os_ioremap(phys_addr, size); 800 if (!virt_addr) 801 return AE_BAD_ADDRESS; 802 unmap = true; 803 } 804 805 switch (width) { 806 case 8: 807 writeb(value, virt_addr); 808 break; 809 case 16: 810 writew(value, virt_addr); 811 break; 812 case 32: 813 writel(value, virt_addr); 814 break; 815 case 64: 816 write64(value, virt_addr); 817 break; 818 default: 819 BUG(); 820 } 821 822 if (unmap) 823 iounmap(virt_addr); 824 else 825 rcu_read_unlock(); 826 827 return AE_OK; 828 } 829 830 acpi_status 831 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, 832 u64 *value, u32 width) 833 { 834 int result, size; 835 u32 value32; 836 837 if (!value) 838 return AE_BAD_PARAMETER; 839 840 switch (width) { 841 case 8: 842 size = 1; 843 break; 844 case 16: 845 size = 2; 846 break; 847 case 32: 848 size = 4; 849 break; 850 default: 851 return AE_ERROR; 852 } 853 854 result = raw_pci_read(pci_id->segment, pci_id->bus, 855 PCI_DEVFN(pci_id->device, pci_id->function), 856 reg, size, &value32); 857 *value = value32; 858 859 return (result ? AE_ERROR : AE_OK); 860 } 861 862 acpi_status 863 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, 864 u64 value, u32 width) 865 { 866 int result, size; 867 868 switch (width) { 869 case 8: 870 size = 1; 871 break; 872 case 16: 873 size = 2; 874 break; 875 case 32: 876 size = 4; 877 break; 878 default: 879 return AE_ERROR; 880 } 881 882 result = raw_pci_write(pci_id->segment, pci_id->bus, 883 PCI_DEVFN(pci_id->device, pci_id->function), 884 reg, size, value); 885 886 return (result ? AE_ERROR : AE_OK); 887 } 888 889 static void acpi_os_execute_deferred(struct work_struct *work) 890 { 891 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); 892 893 if (dpc->wait) 894 acpi_os_wait_events_complete(); 895 896 dpc->function(dpc->context); 897 kfree(dpc); 898 } 899 900 /******************************************************************************* 901 * 902 * FUNCTION: acpi_os_execute 903 * 904 * PARAMETERS: Type - Type of the callback 905 * Function - Function to be executed 906 * Context - Function parameters 907 * 908 * RETURN: Status 909 * 910 * DESCRIPTION: Depending on type, either queues function for deferred execution or 911 * immediately executes function on a separate thread. 912 * 913 ******************************************************************************/ 914 915 static acpi_status __acpi_os_execute(acpi_execute_type type, 916 acpi_osd_exec_callback function, void *context, int hp) 917 { 918 acpi_status status = AE_OK; 919 struct acpi_os_dpc *dpc; 920 struct workqueue_struct *queue; 921 int ret; 922 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 923 "Scheduling function [%p(%p)] for deferred execution.\n", 924 function, context)); 925 926 /* 927 * Allocate/initialize DPC structure. Note that this memory will be 928 * freed by the callee. The kernel handles the work_struct list in a 929 * way that allows us to also free its memory inside the callee. 930 * Because we may want to schedule several tasks with different 931 * parameters we can't use the approach some kernel code uses of 932 * having a static work_struct. 933 */ 934 935 dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); 936 if (!dpc) 937 return AE_NO_MEMORY; 938 939 dpc->function = function; 940 dpc->context = context; 941 942 /* 943 * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq 944 * because the hotplug code may call driver .remove() functions, 945 * which invoke flush_scheduled_work/acpi_os_wait_events_complete 946 * to flush these workqueues. 947 */ 948 queue = hp ? kacpi_hotplug_wq : 949 (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq); 950 dpc->wait = hp ? 1 : 0; 951 952 if (queue == kacpi_hotplug_wq) 953 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 954 else if (queue == kacpi_notify_wq) 955 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 956 else 957 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 958 959 /* 960 * On some machines, a software-initiated SMI causes corruption unless 961 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but 962 * typically it's done in GPE-related methods that are run via 963 * workqueues, so we can avoid the known corruption cases by always 964 * queueing on CPU 0. 965 */ 966 ret = queue_work_on(0, queue, &dpc->work); 967 968 if (!ret) { 969 printk(KERN_ERR PREFIX 970 "Call to queue_work() failed.\n"); 971 status = AE_ERROR; 972 kfree(dpc); 973 } 974 return status; 975 } 976 977 acpi_status acpi_os_execute(acpi_execute_type type, 978 acpi_osd_exec_callback function, void *context) 979 { 980 return __acpi_os_execute(type, function, context, 0); 981 } 982 EXPORT_SYMBOL(acpi_os_execute); 983 984 acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function, 985 void *context) 986 { 987 return __acpi_os_execute(0, function, context, 1); 988 } 989 990 void acpi_os_wait_events_complete(void) 991 { 992 flush_workqueue(kacpid_wq); 993 flush_workqueue(kacpi_notify_wq); 994 } 995 996 EXPORT_SYMBOL(acpi_os_wait_events_complete); 997 998 acpi_status 999 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) 1000 { 1001 struct semaphore *sem = NULL; 1002 1003 sem = acpi_os_allocate(sizeof(struct semaphore)); 1004 if (!sem) 1005 return AE_NO_MEMORY; 1006 memset(sem, 0, sizeof(struct semaphore)); 1007 1008 sema_init(sem, initial_units); 1009 1010 *handle = (acpi_handle *) sem; 1011 1012 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n", 1013 *handle, initial_units)); 1014 1015 return AE_OK; 1016 } 1017 1018 /* 1019 * TODO: A better way to delete semaphores? Linux doesn't have a 1020 * 'delete_semaphore()' function -- may result in an invalid 1021 * pointer dereference for non-synchronized consumers. Should 1022 * we at least check for blocked threads and signal/cancel them? 1023 */ 1024 1025 acpi_status acpi_os_delete_semaphore(acpi_handle handle) 1026 { 1027 struct semaphore *sem = (struct semaphore *)handle; 1028 1029 if (!sem) 1030 return AE_BAD_PARAMETER; 1031 1032 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle)); 1033 1034 BUG_ON(!list_empty(&sem->wait_list)); 1035 kfree(sem); 1036 sem = NULL; 1037 1038 return AE_OK; 1039 } 1040 1041 /* 1042 * TODO: Support for units > 1? 1043 */ 1044 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) 1045 { 1046 acpi_status status = AE_OK; 1047 struct semaphore *sem = (struct semaphore *)handle; 1048 long jiffies; 1049 int ret = 0; 1050 1051 if (!sem || (units < 1)) 1052 return AE_BAD_PARAMETER; 1053 1054 if (units > 1) 1055 return AE_SUPPORT; 1056 1057 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", 1058 handle, units, timeout)); 1059 1060 if (timeout == ACPI_WAIT_FOREVER) 1061 jiffies = MAX_SCHEDULE_TIMEOUT; 1062 else 1063 jiffies = msecs_to_jiffies(timeout); 1064 1065 ret = down_timeout(sem, jiffies); 1066 if (ret) 1067 status = AE_TIME; 1068 1069 if (ACPI_FAILURE(status)) { 1070 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 1071 "Failed to acquire semaphore[%p|%d|%d], %s", 1072 handle, units, timeout, 1073 acpi_format_exception(status))); 1074 } else { 1075 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 1076 "Acquired semaphore[%p|%d|%d]", handle, 1077 units, timeout)); 1078 } 1079 1080 return status; 1081 } 1082 1083 /* 1084 * TODO: Support for units > 1? 1085 */ 1086 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) 1087 { 1088 struct semaphore *sem = (struct semaphore *)handle; 1089 1090 if (!sem || (units < 1)) 1091 return AE_BAD_PARAMETER; 1092 1093 if (units > 1) 1094 return AE_SUPPORT; 1095 1096 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle, 1097 units)); 1098 1099 up(sem); 1100 1101 return AE_OK; 1102 } 1103 1104 #ifdef ACPI_FUTURE_USAGE 1105 u32 acpi_os_get_line(char *buffer) 1106 { 1107 1108 #ifdef ENABLE_DEBUGGER 1109 if (acpi_in_debugger) { 1110 u32 chars; 1111 1112 kdb_read(buffer, sizeof(line_buf)); 1113 1114 /* remove the CR kdb includes */ 1115 chars = strlen(buffer) - 1; 1116 buffer[chars] = '\0'; 1117 } 1118 #endif 1119 1120 return 0; 1121 } 1122 #endif /* ACPI_FUTURE_USAGE */ 1123 1124 acpi_status acpi_os_signal(u32 function, void *info) 1125 { 1126 switch (function) { 1127 case ACPI_SIGNAL_FATAL: 1128 printk(KERN_ERR PREFIX "Fatal opcode executed\n"); 1129 break; 1130 case ACPI_SIGNAL_BREAKPOINT: 1131 /* 1132 * AML Breakpoint 1133 * ACPI spec. says to treat it as a NOP unless 1134 * you are debugging. So if/when we integrate 1135 * AML debugger into the kernel debugger its 1136 * hook will go here. But until then it is 1137 * not useful to print anything on breakpoints. 1138 */ 1139 break; 1140 default: 1141 break; 1142 } 1143 1144 return AE_OK; 1145 } 1146 1147 static int __init acpi_os_name_setup(char *str) 1148 { 1149 char *p = acpi_os_name; 1150 int count = ACPI_MAX_OVERRIDE_LEN - 1; 1151 1152 if (!str || !*str) 1153 return 0; 1154 1155 for (; count-- && str && *str; str++) { 1156 if (isalnum(*str) || *str == ' ' || *str == ':') 1157 *p++ = *str; 1158 else if (*str == '\'' || *str == '"') 1159 continue; 1160 else 1161 break; 1162 } 1163 *p = 0; 1164 1165 return 1; 1166 1167 } 1168 1169 __setup("acpi_os_name=", acpi_os_name_setup); 1170 1171 #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */ 1172 #define OSI_STRING_ENTRIES_MAX 16 /* arbitrary */ 1173 1174 struct osi_setup_entry { 1175 char string[OSI_STRING_LENGTH_MAX]; 1176 bool enable; 1177 }; 1178 1179 static struct osi_setup_entry __initdata 1180 osi_setup_entries[OSI_STRING_ENTRIES_MAX] = { 1181 {"Module Device", true}, 1182 {"Processor Device", true}, 1183 {"3.0 _SCP Extensions", true}, 1184 {"Processor Aggregator Device", true}, 1185 }; 1186 1187 void __init acpi_osi_setup(char *str) 1188 { 1189 struct osi_setup_entry *osi; 1190 bool enable = true; 1191 int i; 1192 1193 if (!acpi_gbl_create_osi_method) 1194 return; 1195 1196 if (str == NULL || *str == '\0') { 1197 printk(KERN_INFO PREFIX "_OSI method disabled\n"); 1198 acpi_gbl_create_osi_method = FALSE; 1199 return; 1200 } 1201 1202 if (*str == '!') { 1203 str++; 1204 enable = false; 1205 } 1206 1207 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) { 1208 osi = &osi_setup_entries[i]; 1209 if (!strcmp(osi->string, str)) { 1210 osi->enable = enable; 1211 break; 1212 } else if (osi->string[0] == '\0') { 1213 osi->enable = enable; 1214 strncpy(osi->string, str, OSI_STRING_LENGTH_MAX); 1215 break; 1216 } 1217 } 1218 } 1219 1220 static void __init set_osi_linux(unsigned int enable) 1221 { 1222 if (osi_linux.enable != enable) 1223 osi_linux.enable = enable; 1224 1225 if (osi_linux.enable) 1226 acpi_osi_setup("Linux"); 1227 else 1228 acpi_osi_setup("!Linux"); 1229 1230 return; 1231 } 1232 1233 static void __init acpi_cmdline_osi_linux(unsigned int enable) 1234 { 1235 osi_linux.cmdline = 1; /* cmdline set the default and override DMI */ 1236 osi_linux.dmi = 0; 1237 set_osi_linux(enable); 1238 1239 return; 1240 } 1241 1242 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d) 1243 { 1244 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); 1245 1246 if (enable == -1) 1247 return; 1248 1249 osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */ 1250 set_osi_linux(enable); 1251 1252 return; 1253 } 1254 1255 /* 1256 * Modify the list of "OS Interfaces" reported to BIOS via _OSI 1257 * 1258 * empty string disables _OSI 1259 * string starting with '!' disables that string 1260 * otherwise string is added to list, augmenting built-in strings 1261 */ 1262 static void __init acpi_osi_setup_late(void) 1263 { 1264 struct osi_setup_entry *osi; 1265 char *str; 1266 int i; 1267 acpi_status status; 1268 1269 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) { 1270 osi = &osi_setup_entries[i]; 1271 str = osi->string; 1272 1273 if (*str == '\0') 1274 break; 1275 if (osi->enable) { 1276 status = acpi_install_interface(str); 1277 1278 if (ACPI_SUCCESS(status)) 1279 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str); 1280 } else { 1281 status = acpi_remove_interface(str); 1282 1283 if (ACPI_SUCCESS(status)) 1284 printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str); 1285 } 1286 } 1287 } 1288 1289 static int __init osi_setup(char *str) 1290 { 1291 if (str && !strcmp("Linux", str)) 1292 acpi_cmdline_osi_linux(1); 1293 else if (str && !strcmp("!Linux", str)) 1294 acpi_cmdline_osi_linux(0); 1295 else 1296 acpi_osi_setup(str); 1297 1298 return 1; 1299 } 1300 1301 __setup("acpi_osi=", osi_setup); 1302 1303 /* enable serialization to combat AE_ALREADY_EXISTS errors */ 1304 static int __init acpi_serialize_setup(char *str) 1305 { 1306 printk(KERN_INFO PREFIX "serialize enabled\n"); 1307 1308 acpi_gbl_all_methods_serialized = TRUE; 1309 1310 return 1; 1311 } 1312 1313 __setup("acpi_serialize", acpi_serialize_setup); 1314 1315 /* Check of resource interference between native drivers and ACPI 1316 * OperationRegions (SystemIO and System Memory only). 1317 * IO ports and memory declared in ACPI might be used by the ACPI subsystem 1318 * in arbitrary AML code and can interfere with legacy drivers. 1319 * acpi_enforce_resources= can be set to: 1320 * 1321 * - strict (default) (2) 1322 * -> further driver trying to access the resources will not load 1323 * - lax (1) 1324 * -> further driver trying to access the resources will load, but you 1325 * get a system message that something might go wrong... 1326 * 1327 * - no (0) 1328 * -> ACPI Operation Region resources will not be registered 1329 * 1330 */ 1331 #define ENFORCE_RESOURCES_STRICT 2 1332 #define ENFORCE_RESOURCES_LAX 1 1333 #define ENFORCE_RESOURCES_NO 0 1334 1335 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT; 1336 1337 static int __init acpi_enforce_resources_setup(char *str) 1338 { 1339 if (str == NULL || *str == '\0') 1340 return 0; 1341 1342 if (!strcmp("strict", str)) 1343 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT; 1344 else if (!strcmp("lax", str)) 1345 acpi_enforce_resources = ENFORCE_RESOURCES_LAX; 1346 else if (!strcmp("no", str)) 1347 acpi_enforce_resources = ENFORCE_RESOURCES_NO; 1348 1349 return 1; 1350 } 1351 1352 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup); 1353 1354 /* Check for resource conflicts between ACPI OperationRegions and native 1355 * drivers */ 1356 int acpi_check_resource_conflict(const struct resource *res) 1357 { 1358 acpi_adr_space_type space_id; 1359 acpi_size length; 1360 u8 warn = 0; 1361 int clash = 0; 1362 1363 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) 1364 return 0; 1365 if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM)) 1366 return 0; 1367 1368 if (res->flags & IORESOURCE_IO) 1369 space_id = ACPI_ADR_SPACE_SYSTEM_IO; 1370 else 1371 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY; 1372 1373 length = res->end - res->start + 1; 1374 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) 1375 warn = 1; 1376 clash = acpi_check_address_range(space_id, res->start, length, warn); 1377 1378 if (clash) { 1379 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { 1380 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) 1381 printk(KERN_NOTICE "ACPI: This conflict may" 1382 " cause random problems and system" 1383 " instability\n"); 1384 printk(KERN_INFO "ACPI: If an ACPI driver is available" 1385 " for this device, you should use it instead of" 1386 " the native driver\n"); 1387 } 1388 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT) 1389 return -EBUSY; 1390 } 1391 return 0; 1392 } 1393 EXPORT_SYMBOL(acpi_check_resource_conflict); 1394 1395 int acpi_check_region(resource_size_t start, resource_size_t n, 1396 const char *name) 1397 { 1398 struct resource res = { 1399 .start = start, 1400 .end = start + n - 1, 1401 .name = name, 1402 .flags = IORESOURCE_IO, 1403 }; 1404 1405 return acpi_check_resource_conflict(&res); 1406 } 1407 EXPORT_SYMBOL(acpi_check_region); 1408 1409 /* 1410 * Let drivers know whether the resource checks are effective 1411 */ 1412 int acpi_resources_are_enforced(void) 1413 { 1414 return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT; 1415 } 1416 EXPORT_SYMBOL(acpi_resources_are_enforced); 1417 1418 /* 1419 * Deallocate the memory for a spinlock. 1420 */ 1421 void acpi_os_delete_lock(acpi_spinlock handle) 1422 { 1423 ACPI_FREE(handle); 1424 } 1425 1426 /* 1427 * Acquire a spinlock. 1428 * 1429 * handle is a pointer to the spinlock_t. 1430 */ 1431 1432 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp) 1433 { 1434 acpi_cpu_flags flags; 1435 spin_lock_irqsave(lockp, flags); 1436 return flags; 1437 } 1438 1439 /* 1440 * Release a spinlock. See above. 1441 */ 1442 1443 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags) 1444 { 1445 spin_unlock_irqrestore(lockp, flags); 1446 } 1447 1448 #ifndef ACPI_USE_LOCAL_CACHE 1449 1450 /******************************************************************************* 1451 * 1452 * FUNCTION: acpi_os_create_cache 1453 * 1454 * PARAMETERS: name - Ascii name for the cache 1455 * size - Size of each cached object 1456 * depth - Maximum depth of the cache (in objects) <ignored> 1457 * cache - Where the new cache object is returned 1458 * 1459 * RETURN: status 1460 * 1461 * DESCRIPTION: Create a cache object 1462 * 1463 ******************************************************************************/ 1464 1465 acpi_status 1466 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache) 1467 { 1468 *cache = kmem_cache_create(name, size, 0, 0, NULL); 1469 if (*cache == NULL) 1470 return AE_ERROR; 1471 else 1472 return AE_OK; 1473 } 1474 1475 /******************************************************************************* 1476 * 1477 * FUNCTION: acpi_os_purge_cache 1478 * 1479 * PARAMETERS: Cache - Handle to cache object 1480 * 1481 * RETURN: Status 1482 * 1483 * DESCRIPTION: Free all objects within the requested cache. 1484 * 1485 ******************************************************************************/ 1486 1487 acpi_status acpi_os_purge_cache(acpi_cache_t * cache) 1488 { 1489 kmem_cache_shrink(cache); 1490 return (AE_OK); 1491 } 1492 1493 /******************************************************************************* 1494 * 1495 * FUNCTION: acpi_os_delete_cache 1496 * 1497 * PARAMETERS: Cache - Handle to cache object 1498 * 1499 * RETURN: Status 1500 * 1501 * DESCRIPTION: Free all objects within the requested cache and delete the 1502 * cache object. 1503 * 1504 ******************************************************************************/ 1505 1506 acpi_status acpi_os_delete_cache(acpi_cache_t * cache) 1507 { 1508 kmem_cache_destroy(cache); 1509 return (AE_OK); 1510 } 1511 1512 /******************************************************************************* 1513 * 1514 * FUNCTION: acpi_os_release_object 1515 * 1516 * PARAMETERS: Cache - Handle to cache object 1517 * Object - The object to be released 1518 * 1519 * RETURN: None 1520 * 1521 * DESCRIPTION: Release an object to the specified cache. If cache is full, 1522 * the object is deleted. 1523 * 1524 ******************************************************************************/ 1525 1526 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object) 1527 { 1528 kmem_cache_free(cache, object); 1529 return (AE_OK); 1530 } 1531 #endif 1532 1533 acpi_status __init acpi_os_initialize(void) 1534 { 1535 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block); 1536 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block); 1537 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block); 1538 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block); 1539 1540 return AE_OK; 1541 } 1542 1543 acpi_status __init acpi_os_initialize1(void) 1544 { 1545 kacpid_wq = alloc_workqueue("kacpid", 0, 1); 1546 kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); 1547 kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1); 1548 BUG_ON(!kacpid_wq); 1549 BUG_ON(!kacpi_notify_wq); 1550 BUG_ON(!kacpi_hotplug_wq); 1551 acpi_install_interface_handler(acpi_osi_handler); 1552 acpi_osi_setup_late(); 1553 return AE_OK; 1554 } 1555 1556 acpi_status acpi_os_terminate(void) 1557 { 1558 if (acpi_irq_handler) { 1559 acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt, 1560 acpi_irq_handler); 1561 } 1562 1563 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block); 1564 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block); 1565 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block); 1566 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block); 1567 1568 destroy_workqueue(kacpid_wq); 1569 destroy_workqueue(kacpi_notify_wq); 1570 destroy_workqueue(kacpi_hotplug_wq); 1571 1572 return AE_OK; 1573 } 1574 1575 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control, 1576 u32 pm1b_control) 1577 { 1578 int rc = 0; 1579 if (__acpi_os_prepare_sleep) 1580 rc = __acpi_os_prepare_sleep(sleep_state, 1581 pm1a_control, pm1b_control); 1582 if (rc < 0) 1583 return AE_ERROR; 1584 else if (rc > 0) 1585 return AE_CTRL_SKIP; 1586 1587 return AE_OK; 1588 } 1589 1590 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, 1591 u32 pm1a_ctrl, u32 pm1b_ctrl)) 1592 { 1593 __acpi_os_prepare_sleep = func; 1594 } 1595