xref: /openbmc/linux/drivers/acpi/osl.c (revision f42b3800)
1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *  Copyright (c) 2008 Intel Corporation
8  *   Author: Matthew Wilcox <willy@linux.intel.com>
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or
15  *  (at your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; if not, write to the Free Software
24  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *
28  */
29 
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/mm.h>
34 #include <linux/pci.h>
35 #include <linux/interrupt.h>
36 #include <linux/kmod.h>
37 #include <linux/delay.h>
38 #include <linux/dmi.h>
39 #include <linux/workqueue.h>
40 #include <linux/nmi.h>
41 #include <linux/acpi.h>
42 #include <linux/efi.h>
43 #include <linux/ioport.h>
44 #include <linux/list.h>
45 #include <linux/jiffies.h>
46 #include <linux/semaphore.h>
47 
48 #include <asm/io.h>
49 #include <asm/uaccess.h>
50 
51 #include <acpi/acpi.h>
52 #include <acpi/acpi_bus.h>
53 #include <acpi/processor.h>
54 
55 #define _COMPONENT		ACPI_OS_SERVICES
56 ACPI_MODULE_NAME("osl");
57 #define PREFIX		"ACPI: "
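/*
 * Deferred procedure call packet: queued on one of the ACPI workqueues by
 * acpi_os_execute() and freed by acpi_os_execute_deferred() after the
 * callback has run.
 */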
58 struct acpi_os_dpc {
59 	acpi_osd_exec_callback function;
60 	void *context;
61 	struct work_struct work;
62 };
63 
64 #ifdef CONFIG_ACPI_CUSTOM_DSDT
65 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
66 #endif
67 
68 #ifdef ENABLE_DEBUGGER
69 #include <linux/kdb.h>
70 
71 /* stuff for debugger support */
72 int acpi_in_debugger;
73 EXPORT_SYMBOL(acpi_in_debugger);
74 
75 extern char line_buf[80];
76 #endif				/*ENABLE_DEBUGGER */
77 
78 static unsigned int acpi_irq_irq;
79 static acpi_osd_handler acpi_irq_handler;
80 static void *acpi_irq_context;
81 static struct workqueue_struct *kacpid_wq;
82 static struct workqueue_struct *kacpi_notify_wq;
83 
84 struct acpi_res_list {
85 	resource_size_t start;
86 	resource_size_t end;
87 	acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
88 	char name[5];   /* ACPI names are at most 4 chars; keep a copy here
89 			   instead of res->name so no kmalloc is needed */
90 	struct list_head resource_list;
91 };
92 
93 static LIST_HEAD(resource_list_head);
94 static DEFINE_SPINLOCK(acpi_res_lock);
95 
96 #define	OSI_STRING_LENGTH_MAX 64	/* arbitrary */
97 static char osi_additional_string[OSI_STRING_LENGTH_MAX];
98 
99 /*
100  * "Ode to _OSI(Linux)"
101  *
102  * osi_linux -- Control response to BIOS _OSI(Linux) query.
103  *
104  * As Linux evolves, the features that it supports change.
105  * So an OSI string such as "Linux" is not specific enough
106  * to be useful across multiple versions of Linux.  It
107  * doesn't identify any particular feature, interface,
108  * or even any particular version of Linux...
109  *
110  * Unfortunately, Linux-2.6.22 and earlier responded "yes"
111  * to a BIOS _OSI(Linux) query.  When
112  * a reference mobile BIOS started using it, its use
113  * started to spread to many vendor platforms.
114  * As it is not supportable, we need to halt that spread.
115  *
116  * Today, most BIOS references to _OSI(Linux) are noise --
117  * they have no functional effect and are just dead code
118  * carried over from the reference BIOS.
119  *
120  * The next most common case is that _OSI(Linux) harms Linux,
121  * usually by causing the BIOS to follow paths that are
122  * not tested during Windows validation.
123  *
124  * Finally, there is a short list of platforms
125  * where OSI(Linux) benefits Linux.
126  *
127  * Starting with Linux-2.6.23, OSI(Linux) is disabled by default.
128  * DMI is used to disable the dmesg warning about OSI(Linux)
129  * on platforms where it is known to have no effect.
130  * But a dmesg warning remains for systems where
131  * we do not know if OSI(Linux) is good or bad for the system.
132  * DMI is also used to enable OSI(Linux) for the machines
133  * that are known to need it.
134  *
135  * BIOS writers should NOT query _OSI(Linux) on future systems.
136  * It will be ignored by default, and to get Linux to
137  * not ignore it will require a kernel source update to
138  * add a DMI entry, or a boot-time "acpi_osi=Linux" invocation.
139  */
140 #define OSI_LINUX_ENABLE 0
141 
142 static struct osi_linux {
143 	unsigned int	enable:1;
144 	unsigned int	dmi:1;
145 	unsigned int	cmdline:1;
146 	unsigned int	known:1;
147 } osi_linux = { OSI_LINUX_ENABLE, 0, 0, 0};
148 
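/*
 * Reserve a register block described by an ACPI generic address (the FADT
 * PM and GPE blocks below) in the port or iomem resource tree, so that it
 * shows up in /proc/ioports or /proc/iomem and cannot be claimed again by
 * other drivers.
 */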
149 static void __init acpi_request_region (struct acpi_generic_address *addr,
150 	unsigned int length, char *desc)
151 {
152 	struct resource *res;
153 
154 	if (!addr->address || !length)
155 		return;
156 
157 	if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
158 		res = request_region(addr->address, length, desc);
159 	else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
160 		res = request_mem_region(addr->address, length, desc);
161 }
162 
163 static int __init acpi_reserve_resources(void)
164 {
165 	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
166 		"ACPI PM1a_EVT_BLK");
167 
168 	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
169 		"ACPI PM1b_EVT_BLK");
170 
171 	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
172 		"ACPI PM1a_CNT_BLK");
173 
174 	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
175 		"ACPI PM1b_CNT_BLK");
176 
177 	if (acpi_gbl_FADT.pm_timer_length == 4)
178 		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
179 
180 	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
181 		"ACPI PM2_CNT_BLK");
182 
183 	/* Length of GPE blocks must be a multiple of 2 */
184 
185 	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
186 		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
187 			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
188 
189 	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
190 		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
191 			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
192 
193 	return 0;
194 }
195 device_initcall(acpi_reserve_resources);
196 
197 acpi_status __init acpi_os_initialize(void)
198 {
199 	return AE_OK;
200 }
201 
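/*
 * Second-stage OSL init, called once the kernel is far enough along to
 * create workqueues.  Notify handlers get their own queue so that notify
 * processing does not have to wait behind other deferred ACPI work.
 */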
202 acpi_status acpi_os_initialize1(void)
203 {
204 	kacpid_wq = create_singlethread_workqueue("kacpid");
205 	kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
206 	BUG_ON(!kacpid_wq);
207 	BUG_ON(!kacpi_notify_wq);
208 	return AE_OK;
209 }
210 
211 acpi_status acpi_os_terminate(void)
212 {
213 	if (acpi_irq_handler) {
214 		acpi_os_remove_interrupt_handler(acpi_irq_irq,
215 						 acpi_irq_handler);
216 	}
217 
218 	destroy_workqueue(kacpid_wq);
219 	destroy_workqueue(kacpi_notify_wq);
220 
221 	return AE_OK;
222 }
223 
224 void acpi_os_printf(const char *fmt, ...)
225 {
226 	va_list args;
227 	va_start(args, fmt);
228 	acpi_os_vprintf(fmt, args);
229 	va_end(args);
230 }
231 
232 void acpi_os_vprintf(const char *fmt, va_list args)
233 {
234 	static char buffer[512];
235 
236 	vsprintf(buffer, fmt, args);
237 
238 #ifdef ENABLE_DEBUGGER
239 	if (acpi_in_debugger) {
240 		kdb_printf("%s", buffer);
241 	} else {
242 		printk("%s", buffer);
243 	}
244 #else
245 	printk("%s", buffer);
246 #endif
247 }
248 
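/*
 * Locate the RSDP: on EFI systems it comes from the EFI system table
 * (the ACPI 2.0 entry is preferred), otherwise ACPICA scans for it via
 * acpi_find_root_pointer().
 */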
249 acpi_physical_address __init acpi_os_get_root_pointer(void)
250 {
251 	if (efi_enabled) {
252 		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
253 			return efi.acpi20;
254 		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
255 			return efi.acpi;
256 		else {
257 			printk(KERN_ERR PREFIX
258 			       "System description tables not found\n");
259 			return 0;
260 		}
261 	} else {
262 		acpi_physical_address pa = 0;
263 
264 		acpi_find_root_pointer(&pa);
265 		return pa;
266 	}
267 }
268 
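/*
 * Map a physical ACPI table or register range.  Once acpi_gbl_permanent_mmap
 * is set (ACPI fully initialized), plain ioremap() is used; before that the
 * early helper __acpi_map_table() provides the mapping.
 */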
269 void __iomem *__init_refok
270 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
271 {
272 	if (phys > ULONG_MAX) {
273 		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
274 		return NULL;
275 	}
276 	if (acpi_gbl_permanent_mmap)
277 		/*
278 		* ioremap checks to ensure this is in reserved space
279 		*/
280 		return ioremap((unsigned long)phys, size);
281 	else
282 		return __acpi_map_table((unsigned long)phys, size);
283 }
284 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
285 
286 void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
287 {
288 	if (acpi_gbl_permanent_mmap) {
289 		iounmap(virt);
290 	}
291 }
292 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
293 
294 #ifdef ACPI_FUTURE_USAGE
295 acpi_status
296 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
297 {
298 	if (!phys || !virt)
299 		return AE_BAD_PARAMETER;
300 
301 	*phys = virt_to_phys(virt);
302 
303 	return AE_OK;
304 }
305 #endif
306 
307 #define ACPI_MAX_OVERRIDE_LEN 100
308 
309 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
310 
311 acpi_status
312 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
313 			    acpi_string * new_val)
314 {
315 	if (!init_val || !new_val)
316 		return AE_BAD_PARAMETER;
317 
318 	*new_val = NULL;
319 	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
320 		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
321 		       acpi_os_name);
322 		*new_val = acpi_os_name;
323 	}
324 
325 	return AE_OK;
326 }
327 
328 acpi_status
329 acpi_os_table_override(struct acpi_table_header * existing_table,
330 		       struct acpi_table_header ** new_table)
331 {
332 	if (!existing_table || !new_table)
333 		return AE_BAD_PARAMETER;
334 
335 	*new_table = NULL;
336 
337 #ifdef CONFIG_ACPI_CUSTOM_DSDT
338 	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
339 		*new_table = (struct acpi_table_header *)AmlCode;
340 #endif
341 	if (*new_table != NULL) {
342 		printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
343 			   "this is unsafe: tainting kernel\n",
344 		       existing_table->signature,
345 		       existing_table->oem_table_id);
346 		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
347 	}
348 	return AE_OK;
349 }
350 
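/*
 * SCI interrupt handler: pass the interrupt to ACPICA's registered handler
 * and report IRQ_HANDLED only if ACPICA claimed it, so that shared-IRQ
 * accounting stays correct.  acpi_irq_handled feeds the SCI statistics.
 */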
351 static irqreturn_t acpi_irq(int irq, void *dev_id)
352 {
353 	u32 handled;
354 
355 	handled = (*acpi_irq_handler) (acpi_irq_context);
356 
357 	if (handled) {
358 		acpi_irq_handled++;
359 		return IRQ_HANDLED;
360 	} else
361 		return IRQ_NONE;
362 }
363 
364 acpi_status
365 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
366 				  void *context)
367 {
368 	unsigned int irq;
369 
370 	acpi_irq_stats_init();
371 
372 	/*
373 	 * Ignore the GSI from the core, and use the value in our copy of the
374 	 * FADT. It may not be the same if an interrupt source override exists
375 	 * for the SCI.
376 	 */
377 	gsi = acpi_gbl_FADT.sci_interrupt;
378 	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
379 		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
380 		       gsi);
381 		return AE_OK;
382 	}
383 
384 	acpi_irq_handler = handler;
385 	acpi_irq_context = context;
386 	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
387 		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
388 		return AE_NOT_ACQUIRED;
389 	}
390 	acpi_irq_irq = irq;
391 
392 	return AE_OK;
393 }
394 
395 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
396 {
397 	if (irq) {
398 		free_irq(irq, acpi_irq);
399 		acpi_irq_handler = NULL;
400 		acpi_irq_irq = 0;
401 	}
402 
403 	return AE_OK;
404 }
405 
406 /*
407  * Running in interpreter thread context, safe to sleep
408  */
409 
410 void acpi_os_sleep(acpi_integer ms)
411 {
412 	schedule_timeout_interruptible(msecs_to_jiffies(ms));
413 }
414 
415 void acpi_os_stall(u32 us)
416 {
417 	while (us) {
418 		u32 delay = 1000;
419 
420 		if (delay > us)
421 			delay = us;
422 		udelay(delay);
423 		touch_nmi_watchdog();
424 		us -= delay;
425 	}
426 }
427 
428 /*
429  * Support ACPI 3.0 AML Timer operand
430  * Returns 64-bit free-running, monotonically increasing timer
431  * with 100ns granularity
432  */
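/*
 * NOTE: the implementation below is still a placeholder -- it returns an
 * incrementing counter rather than a real 100ns clock, and logs an error
 * the first time it is called.
 */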
433 u64 acpi_os_get_timer(void)
434 {
435 	static u64 t;
436 
437 #ifdef	CONFIG_HPET
438 	/* TBD: use HPET if available */
439 #endif
440 
441 #ifdef	CONFIG_X86_PM_TIMER
442 	/* TBD: default to PM timer if HPET was not available */
443 #endif
444 	if (!t)
445 		printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
446 
447 	return ++t;
448 }
449 
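/*
 * Port I/O accessors for ACPICA; 'width' is the access width in bits
 * (8, 16 or 32), anything else is a programming error.
 */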
450 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
451 {
452 	u32 dummy;
453 
454 	if (!value)
455 		value = &dummy;
456 
457 	*value = 0;
458 	if (width <= 8) {
459 		*(u8 *) value = inb(port);
460 	} else if (width <= 16) {
461 		*(u16 *) value = inw(port);
462 	} else if (width <= 32) {
463 		*(u32 *) value = inl(port);
464 	} else {
465 		BUG();
466 	}
467 
468 	return AE_OK;
469 }
470 
471 EXPORT_SYMBOL(acpi_os_read_port);
472 
473 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
474 {
475 	if (width <= 8) {
476 		outb(value, port);
477 	} else if (width <= 16) {
478 		outw(value, port);
479 	} else if (width <= 32) {
480 		outl(value, port);
481 	} else {
482 		BUG();
483 	}
484 
485 	return AE_OK;
486 }
487 
488 EXPORT_SYMBOL(acpi_os_write_port);
489 
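/*
 * Memory accessors used for AML SystemMemory operation regions.  Each call
 * sets up a temporary ioremap() mapping and tears it down again, so these
 * are strictly slow paths.
 */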
490 acpi_status
491 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
492 {
493 	u32 dummy;
494 	void __iomem *virt_addr;
495 
496 	virt_addr = ioremap(phys_addr, width);
497 	if (!value)
498 		value = &dummy;
499 
500 	switch (width) {
501 	case 8:
502 		*(u8 *) value = readb(virt_addr);
503 		break;
504 	case 16:
505 		*(u16 *) value = readw(virt_addr);
506 		break;
507 	case 32:
508 		*(u32 *) value = readl(virt_addr);
509 		break;
510 	default:
511 		BUG();
512 	}
513 
514 	iounmap(virt_addr);
515 
516 	return AE_OK;
517 }
518 
519 acpi_status
520 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
521 {
522 	void __iomem *virt_addr;
523 
524 	virt_addr = ioremap(phys_addr, width);
525 
526 	switch (width) {
527 	case 8:
528 		writeb(value, virt_addr);
529 		break;
530 	case 16:
531 		writew(value, virt_addr);
532 		break;
533 	case 32:
534 		writel(value, virt_addr);
535 		break;
536 	default:
537 		BUG();
538 	}
539 
540 	iounmap(virt_addr);
541 
542 	return AE_OK;
543 }
544 
545 acpi_status
546 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
547 			       u32 *value, u32 width)
548 {
549 	int result, size;
550 
551 	if (!value)
552 		return AE_BAD_PARAMETER;
553 
554 	switch (width) {
555 	case 8:
556 		size = 1;
557 		break;
558 	case 16:
559 		size = 2;
560 		break;
561 	case 32:
562 		size = 4;
563 		break;
564 	default:
565 		return AE_ERROR;
566 	}
567 
568 	result = raw_pci_read(pci_id->segment, pci_id->bus,
569 				PCI_DEVFN(pci_id->device, pci_id->function),
570 				reg, size, value);
571 
572 	return (result ? AE_ERROR : AE_OK);
573 }
574 
575 acpi_status
576 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
577 				acpi_integer value, u32 width)
578 {
579 	int result, size;
580 
581 	switch (width) {
582 	case 8:
583 		size = 1;
584 		break;
585 	case 16:
586 		size = 2;
587 		break;
588 	case 32:
589 		size = 4;
590 		break;
591 	default:
592 		return AE_ERROR;
593 	}
594 
595 	result = raw_pci_write(pci_id->segment, pci_id->bus,
596 				PCI_DEVFN(pci_id->device, pci_id->function),
597 				reg, size, value);
598 
599 	return (result ? AE_ERROR : AE_OK);
600 }
601 
602 /* TODO: Change code to take advantage of driver model more */
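/*
 * Recursively walk the namespace between rhandle and chandle, evaluating
 * _ADR at each device to recover PCI device/function numbers and reading
 * the header-type and bus-number config registers (0x0e, 0x18, 0x19) of
 * any bridge found on the way so the correct bus number is tracked.
 */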
603 static void acpi_os_derive_pci_id_2(acpi_handle rhandle,	/* upper bound  */
604 				    acpi_handle chandle,	/* current node */
605 				    struct acpi_pci_id **id,
606 				    int *is_bridge, u8 * bus_number)
607 {
608 	acpi_handle handle;
609 	struct acpi_pci_id *pci_id = *id;
610 	acpi_status status;
611 	unsigned long temp;
612 	acpi_object_type type;
613 
614 	acpi_get_parent(chandle, &handle);
615 	if (handle != rhandle) {
616 		acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
617 					bus_number);
618 
619 		status = acpi_get_type(handle, &type);
620 		if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
621 			return;
622 
623 		status =
624 		    acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
625 					  &temp);
626 		if (ACPI_SUCCESS(status)) {
627 			u32 val;
628 			pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
629 			pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
630 
631 			if (*is_bridge)
632 				pci_id->bus = *bus_number;
633 
634 			/* Is there a nicer way to get the bus number of a bridge? */
635 			status =
636 			    acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
637 							   8);
638 			if (ACPI_SUCCESS(status)
639 			    && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
640 				status =
641 				    acpi_os_read_pci_configuration(pci_id, 0x18,
642 								   &val, 8);
643 				if (!ACPI_SUCCESS(status)) {
644 					/* Certainly broken...  FIX ME */
645 					return;
646 				}
647 				*is_bridge = 1;
648 				pci_id->bus = val;
649 				status =
650 				    acpi_os_read_pci_configuration(pci_id, 0x19,
651 								   &val, 8);
652 				if (ACPI_SUCCESS(status)) {
653 					*bus_number = val;
654 				}
655 			} else
656 				*is_bridge = 0;
657 		}
658 	}
659 }
660 
661 void acpi_os_derive_pci_id(acpi_handle rhandle,	/* upper bound  */
662 			   acpi_handle chandle,	/* current node */
663 			   struct acpi_pci_id **id)
664 {
665 	int is_bridge = 1;
666 	u8 bus_number = (*id)->bus;
667 
668 	acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
669 }
670 
671 static void acpi_os_execute_deferred(struct work_struct *work)
672 {
673 	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
674 	if (!dpc) {
675 		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
676 		return;
677 	}
678 
679 	dpc->function(dpc->context);
680 	kfree(dpc);
681 
682 	return;
683 }
684 
685 /*******************************************************************************
686  *
687  * FUNCTION:    acpi_os_execute
688  *
689  * PARAMETERS:  Type               - Type of the callback
690  *              Function           - Function to be executed
691  *              Context            - Function parameters
692  *
693  * RETURN:      Status
694  *
695  * DESCRIPTION: Queues the function for deferred execution on one of the
696  *              ACPI workqueues, selected according to the callback type.
697  *
698  ******************************************************************************/
699 
700 acpi_status acpi_os_execute(acpi_execute_type type,
701 			    acpi_osd_exec_callback function, void *context)
702 {
703 	acpi_status status = AE_OK;
704 	struct acpi_os_dpc *dpc;
705 	struct workqueue_struct *queue;
706 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
707 			  "Scheduling function [%p(%p)] for deferred execution.\n",
708 			  function, context));
709 
710 	if (!function)
711 		return AE_BAD_PARAMETER;
712 
713 	/*
714 	 * Allocate/initialize DPC structure.  Note that this memory will be
715 	 * freed by the callee.  The kernel handles the work_struct list  in a
716 	 * way that allows us to also free its memory inside the callee.
717 	 * Because we may want to schedule several tasks with different
718 	 * parameters we can't use the approach some kernel code uses of
719 	 * having a static work_struct.
720 	 */
721 
722 	dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
723 	if (!dpc)
724 		return_ACPI_STATUS(AE_NO_MEMORY);
725 
726 	dpc->function = function;
727 	dpc->context = context;
728 
729 	INIT_WORK(&dpc->work, acpi_os_execute_deferred);
730 	queue = (type == OSL_NOTIFY_HANDLER) ? kacpi_notify_wq : kacpid_wq;
731 	if (!queue_work(queue, &dpc->work)) {
732 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
733 			  "Call to queue_work() failed.\n"));
734 		status = AE_ERROR;
735 		kfree(dpc);
736 	}
737 	return_ACPI_STATUS(status);
738 }
739 
740 EXPORT_SYMBOL(acpi_os_execute);
741 
742 void acpi_os_wait_events_complete(void *context)
743 {
744 	flush_workqueue(kacpid_wq);
745 }
746 
747 EXPORT_SYMBOL(acpi_os_wait_events_complete);
748 
749 /*
750  * Initialize a spinlock; *handle must already point at valid spinlock_t storage.
751  */
752 acpi_status acpi_os_create_lock(acpi_spinlock * handle)
753 {
754 	spin_lock_init(*handle);
755 
756 	return AE_OK;
757 }
758 
759 /*
760  * Delete a spinlock; nothing to do, since acpi_os_create_lock() allocated nothing.
761  */
762 void acpi_os_delete_lock(acpi_spinlock handle)
763 {
764 	return;
765 }
766 
767 acpi_status
768 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
769 {
770 	struct semaphore *sem = NULL;
771 
772 	sem = acpi_os_allocate(sizeof(struct semaphore));
773 	if (!sem)
774 		return AE_NO_MEMORY;
775 	memset(sem, 0, sizeof(struct semaphore));
776 
777 	sema_init(sem, initial_units);
778 
779 	*handle = (acpi_handle *) sem;
780 
781 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
782 			  *handle, initial_units));
783 
784 	return AE_OK;
785 }
786 
787 /*
788  * TODO: A better way to delete semaphores?  Linux doesn't have a
789  * 'delete_semaphore()' function -- may result in an invalid
790  * pointer dereference for non-synchronized consumers.	Should
791  * we at least check for blocked threads and signal/cancel them?
792  */
793 
794 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
795 {
796 	struct semaphore *sem = (struct semaphore *)handle;
797 
798 	if (!sem)
799 		return AE_BAD_PARAMETER;
800 
801 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
802 
803 	BUG_ON(!list_empty(&sem->wait_list));
804 	kfree(sem);
805 	sem = NULL;
806 
807 	return AE_OK;
808 }
809 
810 /*
811  * TODO: Support for units > 1?
812  */
813 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
814 {
815 	acpi_status status = AE_OK;
816 	struct semaphore *sem = (struct semaphore *)handle;
817 	long jiffies;
818 	int ret = 0;
819 
820 	if (!sem || (units < 1))
821 		return AE_BAD_PARAMETER;
822 
823 	if (units > 1)
824 		return AE_SUPPORT;
825 
826 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
827 			  handle, units, timeout));
828 
829 	if (timeout == ACPI_WAIT_FOREVER)
830 		jiffies = MAX_SCHEDULE_TIMEOUT;
831 	else
832 		jiffies = msecs_to_jiffies(timeout);
833 
834 	ret = down_timeout(sem, jiffies);
835 	if (ret)
836 		status = AE_TIME;
837 
838 	if (ACPI_FAILURE(status)) {
839 		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
840 				  "Failed to acquire semaphore[%p|%d|%d], %s",
841 				  handle, units, timeout,
842 				  acpi_format_exception(status)));
843 	} else {
844 		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
845 				  "Acquired semaphore[%p|%d|%d]", handle,
846 				  units, timeout));
847 	}
848 
849 	return status;
850 }
851 
852 /*
853  * TODO: Support for units > 1?
854  */
855 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
856 {
857 	struct semaphore *sem = (struct semaphore *)handle;
858 
859 	if (!sem || (units < 1))
860 		return AE_BAD_PARAMETER;
861 
862 	if (units > 1)
863 		return AE_SUPPORT;
864 
865 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
866 			  units));
867 
868 	up(sem);
869 
870 	return AE_OK;
871 }
872 
873 #ifdef ACPI_FUTURE_USAGE
874 u32 acpi_os_get_line(char *buffer)
875 {
876 
877 #ifdef ENABLE_DEBUGGER
878 	if (acpi_in_debugger) {
879 		u32 chars;
880 
881 		kdb_read(buffer, sizeof(line_buf));
882 
883 		/* remove the CR kdb includes */
884 		chars = strlen(buffer) - 1;
885 		buffer[chars] = '\0';
886 	}
887 #endif
888 
889 	return 0;
890 }
891 #endif				/*  ACPI_FUTURE_USAGE  */
892 
893 acpi_status acpi_os_signal(u32 function, void *info)
894 {
895 	switch (function) {
896 	case ACPI_SIGNAL_FATAL:
897 		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
898 		break;
899 	case ACPI_SIGNAL_BREAKPOINT:
900 		/*
901 		 * AML Breakpoint
902 		 * ACPI spec. says to treat it as a NOP unless
903 		 * you are debugging.  So if/when we integrate
904 		 * AML debugger into the kernel debugger its
905 		 * hook will go here.  But until then it is
906 		 * not useful to print anything on breakpoints.
907 		 */
908 		break;
909 	default:
910 		break;
911 	}
912 
913 	return AE_OK;
914 }
915 
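/*
 * "acpi_os_name=" boot parameter: override the OS name returned by the
 * predefined _OS object (see acpi_os_predefined_override() above).
 * Illustrative use: acpi_os_name="Microsoft Windows NT".
 */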
916 static int __init acpi_os_name_setup(char *str)
917 {
918 	char *p = acpi_os_name;
919 	int count = ACPI_MAX_OVERRIDE_LEN - 1;
920 
921 	if (!str || !*str)
922 		return 0;
923 
924 	for (; count-- && str && *str; str++) {
925 		if (isalnum(*str) || *str == ' ' || *str == ':')
926 			*p++ = *str;
927 		else if (*str == '\'' || *str == '"')
928 			continue;
929 		else
930 			break;
931 	}
932 	*p = 0;
933 
934 	return 1;
935 
936 }
937 
938 __setup("acpi_os_name=", acpi_os_name_setup);
939 
940 static void __init set_osi_linux(unsigned int enable)
941 {
942 	if (osi_linux.enable != enable) {
943 		osi_linux.enable = enable;
944 		printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
945 			enable ? "Add": "Delet");
946 	}
947 	return;
948 }
949 
950 static void __init acpi_cmdline_osi_linux(unsigned int enable)
951 {
952 	osi_linux.cmdline = 1;	/* cmdline set the default */
953 	set_osi_linux(enable);
954 
955 	return;
956 }
957 
958 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
959 {
960 	osi_linux.dmi = 1;	/* DMI knows that this box asks OSI(Linux) */
961 
962 	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
963 
964 	if (enable == -1)
965 		return;
966 
967 	osi_linux.known = 1;	/* DMI knows which OSI(Linux) default needed */
968 
969 	set_osi_linux(enable);
970 
971 	return;
972 }
973 
974 /*
975  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
976  *
977  * empty string disables _OSI
978  * string starting with '!' disables that string
979  * otherwise string is added to list, augmenting built-in strings
980  */
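/*
 * Illustrative invocations, derived from the parsing below ("FooString" is
 * just a placeholder):
 *
 *	acpi_osi=		disable the _OSI method entirely
 *	acpi_osi=!Linux		answer "no" to _OSI(Linux)
 *	acpi_osi=Linux		answer "yes" to _OSI(Linux)
 *	acpi_osi=!FooString	delete an interface string
 *	acpi_osi=FooString	add one extra interface string
 */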
981 int __init acpi_osi_setup(char *str)
982 {
983 	if (str == NULL || *str == '\0') {
984 		printk(KERN_INFO PREFIX "_OSI method disabled\n");
985 		acpi_gbl_create_osi_method = FALSE;
986 	} else if (!strcmp("!Linux", str)) {
987 		acpi_cmdline_osi_linux(0);	/* !enable */
988 	} else if (*str == '!') {
989 		if (acpi_osi_invalidate(++str) == AE_OK)
990 			printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
991 	} else if (!strcmp("Linux", str)) {
992 		acpi_cmdline_osi_linux(1);	/* enable */
993 	} else if (*osi_additional_string == '\0') {
994 		strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
995 		printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
996 	}
997 
998 	return 1;
999 }
1000 
1001 __setup("acpi_osi=", acpi_osi_setup);
1002 
1003 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1004 static int __init acpi_serialize_setup(char *str)
1005 {
1006 	printk(KERN_INFO PREFIX "serialize enabled\n");
1007 
1008 	acpi_gbl_all_methods_serialized = TRUE;
1009 
1010 	return 1;
1011 }
1012 
1013 __setup("acpi_serialize", acpi_serialize_setup);
1014 
1015 /*
1016  * Wake and Run-Time GPEs are expected to be separate.
1017  * We disable wake-GPEs at run-time to prevent spurious
1018  * interrupts.
1019  *
1020  * However, if a system exists that shares Wake and
1021  * Run-time events on the same GPE, this flag is available
1022  * to tell Linux to keep the wake-time GPEs enabled at run-time.
1023  */
1024 static int __init acpi_wake_gpes_always_on_setup(char *str)
1025 {
1026 	printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
1027 
1028 	acpi_gbl_leave_wake_gpes_disabled = FALSE;
1029 
1030 	return 1;
1031 }
1032 
1033 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
1034 
1035 /* Check for resource interference between native drivers and ACPI
1036  * OperationRegions (SystemIO and System Memory only).
1037  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1038  * in arbitrary AML code and can interfere with legacy drivers.
1039  * acpi_enforce_resources= can be set to:
1040  *
1041  *   - strict           (2)
1042  *     -> a driver that later tries to access the resources will not load
1043  *   - lax (default)    (1)
1044  *     -> a driver that later tries to access the resources will load, but you
1045  *     get a system message that something might go wrong...
1046  *
1047  *   - no               (0)
1048  *     -> ACPI Operation Region resources will not be registered
1049  *
1050  */
1051 #define ENFORCE_RESOURCES_STRICT 2
1052 #define ENFORCE_RESOURCES_LAX    1
1053 #define ENFORCE_RESOURCES_NO     0
1054 
1055 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1056 
1057 static int __init acpi_enforce_resources_setup(char *str)
1058 {
1059 	if (str == NULL || *str == '\0')
1060 		return 0;
1061 
1062 	if (!strcmp("strict", str))
1063 		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1064 	else if (!strcmp("lax", str))
1065 		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1066 	else if (!strcmp("no", str))
1067 		acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1068 
1069 	return 1;
1070 }
1071 
1072 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1073 
1074 /* Check for resource conflicts between ACPI OperationRegions and native
1075  * drivers */
1076 int acpi_check_resource_conflict(struct resource *res)
1077 {
1078 	struct acpi_res_list *res_list_elem;
1079 	int ioport;
1080 	int clash = 0;
1081 
1082 	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1083 		return 0;
1084 	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1085 		return 0;
1086 
1087 	ioport = res->flags & IORESOURCE_IO;
1088 
1089 	spin_lock(&acpi_res_lock);
1090 	list_for_each_entry(res_list_elem, &resource_list_head,
1091 			    resource_list) {
1092 		if (ioport && (res_list_elem->resource_type
1093 			       != ACPI_ADR_SPACE_SYSTEM_IO))
1094 			continue;
1095 		if (!ioport && (res_list_elem->resource_type
1096 				!= ACPI_ADR_SPACE_SYSTEM_MEMORY))
1097 			continue;
1098 
1099 		if (res->end < res_list_elem->start
1100 		    || res_list_elem->end < res->start)
1101 			continue;
1102 		clash = 1;
1103 		break;
1104 	}
1105 	spin_unlock(&acpi_res_lock);
1106 
1107 	if (clash) {
1108 		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1109 			printk("%sACPI: %s resource %s [0x%llx-0x%llx]"
1110 			       " conflicts with ACPI region %s"
1111 			       " [0x%llx-0x%llx]\n",
1112 			       acpi_enforce_resources == ENFORCE_RESOURCES_LAX
1113 			       ? KERN_WARNING : KERN_ERR,
1114 			       ioport ? "I/O" : "Memory", res->name,
1115 			       (long long) res->start, (long long) res->end,
1116 			       res_list_elem->name,
1117 			       (long long) res_list_elem->start,
1118 			       (long long) res_list_elem->end);
1119 			printk(KERN_INFO "ACPI: Device needs an ACPI driver\n");
1120 		}
1121 		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1122 			return -EBUSY;
1123 	}
1124 	return 0;
1125 }
1126 EXPORT_SYMBOL(acpi_check_resource_conflict);
1127 
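/*
 * Sketch of how a native driver might use this before claiming a legacy
 * I/O range (driver name and variables are hypothetical):
 *
 *	if (acpi_check_region(io_base, io_len, "mydrv"))
 *		return -EBUSY;	(conflict with acpi_enforce_resources=strict)
 *	if (!request_region(io_base, io_len, "mydrv"))
 *		return -EBUSY;
 */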
1128 int acpi_check_region(resource_size_t start, resource_size_t n,
1129 		      const char *name)
1130 {
1131 	struct resource res = {
1132 		.start = start,
1133 		.end   = start + n - 1,
1134 		.name  = name,
1135 		.flags = IORESOURCE_IO,
1136 	};
1137 
1138 	return acpi_check_resource_conflict(&res);
1139 }
1140 EXPORT_SYMBOL(acpi_check_region);
1141 
1142 int acpi_check_mem_region(resource_size_t start, resource_size_t n,
1143 		      const char *name)
1144 {
1145 	struct resource res = {
1146 		.start = start,
1147 		.end   = start + n - 1,
1148 		.name  = name,
1149 		.flags = IORESOURCE_MEM,
1150 	};
1151 
1152 	return acpi_check_resource_conflict(&res);
1153 
1154 }
1155 EXPORT_SYMBOL(acpi_check_mem_region);
1156 
1157 /*
1158  * Acquire a spinlock.
1159  *
1160  * handle is a pointer to the spinlock_t.
1161  */
1162 
1163 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1164 {
1165 	acpi_cpu_flags flags;
1166 	spin_lock_irqsave(lockp, flags);
1167 	return flags;
1168 }
1169 
1170 /*
1171  * Release a spinlock. See above.
1172  */
1173 
1174 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1175 {
1176 	spin_unlock_irqrestore(lockp, flags);
1177 }
1178 
1179 #ifndef ACPI_USE_LOCAL_CACHE
1180 
1181 /*******************************************************************************
1182  *
1183  * FUNCTION:    acpi_os_create_cache
1184  *
1185  * PARAMETERS:  name      - Ascii name for the cache
1186  *              size      - Size of each cached object
1187  *              depth     - Maximum depth of the cache (in objects) <ignored>
1188  *              cache     - Where the new cache object is returned
1189  *
1190  * RETURN:      status
1191  *
1192  * DESCRIPTION: Create a cache object
1193  *
1194  ******************************************************************************/
1195 
1196 acpi_status
1197 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1198 {
1199 	*cache = kmem_cache_create(name, size, 0, 0, NULL);
1200 	if (*cache == NULL)
1201 		return AE_ERROR;
1202 	else
1203 		return AE_OK;
1204 }
1205 
1206 /*******************************************************************************
1207  *
1208  * FUNCTION:    acpi_os_purge_cache
1209  *
1210  * PARAMETERS:  Cache           - Handle to cache object
1211  *
1212  * RETURN:      Status
1213  *
1214  * DESCRIPTION: Free all objects within the requested cache.
1215  *
1216  ******************************************************************************/
1217 
1218 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1219 {
1220 	kmem_cache_shrink(cache);
1221 	return (AE_OK);
1222 }
1223 
1224 /*******************************************************************************
1225  *
1226  * FUNCTION:    acpi_os_delete_cache
1227  *
1228  * PARAMETERS:  Cache           - Handle to cache object
1229  *
1230  * RETURN:      Status
1231  *
1232  * DESCRIPTION: Free all objects within the requested cache and delete the
1233  *              cache object.
1234  *
1235  ******************************************************************************/
1236 
1237 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1238 {
1239 	kmem_cache_destroy(cache);
1240 	return (AE_OK);
1241 }
1242 
1243 /*******************************************************************************
1244  *
1245  * FUNCTION:    acpi_os_release_object
1246  *
1247  * PARAMETERS:  Cache       - Handle to cache object
1248  *              Object      - The object to be released
1249  *
1250  * RETURN:      None
1251  *
1252  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1253  *              the object is deleted.
1254  *
1255  ******************************************************************************/
1256 
1257 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1258 {
1259 	kmem_cache_free(cache, object);
1260 	return (AE_OK);
1261 }
1262 
1263 /**
1264  *	acpi_dmi_dump - dump DMI slots needed for blacklist entry
1265  *
1266  *	Returns 0 on success
1267  */
1268 static int acpi_dmi_dump(void)
1269 {
1270 
1271 	if (!dmi_available)
1272 		return -1;
1273 
1274 	printk(KERN_NOTICE PREFIX "DMI System Vendor: %s\n",
1275 		dmi_get_system_info(DMI_SYS_VENDOR));
1276 	printk(KERN_NOTICE PREFIX "DMI Product Name: %s\n",
1277 		dmi_get_system_info(DMI_PRODUCT_NAME));
1278 	printk(KERN_NOTICE PREFIX "DMI Product Version: %s\n",
1279 		dmi_get_system_info(DMI_PRODUCT_VERSION));
1280 	printk(KERN_NOTICE PREFIX "DMI Board Name: %s\n",
1281 		dmi_get_system_info(DMI_BOARD_NAME));
1282 	printk(KERN_NOTICE PREFIX "DMI BIOS Vendor: %s\n",
1283 		dmi_get_system_info(DMI_BIOS_VENDOR));
1284 	printk(KERN_NOTICE PREFIX "DMI BIOS Date: %s\n",
1285 		dmi_get_system_info(DMI_BIOS_DATE));
1286 
1287 	return 0;
1288 }
1289 
1290 
1291 /******************************************************************************
1292  *
1293  * FUNCTION:    acpi_os_validate_interface
1294  *
1295  * PARAMETERS:  interface           - Requested interface to be validated
1296  *
1297  * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
1298  *
1299  * DESCRIPTION: Match an interface string to the interfaces supported by the
1300  *              host. Strings originate from an AML call to the _OSI method.
1301  *
1302  *****************************************************************************/
1303 
1304 acpi_status
1305 acpi_os_validate_interface(char *interface)
1306 {
1307 	if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1308 		return AE_OK;
1309 	if (!strcmp("Linux", interface)) {
1310 
1311 		printk(KERN_NOTICE PREFIX
1312 			"BIOS _OSI(Linux) query %s%s\n",
1313 			osi_linux.enable ? "honored" : "ignored",
1314 			osi_linux.cmdline ? " via cmdline" :
1315 			osi_linux.dmi ? " via DMI" : "");
1316 
1317 		if (!osi_linux.dmi) {
1318 			if (acpi_dmi_dump())
1319 				printk(KERN_NOTICE PREFIX
1320 					"[please extract dmidecode output]\n");
1321 			printk(KERN_NOTICE PREFIX
1322 				"Please send DMI info above to "
1323 				"linux-acpi@vger.kernel.org\n");
1324 		}
1325 		if (!osi_linux.known && !osi_linux.cmdline) {
1326 			printk(KERN_NOTICE PREFIX
1327 				"If \"acpi_osi=%sLinux\" works better, "
1328 				"please notify linux-acpi@vger.kernel.org\n",
1329 				osi_linux.enable ? "!" : "");
1330 		}
1331 
1332 		if (osi_linux.enable)
1333 			return AE_OK;
1334 	}
1335 	return AE_SUPPORT;
1336 }
1337 
1338 /******************************************************************************
1339  *
1340  * FUNCTION:    acpi_os_validate_address
1341  *
1342  * PARAMETERS:  space_id             - ACPI space ID
1343  *              address             - Physical address
1344  *              length              - Address length
1345  *
1346  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1347  *              should return AE_AML_ILLEGAL_ADDRESS.
1348  *
1349  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1350  *              the addresses accessed by AML operation regions.
1351  *
1352  *****************************************************************************/
1353 
1354 acpi_status
1355 acpi_os_validate_address(
1356 	u8 space_id,
1357 	acpi_physical_address address,
1358 	acpi_size length,
1359 	char *name)
1360 {
1361 	struct acpi_res_list *res;
1362 	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1363 		return AE_OK;
1364 
1365 	switch (space_id) {
1366 	case ACPI_ADR_SPACE_SYSTEM_IO:
1367 	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1368 		/* Only interference checks against SystemIO and SystemMemory
1369 		   are needed */
1370 		res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1371 		if (!res)
1372 			return AE_OK;
1373 		/* ACPI names are fixed at 4 bytes; use strlcpy to ensure termination */
1374 		strlcpy(res->name, name, 5);
1375 		res->start = address;
1376 		res->end = address + length - 1;
1377 		res->resource_type = space_id;
1378 		spin_lock(&acpi_res_lock);
1379 		list_add(&res->resource_list, &resource_list_head);
1380 		spin_unlock(&acpi_res_lock);
1381 		pr_debug("Added %s resource: start: 0x%llx, end: 0x%llx, "
1382 			 "name: %s\n", (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1383 			 ? "SystemIO" : "System Memory",
1384 			 (unsigned long long)res->start,
1385 			 (unsigned long long)res->end,
1386 			 res->name);
1387 		break;
1388 	case ACPI_ADR_SPACE_PCI_CONFIG:
1389 	case ACPI_ADR_SPACE_EC:
1390 	case ACPI_ADR_SPACE_SMBUS:
1391 	case ACPI_ADR_SPACE_CMOS:
1392 	case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1393 	case ACPI_ADR_SPACE_DATA_TABLE:
1394 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
1395 		break;
1396 	}
1397 	return AE_OK;
1398 }
1399 
1400 #endif
1401