xref: /openbmc/linux/drivers/acpi/osl.c (revision 5bdef865)
1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *  Copyright (c) 2008 Intel Corporation
8  *   Author: Matthew Wilcox <willy@linux.intel.com>
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or
15  *  (at your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful,
18  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  *  GNU General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License
23  *  along with this program; if not, write to the Free Software
24  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *
28  */
29 
30 #include <linux/module.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/mm.h>
34 #include <linux/pci.h>
35 #include <linux/interrupt.h>
36 #include <linux/kmod.h>
37 #include <linux/delay.h>
38 #include <linux/workqueue.h>
39 #include <linux/nmi.h>
40 #include <linux/acpi.h>
41 #include <linux/efi.h>
42 #include <linux/ioport.h>
43 #include <linux/list.h>
44 #include <linux/jiffies.h>
45 #include <linux/semaphore.h>
46 
47 #include <asm/io.h>
48 #include <asm/uaccess.h>
49 
50 #include <acpi/acpi.h>
51 #include <acpi/acpi_bus.h>
52 #include <acpi/processor.h>
53 
54 #define _COMPONENT		ACPI_OS_SERVICES
55 ACPI_MODULE_NAME("osl");
56 #define PREFIX		"ACPI: "
/*
 * Deferred procedure call: carries an ACPICA callback and its argument
 * through a workqueue (see __acpi_os_execute()).  Allocated by the
 * scheduler side and kfree()d by the work handler after the callback runs.
 */
struct acpi_os_dpc {
	acpi_osd_exec_callback function;	/* callback to invoke */
	void *context;				/* opaque argument for the callback */
	struct work_struct work;		/* embeds this DPC in a workqueue item */
};
62 
63 #ifdef CONFIG_ACPI_CUSTOM_DSDT
64 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
65 #endif
66 
67 #ifdef ENABLE_DEBUGGER
68 #include <linux/kdb.h>
69 
70 /* stuff for debugger support */
71 int acpi_in_debugger;
72 EXPORT_SYMBOL(acpi_in_debugger);
73 
74 extern char line_buf[80];
75 #endif				/*ENABLE_DEBUGGER */
76 
/* SCI (ACPI System Control Interrupt) bookkeeping */
static unsigned int acpi_irq_irq;		/* Linux IRQ the SCI handler is bound to */
static acpi_osd_handler acpi_irq_handler;	/* ACPICA's SCI handler, NULL if none installed */
static void *acpi_irq_context;			/* argument passed to acpi_irq_handler */
/* Workqueues for deferred ACPICA execution; created in acpi_os_initialize1() */
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
83 
/*
 * One ACPI OperationRegion (SystemIO or SystemMemory) registered for
 * conflict checking against native drivers; see
 * acpi_check_resource_conflict().
 */
struct acpi_res_list {
	resource_size_t start;
	resource_size_t end;
	acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
	char name[5];   /* only can have a length of 4 chars, make use of this
			   one instead of res->name, no need to kmalloc then */
	struct list_head resource_list;
};

/* List of registered regions, protected by acpi_res_lock */
static LIST_HEAD(resource_list_head);
static DEFINE_SPINLOCK(acpi_res_lock);
95 
96 #define	OSI_STRING_LENGTH_MAX 64	/* arbitrary */
97 static char osi_additional_string[OSI_STRING_LENGTH_MAX];
98 
99 /*
100  * The story of _OSI(Linux)
101  *
102  * From pre-history through Linux-2.6.22,
103  * Linux responded TRUE upon a BIOS OSI(Linux) query.
104  *
105  * Unfortunately, reference BIOS writers got wind of this
106  * and put OSI(Linux) in their example code, quickly exposing
107  * this string as ill-conceived and opening the door to
108  * an un-bounded number of BIOS incompatibilities.
109  *
110  * For example, OSI(Linux) was used on resume to re-POST a
111  * video card on one system, because Linux at that time
112  * could not do a speedy restore in its native driver.
113  * But then upon gaining quick native restore capability,
114  * Linux has no way to tell the BIOS to skip the time-consuming
115  * POST -- putting Linux at a permanent performance disadvantage.
116  * On another system, the BIOS writer used OSI(Linux)
117  * to infer native OS support for IPMI!  On other systems,
118  * OSI(Linux) simply got in the way of Linux claiming to
119  * be compatible with other operating systems, exposing
120  * BIOS issues such as skipped device initialization.
121  *
 * So "Linux" turned out to be a really poor choice of
 * OSI string, and from Linux-2.6.23 onward we respond FALSE.
124  *
125  * BIOS writers should NOT query _OSI(Linux) on future systems.
126  * Linux will complain on the console when it sees it, and return FALSE.
127  * To get Linux to return TRUE for your system  will require
128  * a kernel source update to add a DMI entry,
129  * or boot with "acpi_osi=Linux"
130  */
131 
/* State of the _OSI(Linux) answer; see the narrative above. */
static struct osi_linux {
	unsigned int	enable:1;	/* answer TRUE to _OSI(Linux)? */
	unsigned int	dmi:1;		/* a DMI entry saw this box query OSI(Linux) */
	unsigned int	cmdline:1;	/* acpi_osi= on the command line set the default */
	unsigned int	known:1;	/* DMI knows which default this box needs */
} osi_linux = { 0, 0, 0, 0};
138 
139 static void __init acpi_request_region (struct acpi_generic_address *addr,
140 	unsigned int length, char *desc)
141 {
142 	struct resource *res;
143 
144 	if (!addr->address || !length)
145 		return;
146 
147 	if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
148 		res = request_region(addr->address, length, desc);
149 	else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
150 		res = request_mem_region(addr->address, length, desc);
151 }
152 
/*
 * Reserve every fixed-hardware register block declared in the FADT
 * (PM1a/b event and control, PM timer, PM2, GPE0/1) so they show up in
 * the resource trees and native drivers can detect clashes.
 */
static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	/* Only a 32-bit PM timer block is the architected size */
	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
device_initcall(acpi_reserve_resources);
186 
/* Early ACPICA OSL init hook; nothing to do at this stage on Linux. */
acpi_status __init acpi_os_initialize(void)
{
	return AE_OK;
}
191 
/*
 * Second-stage OSL init: create the workqueues used for deferred
 * execution of ACPICA callbacks (GPE handling, notify handlers,
 * hotplug).  Failure to create them is unrecoverable, hence BUG_ON.
 */
acpi_status acpi_os_initialize1(void)
{
	kacpid_wq = create_singlethread_workqueue("kacpid");
	kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
	kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	return AE_OK;
}
202 
/*
 * OSL teardown: release the SCI interrupt (if one was installed) and
 * destroy the deferred-execution workqueues created in
 * acpi_os_initialize1().
 */
acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_irq_irq,
						 acpi_irq_handler);
	}

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}
216 
/* printf-style entry point for ACPICA; forwards to acpi_os_vprintf(). */
void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
224 
225 void acpi_os_vprintf(const char *fmt, va_list args)
226 {
227 	static char buffer[512];
228 
229 	vsprintf(buffer, fmt, args);
230 
231 #ifdef ENABLE_DEBUGGER
232 	if (acpi_in_debugger) {
233 		kdb_printf("%s", buffer);
234 	} else {
235 		printk(KERN_CONT "%s", buffer);
236 	}
237 #else
238 	printk(KERN_CONT "%s", buffer);
239 #endif
240 }
241 
/*
 * Locate the RSDP: on EFI systems take the address published in the EFI
 * system table (preferring the ACPI 2.0 pointer), otherwise let ACPICA
 * scan legacy BIOS memory.  Returns 0 when no root pointer is found.
 */
acpi_physical_address __init acpi_os_get_root_pointer(void)
{
	if (efi_enabled) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		else {
			printk(KERN_ERR PREFIX
			       "System description tables not found\n");
			return 0;
		}
	} else {
		acpi_physical_address pa = 0;

		/* pa stays 0 on failure, which callers treat as "not found" */
		acpi_find_root_pointer(&pa);
		return pa;
	}
}
261 
/*
 * Map a physical ACPI table/region for CPU access.  Before the VM is
 * fully up (acpi_gbl_permanent_mmap clear) we must use the arch's early
 * fixmap-style __acpi_map_table(); afterwards plain ioremap() is used.
 * Returns NULL if the address does not fit in an unsigned long.
 */
void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}
	if (acpi_gbl_permanent_mmap)
		/*
		 * ioremap checks to ensure this is in reserved space
		 */
		return ioremap((unsigned long)phys, size);
	else
		return __acpi_map_table((unsigned long)phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
278 
/*
 * Undo acpi_os_map_memory(): iounmap() for mappings made after the VM
 * came up, the arch early-unmap hook otherwise.
 */
void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	if (acpi_gbl_permanent_mmap)
		iounmap(virt);
	else
		__acpi_unmap_table(virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
287 
/* Release an early-boot table mapping; a no-op once ioremap took over. */
void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	if (!acpi_gbl_permanent_mmap)
		__acpi_unmap_table(virt, size);
}
293 
294 #ifdef ACPI_FUTURE_USAGE
/*
 * Translate a kernel virtual address to physical for ACPICA.
 * NOTE(review): virt_to_phys() is only valid for directly-mapped
 * (lowmem) addresses, not vmalloc/ioremap ones — acceptable for the
 * currently unused ACPI_FUTURE_USAGE path.
 */
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
305 #endif
306 
307 #define ACPI_MAX_OVERRIDE_LEN 100
308 
309 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
310 
/*
 * Let the "acpi_os_name=" boot parameter override the _OS_ string the
 * interpreter reports to the BIOS.  *new_val is left NULL (no override)
 * unless the predefined name is _OS_ and an override string was set.
 */
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string * new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	return AE_OK;
}
327 
/*
 * Offer a replacement for a firmware table.  Only the DSDT can be
 * overridden, and only when a custom image was compiled in via
 * CONFIG_ACPI_CUSTOM_DSDT; using it taints the kernel.
 */
acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
		       struct acpi_table_header ** new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

	*new_table = NULL;	/* default: no override */

#ifdef CONFIG_ACPI_CUSTOM_DSDT
	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
		*new_table = (struct acpi_table_header *)AmlCode;
#endif
	if (*new_table != NULL) {
		printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
			   "this is unsafe: tainting kernel\n",
		       existing_table->signature,
		       existing_table->oem_table_id);
		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
	}
	return AE_OK;
}
350 
351 static irqreturn_t acpi_irq(int irq, void *dev_id)
352 {
353 	u32 handled;
354 
355 	handled = (*acpi_irq_handler) (acpi_irq_context);
356 
357 	if (handled) {
358 		acpi_irq_handled++;
359 		return IRQ_HANDLED;
360 	} else {
361 		acpi_irq_not_handled++;
362 		return IRQ_NONE;
363 	}
364 }
365 
/*
 * Hook ACPICA's SCI handler up to the Linux IRQ layer.  Note two quirks:
 * the GSI argument is deliberately ignored in favour of our FADT copy
 * (an interrupt source override may have changed it), and a failed
 * GSI-to-IRQ mapping still returns AE_OK so ACPI init can continue
 * without an SCI.
 */
acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * Ignore the GSI from the core, and use the value in our copy of the
	 * FADT. It may not be the same if an interrupt source override exists
	 * for the SCI.
	 */
	gsi = acpi_gbl_FADT.sci_interrupt;
	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	/* Record handler/context before enabling the (shared) IRQ */
	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		return AE_NOT_ACQUIRED;
	}
	acpi_irq_irq = irq;

	return AE_OK;
}
396 
/*
 * Detach the SCI handler: free the IRQ (dev_id must match the one used
 * in request_irq(), i.e. acpi_irq) and clear the bookkeeping.
 */
acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
	if (irq) {
		free_irq(irq, acpi_irq);
		acpi_irq_handler = NULL;
		acpi_irq_irq = 0;
	}

	return AE_OK;
}
407 
408 /*
409  * Running in interpreter thread context, safe to sleep
410  */
411 
/*
 * Sleep for @ms milliseconds on behalf of the AML Sleep opcode.
 * NOTE(review): the interruptible variant can return early if a signal
 * is pending, shortening the sleep — confirm that interpreter threads
 * never have signals pending here.
 */
void acpi_os_sleep(acpi_integer ms)
{
	schedule_timeout_interruptible(msecs_to_jiffies(ms));
}
416 
417 void acpi_os_stall(u32 us)
418 {
419 	while (us) {
420 		u32 delay = 1000;
421 
422 		if (delay > us)
423 			delay = us;
424 		udelay(delay);
425 		touch_nmi_watchdog();
426 		us -= delay;
427 	}
428 }
429 
430 /*
431  * Support ACPI 3.0 AML Timer operand
432  * Returns 64-bit free-running, monotonically increasing timer
433  * with 100ns granularity
434  */
/*
 * Stub for the AML Timer operand: a real implementation must return a
 * 64-bit 100ns-granularity monotonic counter.  This placeholder just
 * returns an incrementing value and warns once (on first use, while
 * t is still 0) that it is not implemented.
 */
u64 acpi_os_get_timer(void)
{
	static u64 t;

#ifdef	CONFIG_HPET
	/* TBD: use HPET if available */
#endif

#ifdef	CONFIG_X86_PM_TIMER
	/* TBD: default to PM timer if HPET was not available */
#endif
	if (!t)
		printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");

	return ++t;
}
451 
/*
 * Read an I/O port of 8/16/32 bits for ACPICA.  A NULL value pointer is
 * tolerated by reading into a scratch variable.  The result is stored
 * into the low bytes of *value via the casts below.
 * NOTE(review): the partial-width stores assume little-endian layout of
 * *value — fine on x86/ia64, verify before reuse elsewhere.
 */
acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);
474 
/*
 * Write an I/O port of 8/16/32 bits for ACPICA; any other width is a
 * programming error (BUG).
 */
acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);
491 
492 acpi_status
493 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
494 {
495 	u32 dummy;
496 	void __iomem *virt_addr;
497 
498 	virt_addr = ioremap(phys_addr, width);
499 	if (!value)
500 		value = &dummy;
501 
502 	switch (width) {
503 	case 8:
504 		*(u8 *) value = readb(virt_addr);
505 		break;
506 	case 16:
507 		*(u16 *) value = readw(virt_addr);
508 		break;
509 	case 32:
510 		*(u32 *) value = readl(virt_addr);
511 		break;
512 	default:
513 		BUG();
514 	}
515 
516 	iounmap(virt_addr);
517 
518 	return AE_OK;
519 }
520 
521 acpi_status
522 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
523 {
524 	void __iomem *virt_addr;
525 
526 	virt_addr = ioremap(phys_addr, width);
527 
528 	switch (width) {
529 	case 8:
530 		writeb(value, virt_addr);
531 		break;
532 	case 16:
533 		writew(value, virt_addr);
534 		break;
535 	case 32:
536 		writel(value, virt_addr);
537 		break;
538 	default:
539 		BUG();
540 	}
541 
542 	iounmap(virt_addr);
543 
544 	return AE_OK;
545 }
546 
/*
 * Read a PCI configuration-space register for ACPICA.  Width must be
 * exactly 8, 16 or 32 bits; the access goes through the arch's
 * raw_pci_read() backend.
 */
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       u32 *value, u32 width)
{
	int result, size;

	if (!value)
		return AE_BAD_PARAMETER;

	/* map bit width to access size in bytes */
	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}
576 
/*
 * Write a PCI configuration-space register for ACPICA.  Width must be
 * exactly 8, 16 or 32 bits; the access goes through raw_pci_write().
 */
acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				acpi_integer value, u32 width)
{
	int result, size;

	/* map bit width to access size in bytes */
	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}
603 
/* TODO: Change code to take advantage of driver model more */
/*
 * Recursive helper for acpi_os_derive_pci_id(): walk up the namespace
 * from chandle to rhandle, then on the way back down refine *id with
 * each node's _ADR (device/function) and, for bridges, the bus number
 * read from config space.  *is_bridge and *bus_number carry state
 * between recursion levels.
 */
static void acpi_os_derive_pci_id_2(acpi_handle rhandle,	/* upper bound  */
				    acpi_handle chandle,	/* current node */
				    struct acpi_pci_id **id,
				    int *is_bridge, u8 * bus_number)
{
	acpi_handle handle;
	struct acpi_pci_id *pci_id = *id;
	acpi_status status;
	unsigned long long temp;
	acpi_object_type type;

	acpi_get_parent(chandle, &handle);
	if (handle != rhandle) {
		/* recurse first so ancestors are processed top-down */
		acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
					bus_number);

		status = acpi_get_type(handle, &type);
		if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
			return;

		/* _ADR encodes device in the high word, function in the low */
		status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
					  &temp);
		if (ACPI_SUCCESS(status)) {
			u32 val;
			pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
			pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));

			if (*is_bridge)
				pci_id->bus = *bus_number;

			/* any nicer way to get bus number of bridge ? */
			/* config offset 0x0e is the PCI header type register;
			 * type 1 = PCI-PCI bridge, type 2 = CardBus bridge */
			status =
			    acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
							   8);
			if (ACPI_SUCCESS(status)
			    && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
				status =
				    acpi_os_read_pci_configuration(pci_id, 0x18,
								   &val, 8);
				if (!ACPI_SUCCESS(status)) {
					/* Certainly broken...  FIX ME */
					return;
				}
				*is_bridge = 1;
				pci_id->bus = val;
				/* offset 0x19: bus number downstream of the bridge */
				status =
				    acpi_os_read_pci_configuration(pci_id, 0x19,
								   &val, 8);
				if (ACPI_SUCCESS(status)) {
					*bus_number = val;
				}
			} else
				*is_bridge = 0;
		}
	}
}
661 
/*
 * Derive the PCI seg/bus/dev/fn for the namespace node chandle, given
 * its PCI root rhandle.  *id is seeded with the root's values and
 * refined by the recursive walk above.
 */
void acpi_os_derive_pci_id(acpi_handle rhandle,	/* upper bound  */
			   acpi_handle chandle,	/* current node */
			   struct acpi_pci_id **id)
{
	int is_bridge = 1;
	u8 bus_number = (*id)->bus;

	acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
}
671 
672 static void acpi_os_execute_deferred(struct work_struct *work)
673 {
674 	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
675 	if (!dpc) {
676 		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
677 		return;
678 	}
679 
680 	dpc->function(dpc->context);
681 	kfree(dpc);
682 
683 	return;
684 }
685 
686 static void acpi_os_execute_hp_deferred(struct work_struct *work)
687 {
688 	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
689 	if (!dpc) {
690 		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
691 		return;
692 	}
693 
694 	acpi_os_wait_events_complete(NULL);
695 
696 	dpc->function(dpc->context);
697 	kfree(dpc);
698 
699 	return;
700 }
701 
702 /*******************************************************************************
703  *
704  * FUNCTION:    acpi_os_execute
705  *
706  * PARAMETERS:  Type               - Type of the callback
707  *              Function           - Function to be executed
708  *              Context            - Function parameters
709  *
710  * RETURN:      Status
711  *
712  * DESCRIPTION: Depending on type, either queues function for deferred execution or
713  *              immediately executes function on a separate thread.
714  *
715  ******************************************************************************/
716 
/*
 * Common implementation behind acpi_os_execute() and
 * acpi_os_hotplug_execute(): package @function/@context into a DPC and
 * queue it on the workqueue selected by @type and @hp.  The DPC is
 * freed by the work handler on success, or here on queueing failure.
 */
static acpi_status __acpi_os_execute(acpi_execute_type type,
	acpi_osd_exec_callback function, void *context, int hp)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	work_func_t func;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (!function)
		return AE_BAD_PARAMETER;

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list  in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	/* GFP_ATOMIC: may be called from interrupt (GPE) context */
	dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
	 * because the hotplug code may call driver .remove() functions,
	 * which invoke flush_scheduled_work/acpi_os_wait_events_complete
	 * to flush these workqueues.
	 */
	queue = hp ? kacpi_hotplug_wq :
		(type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
	func = hp ? acpi_os_execute_hp_deferred : acpi_os_execute_deferred;
	INIT_WORK(&dpc->work, func);
	ret = queue_work(queue, &dpc->work);

	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
		kfree(dpc);
	}
	return status;
}
768 
/* Queue @function(@context) on the regular (non-hotplug) workqueues. */
acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	return __acpi_os_execute(type, function, context, 0);
}
EXPORT_SYMBOL(acpi_os_execute);
775 
/* Queue @function(@context) on the dedicated hotplug workqueue. */
acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
	void *context)
{
	return __acpi_os_execute(0, function, context, 1);
}
781 
/*
 * Block until all work queued on the regular ACPI workqueues has run.
 * Deliberately does NOT flush kacpi_hotplug_wq — hotplug work itself
 * calls this, and flushing its own queue would deadlock.
 */
void acpi_os_wait_events_complete(void *context)
{
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}

EXPORT_SYMBOL(acpi_os_wait_events_complete);
789 
/*
 * Initialize a spinlock for ACPICA.  The memory for the lock itself is
 * allocated by the caller (ACPICA), not here.
 */
/*
 * Initialize the spinlock behind *handle.  The memory itself is
 * allocated by the ACPICA caller, not here.
 */
acpi_status acpi_os_create_lock(acpi_spinlock * handle)
{
	spin_lock_init(*handle);

	return AE_OK;
}
799 
800 /*
801  * Deallocate the memory for a spinlock.
802  */
/* Nothing to do: Linux spinlocks need no teardown; ACPICA frees the memory. */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	return;
}
807 
808 acpi_status
809 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
810 {
811 	struct semaphore *sem = NULL;
812 
813 	sem = acpi_os_allocate(sizeof(struct semaphore));
814 	if (!sem)
815 		return AE_NO_MEMORY;
816 	memset(sem, 0, sizeof(struct semaphore));
817 
818 	sema_init(sem, initial_units);
819 
820 	*handle = (acpi_handle *) sem;
821 
822 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
823 			  *handle, initial_units));
824 
825 	return AE_OK;
826 }
827 
828 /*
829  * TODO: A better way to delete semaphores?  Linux doesn't have a
830  * 'delete_semaphore()' function -- may result in an invalid
831  * pointer dereference for non-synchronized consumers.	Should
832  * we at least check for blocked threads and signal/cancel them?
833  */
834 
/*
 * Destroy a semaphore created by acpi_os_create_semaphore().  BUG if
 * anyone is still waiting on it — freeing under a waiter would be a
 * use-after-free.
 */
acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;	/* local only; callers must drop their own handle */

	return AE_OK;
}
850 
851 /*
852  * TODO: Support for units > 1?
853  */
854 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
855 {
856 	acpi_status status = AE_OK;
857 	struct semaphore *sem = (struct semaphore *)handle;
858 	long jiffies;
859 	int ret = 0;
860 
861 	if (!sem || (units < 1))
862 		return AE_BAD_PARAMETER;
863 
864 	if (units > 1)
865 		return AE_SUPPORT;
866 
867 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
868 			  handle, units, timeout));
869 
870 	if (timeout == ACPI_WAIT_FOREVER)
871 		jiffies = MAX_SCHEDULE_TIMEOUT;
872 	else
873 		jiffies = msecs_to_jiffies(timeout);
874 
875 	ret = down_timeout(sem, jiffies);
876 	if (ret)
877 		status = AE_TIME;
878 
879 	if (ACPI_FAILURE(status)) {
880 		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
881 				  "Failed to acquire semaphore[%p|%d|%d], %s",
882 				  handle, units, timeout,
883 				  acpi_format_exception(status)));
884 	} else {
885 		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
886 				  "Acquired semaphore[%p|%d|%d]", handle,
887 				  units, timeout));
888 	}
889 
890 	return status;
891 }
892 
893 /*
894  * TODO: Support for units > 1?
895  */
/*
 * Release one unit of an OSL semaphore.  Only units == 1 is supported,
 * mirroring acpi_os_wait_semaphore().
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}
913 
914 #ifdef ACPI_FUTURE_USAGE
915 u32 acpi_os_get_line(char *buffer)
916 {
917 
918 #ifdef ENABLE_DEBUGGER
919 	if (acpi_in_debugger) {
920 		u32 chars;
921 
922 		kdb_read(buffer, sizeof(line_buf));
923 
924 		/* remove the CR kdb includes */
925 		chars = strlen(buffer) - 1;
926 		buffer[chars] = '\0';
927 	}
928 #endif
929 
930 	return 0;
931 }
932 #endif				/*  ACPI_FUTURE_USAGE  */
933 
/*
 * Handle AML-raised signals: log Fatal opcodes, treat Breakpoint as a
 * NOP (per the ACPI spec, unless a debugger is attached), ignore the
 * rest.
 */
acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}
956 
957 static int __init acpi_os_name_setup(char *str)
958 {
959 	char *p = acpi_os_name;
960 	int count = ACPI_MAX_OVERRIDE_LEN - 1;
961 
962 	if (!str || !*str)
963 		return 0;
964 
965 	for (; count-- && str && *str; str++) {
966 		if (isalnum(*str) || *str == ' ' || *str == ':')
967 			*p++ = *str;
968 		else if (*str == '\'' || *str == '"')
969 			continue;
970 		else
971 			break;
972 	}
973 	*p = 0;
974 
975 	return 1;
976 
977 }
978 
979 __setup("acpi_os_name=", acpi_os_name_setup);
980 
981 static void __init set_osi_linux(unsigned int enable)
982 {
983 	if (osi_linux.enable != enable) {
984 		osi_linux.enable = enable;
985 		printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
986 			enable ? "Add": "Delet");
987 	}
988 	return;
989 }
990 
991 static void __init acpi_cmdline_osi_linux(unsigned int enable)
992 {
993 	osi_linux.cmdline = 1;	/* cmdline set the default */
994 	set_osi_linux(enable);
995 
996 	return;
997 }
998 
999 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1000 {
1001 	osi_linux.dmi = 1;	/* DMI knows that this box asks OSI(Linux) */
1002 
1003 	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1004 
1005 	if (enable == -1)
1006 		return;
1007 
1008 	osi_linux.known = 1;	/* DMI knows which OSI(Linux) default needed */
1009 
1010 	set_osi_linux(enable);
1011 
1012 	return;
1013 }
1014 
1015 /*
1016  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1017  *
1018  * empty string disables _OSI
1019  * string starting with '!' disables that string
1020  * otherwise string is added to list, augmenting built-in strings
1021  */
1022 int __init acpi_osi_setup(char *str)
1023 {
1024 	if (str == NULL || *str == '\0') {
1025 		printk(KERN_INFO PREFIX "_OSI method disabled\n");
1026 		acpi_gbl_create_osi_method = FALSE;
1027 	} else if (!strcmp("!Linux", str)) {
1028 		acpi_cmdline_osi_linux(0);	/* !enable */
1029 	} else if (*str == '!') {
1030 		if (acpi_osi_invalidate(++str) == AE_OK)
1031 			printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1032 	} else if (!strcmp("Linux", str)) {
1033 		acpi_cmdline_osi_linux(1);	/* enable */
1034 	} else if (*osi_additional_string == '\0') {
1035 		strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1036 		printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1037 	}
1038 
1039 	return 1;
1040 }
1041 
1042 __setup("acpi_osi=", acpi_osi_setup);
1043 
1044 /* enable serialization to combat AE_ALREADY_EXISTS errors */
/*
 * "acpi_serialize" boot parameter: force all AML methods to execute
 * serialized, a workaround for AE_ALREADY_EXISTS errors from
 * re-entered non-serialized methods.
 */
static int __init acpi_serialize_setup(char *str)
{
	printk(KERN_INFO PREFIX "serialize enabled\n");

	acpi_gbl_all_methods_serialized = TRUE;

	return 1;
}
1053 
1054 __setup("acpi_serialize", acpi_serialize_setup);
1055 
1056 /*
1057  * Wake and Run-Time GPES are expected to be separate.
1058  * We disable wake-GPEs at run-time to prevent spurious
1059  * interrupts.
1060  *
1061  * However, if a system exists that shares Wake and
1062  * Run-time events on the same GPE this flag is available
1063  * to tell Linux to keep the wake-time GPEs enabled at run-time.
1064  */
/*
 * "acpi_wake_gpes_always_on" boot parameter: keep wake GPEs enabled at
 * run-time instead of disabling them (see the rationale above).
 */
static int __init acpi_wake_gpes_always_on_setup(char *str)
{
	printk(KERN_INFO PREFIX "wake GPEs not disabled\n");

	acpi_gbl_leave_wake_gpes_disabled = FALSE;

	return 1;
}
1073 
1074 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
1075 
/* Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and SystemMemory only).
1078  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1079  * in arbitrary AML code and can interfere with legacy drivers.
1080  * acpi_enforce_resources= can be set to:
1081  *
1082  *   - strict (default) (2)
1083  *     -> further driver trying to access the resources will not load
1084  *   - lax              (1)
1085  *     -> further driver trying to access the resources will load, but you
1086  *     get a system message that something might go wrong...
1087  *
1088  *   - no               (0)
1089  *     -> ACPI Operation Region resources will not be registered
1090  *
1091  */
1092 #define ENFORCE_RESOURCES_STRICT 2
1093 #define ENFORCE_RESOURCES_LAX    1
1094 #define ENFORCE_RESOURCES_NO     0
1095 
1096 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1097 
/*
 * "acpi_enforce_resources=" boot parameter: choose strict/lax/no
 * enforcement of ACPI OperationRegion resource conflicts (see the
 * policy description above).  Unrecognized values leave the current
 * setting unchanged.
 */
static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}
1112 
1113 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
1114 
1115 /* Check for resource conflicts between ACPI OperationRegions and native
1116  * drivers */
/*
 * Check a driver's I/O or memory resource @res against the ACPI
 * OperationRegions registered in resource_list_head.  Returns -EBUSY on
 * overlap under strict enforcement, 0 otherwise (possibly after logging
 * a warning under lax enforcement).
 */
int acpi_check_resource_conflict(struct resource *res)
{
	struct acpi_res_list *res_list_elem;
	int ioport;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	ioport = res->flags & IORESOURCE_IO;

	/* walk the region list under the lock; stop at the first overlap */
	spin_lock(&acpi_res_lock);
	list_for_each_entry(res_list_elem, &resource_list_head,
			    resource_list) {
		if (ioport && (res_list_elem->resource_type
			       != ACPI_ADR_SPACE_SYSTEM_IO))
			continue;
		if (!ioport && (res_list_elem->resource_type
				!= ACPI_ADR_SPACE_SYSTEM_MEMORY))
			continue;

		/* ranges are inclusive; disjoint iff one ends before the other starts */
		if (res->end < res_list_elem->start
		    || res_list_elem->end < res->start)
			continue;
		clash = 1;
		break;
	}
	spin_unlock(&acpi_res_lock);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			/* severity depends on whether we will actually refuse */
			printk("%sACPI: %s resource %s [0x%llx-0x%llx]"
			       " conflicts with ACPI region %s"
			       " [0x%llx-0x%llx]\n",
			       acpi_enforce_resources == ENFORCE_RESOURCES_LAX
			       ? KERN_WARNING : KERN_ERR,
			       ioport ? "I/O" : "Memory", res->name,
			       (long long) res->start, (long long) res->end,
			       res_list_elem->name,
			       (long long) res_list_elem->start,
			       (long long) res_list_elem->end);
			printk(KERN_INFO "ACPI: Device needs an ACPI driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);
1168 
1169 int acpi_check_region(resource_size_t start, resource_size_t n,
1170 		      const char *name)
1171 {
1172 	struct resource res = {
1173 		.start = start,
1174 		.end   = start + n - 1,
1175 		.name  = name,
1176 		.flags = IORESOURCE_IO,
1177 	};
1178 
1179 	return acpi_check_resource_conflict(&res);
1180 }
1181 EXPORT_SYMBOL(acpi_check_region);
1182 
1183 int acpi_check_mem_region(resource_size_t start, resource_size_t n,
1184 		      const char *name)
1185 {
1186 	struct resource res = {
1187 		.start = start,
1188 		.end   = start + n - 1,
1189 		.name  = name,
1190 		.flags = IORESOURCE_MEM,
1191 	};
1192 
1193 	return acpi_check_resource_conflict(&res);
1194 
1195 }
1196 EXPORT_SYMBOL(acpi_check_mem_region);
1197 
1198 /*
1199  * Acquire a spinlock.
1200  *
1201  * handle is a pointer to the spinlock_t.
1202  */
1203 
1204 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1205 {
1206 	acpi_cpu_flags flags;
1207 	spin_lock_irqsave(lockp, flags);
1208 	return flags;
1209 }
1210 
1211 /*
1212  * Release a spinlock. See above.
1213  */
1214 
1215 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1216 {
1217 	spin_unlock_irqrestore(lockp, flags);
1218 }
1219 
1220 #ifndef ACPI_USE_LOCAL_CACHE
1221 
1222 /*******************************************************************************
1223  *
1224  * FUNCTION:    acpi_os_create_cache
1225  *
1226  * PARAMETERS:  name      - Ascii name for the cache
1227  *              size      - Size of each cached object
1228  *              depth     - Maximum depth of the cache (in objects) <ignored>
1229  *              cache     - Where the new cache object is returned
1230  *
1231  * RETURN:      status
1232  *
1233  * DESCRIPTION: Create a cache object
1234  *
1235  ******************************************************************************/
1236 
1237 acpi_status
1238 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1239 {
1240 	*cache = kmem_cache_create(name, size, 0, 0, NULL);
1241 	if (*cache == NULL)
1242 		return AE_ERROR;
1243 	else
1244 		return AE_OK;
1245 }
1246 
1247 /*******************************************************************************
1248  *
1249  * FUNCTION:    acpi_os_purge_cache
1250  *
1251  * PARAMETERS:  Cache           - Handle to cache object
1252  *
1253  * RETURN:      Status
1254  *
1255  * DESCRIPTION: Free all objects within the requested cache.
1256  *
1257  ******************************************************************************/
1258 
1259 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1260 {
1261 	kmem_cache_shrink(cache);
1262 	return (AE_OK);
1263 }
1264 
1265 /*******************************************************************************
1266  *
1267  * FUNCTION:    acpi_os_delete_cache
1268  *
1269  * PARAMETERS:  Cache           - Handle to cache object
1270  *
1271  * RETURN:      Status
1272  *
1273  * DESCRIPTION: Free all objects within the requested cache and delete the
1274  *              cache object.
1275  *
1276  ******************************************************************************/
1277 
1278 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1279 {
1280 	kmem_cache_destroy(cache);
1281 	return (AE_OK);
1282 }
1283 
1284 /*******************************************************************************
1285  *
1286  * FUNCTION:    acpi_os_release_object
1287  *
1288  * PARAMETERS:  Cache       - Handle to cache object
1289  *              Object      - The object to be released
1290  *
1291  * RETURN:      None
1292  *
1293  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1294  *              the object is deleted.
1295  *
1296  ******************************************************************************/
1297 
1298 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1299 {
1300 	kmem_cache_free(cache, object);
1301 	return (AE_OK);
1302 }
1303 
1304 /******************************************************************************
1305  *
1306  * FUNCTION:    acpi_os_validate_interface
1307  *
1308  * PARAMETERS:  interface           - Requested interface to be validated
1309  *
1310  * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
1311  *
1312  * DESCRIPTION: Match an interface string to the interfaces supported by the
1313  *              host. Strings originate from an AML call to the _OSI method.
1314  *
1315  *****************************************************************************/
1316 
1317 acpi_status
1318 acpi_os_validate_interface (char *interface)
1319 {
1320 	if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1321 		return AE_OK;
1322 	if (!strcmp("Linux", interface)) {
1323 
1324 		printk(KERN_NOTICE PREFIX
1325 			"BIOS _OSI(Linux) query %s%s\n",
1326 			osi_linux.enable ? "honored" : "ignored",
1327 			osi_linux.cmdline ? " via cmdline" :
1328 			osi_linux.dmi ? " via DMI" : "");
1329 
1330 		if (osi_linux.enable)
1331 			return AE_OK;
1332 	}
1333 	return AE_SUPPORT;
1334 }
1335 
1336 /******************************************************************************
1337  *
1338  * FUNCTION:    acpi_os_validate_address
1339  *
1340  * PARAMETERS:  space_id             - ACPI space ID
1341  *              address             - Physical address
1342  *              length              - Address length
1343  *
1344  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1345  *              should return AE_AML_ILLEGAL_ADDRESS.
1346  *
1347  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1348  *              the addresses accessed by AML operation regions.
1349  *
1350  *****************************************************************************/
1351 
1352 acpi_status
1353 acpi_os_validate_address (
1354     u8                   space_id,
1355     acpi_physical_address   address,
1356     acpi_size               length,
1357     char *name)
1358 {
1359 	struct acpi_res_list *res;
1360 	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1361 		return AE_OK;
1362 
1363 	switch (space_id) {
1364 	case ACPI_ADR_SPACE_SYSTEM_IO:
1365 	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1366 		/* Only interference checks against SystemIO and SytemMemory
1367 		   are needed */
1368 		res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1369 		if (!res)
1370 			return AE_OK;
1371 		/* ACPI names are fixed to 4 bytes, still better use strlcpy */
1372 		strlcpy(res->name, name, 5);
1373 		res->start = address;
1374 		res->end = address + length - 1;
1375 		res->resource_type = space_id;
1376 		spin_lock(&acpi_res_lock);
1377 		list_add(&res->resource_list, &resource_list_head);
1378 		spin_unlock(&acpi_res_lock);
1379 		pr_debug("Added %s resource: start: 0x%llx, end: 0x%llx, "
1380 			 "name: %s\n", (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1381 			 ? "SystemIO" : "System Memory",
1382 			 (unsigned long long)res->start,
1383 			 (unsigned long long)res->end,
1384 			 res->name);
1385 		break;
1386 	case ACPI_ADR_SPACE_PCI_CONFIG:
1387 	case ACPI_ADR_SPACE_EC:
1388 	case ACPI_ADR_SPACE_SMBUS:
1389 	case ACPI_ADR_SPACE_CMOS:
1390 	case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1391 	case ACPI_ADR_SPACE_DATA_TABLE:
1392 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
1393 		break;
1394 	}
1395 	return AE_OK;
1396 }
1397 
1398 #endif
1399