/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 */

#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H

#ifdef __KERNEL__

#include <acpi/pdc_intel.h>

#include <linux/init.h>
#include <linux/numa.h>
#include <asm/numa.h>


extern int acpi_lapic;
#define acpi_disabled 0	/* ACPI always enabled on IA64 */
#define acpi_noirq 0	/* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0	/* ACPI PCI always enabled on IA64 */
#define acpi_strict 1	/* no ACPI spec workarounds on IA64 */

static inline bool acpi_has_cpu_in_madt(void)
{
	return !!acpi_lapic;
}

#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }

int acpi_request_vector (u32 int_type);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);

/* Low-level suspend routine. */
extern int acpi_suspend_lowlevel(void);

static inline unsigned long acpi_get_wakeup_address(void)
{
	return 0;
}

/*
 * Record the cpei override flag and current logical cpu. This is
 * useful for CPU removal.
 */
extern unsigned int can_cpei_retarget(void);
extern unsigned int is_cpu_cpei_target(unsigned int cpu);
extern void set_cpei_target_cpu(unsigned int cpu);
extern unsigned int get_cpei_target_cpu(void);
extern void prefill_possible_map(void);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
extern int additional_cpus;
#else
#define additional_cpus 0
#endif

#ifdef CONFIG_ACPI_NUMA
#if MAX_NUMNODES > 256
#define MAX_PXM_DOMAINS MAX_NUMNODES
#else
#define MAX_PXM_DOMAINS (256)
#endif
extern int pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif

static inline bool arch_has_acpi_pdc(void) { return true; }
static inline void arch_acpi_set_pdc_bits(u32 *buf)
{
	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}

#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu)	\
	for_each_cpu((cpu), &early_cpu_possible_map)

static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
{
	int low_cpu, high_cpu;
	int cpu;
	int next_nid = 0;

	low_cpu = cpumask_weight(&early_cpu_possible_map);

	high_cpu = max(low_cpu, min_cpus);
	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);

	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
		cpumask_set_cpu(cpu, &early_cpu_possible_map);
		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
			node_cpuid[cpu].nid = next_nid;
			next_nid++;
			if (next_nid >= num_online_nodes())
				next_nid = 0;
		}
	}
}

extern void acpi_numa_fixup(void);

#endif /* CONFIG_ACPI_NUMA */

#endif /*__KERNEL__*/

#endif /*_ASM_ACPI_H*/
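
/*
 * Illustrative sketch: one way the CPEI helpers declared above might be
 * used when a CPU is taken offline, loosely modeled on the IA64
 * CPU-hotplug path.  example_retarget_cpei() and the cpumask_any()
 * target selection are assumptions for illustration only, not part of
 * this header's interface, so the block is compiled out.
 */
#if 0
#include <linux/cpumask.h>

static void example_retarget_cpei(unsigned int dying_cpu)
{
	/* Only act if the dying CPU is currently the CPEI target. */
	if (!is_cpu_cpei_target(dying_cpu))
		return;

	/* Firmware may not allow retargeting; check before switching. */
	if (can_cpei_retarget()) {
		unsigned int new_cpu = cpumask_any(cpu_online_mask);

		if (new_cpu < nr_cpu_ids && new_cpu != dying_cpu)
			set_cpei_target_cpu(new_cpu);
	}
}
#endif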