/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * IA-64 architecture glue for the ACPI core: always-on ACPI flags,
 * MADT/CPEI helpers, and early NUMA/possible-CPU map setup.
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 */

#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H

#ifdef __KERNEL__

#include <acpi/pdc_intel.h>

#include <linux/init.h>
#include <linux/numa.h>
#include <asm/numa.h>


extern int acpi_lapic;
#define acpi_disabled 0	/* ACPI always enabled on IA64 */
#define acpi_noirq 0	/* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1	/* no ACPI spec workarounds on IA64 */

/*
 * Report whether any local APIC/SAPIC entry was seen while parsing the
 * MADT (acpi_lapic is presumably set during MADT parse — set elsewhere,
 * see the extern above).
 */
static inline bool acpi_has_cpu_in_madt(void)
{
	return !!acpi_lapic;
}

#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */

/* ACPI cannot be disabled on IA64, so this is a no-op. */
static inline void disable_acpi(void) { }

#ifdef CONFIG_IA64_GENERIC
const char *acpi_get_sysname (void);
#else
/*
 * Return the platform name string for the single platform this kernel
 * was configured for; the generic kernel resolves it at runtime instead
 * (out-of-line declaration above).
 */
static inline const char *acpi_get_sysname (void)
{
# if defined (CONFIG_IA64_HP_ZX1)
	return "hpzx1";
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
	return "hpzx1_swiotlb";
# elif defined (CONFIG_IA64_SGI_UV)
	return "uv";
# elif defined (CONFIG_IA64_DIG)
	return "dig";
# elif defined(CONFIG_IA64_DIG_VTD)
	return "dig_vtd";
# else
#	error Unknown platform.  Fix acpi.c.
# endif
}
#endif
int acpi_request_vector (u32 int_type);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);

/* Low-level suspend routine. */
extern int acpi_suspend_lowlevel(void);

extern unsigned long acpi_wakeup_address;

/*
 * Record the cpei override flag and current logical cpu. This is
 * useful for CPU removal.
 */
extern unsigned int can_cpei_retarget(void);
extern unsigned int is_cpu_cpei_target(unsigned int cpu);
extern void set_cpei_target_cpu(unsigned int cpu);
extern unsigned int get_cpei_target_cpu(void);
extern void prefill_possible_map(void);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
extern int additional_cpus;
#else
#define additional_cpus 0
#endif

#ifdef CONFIG_ACPI_NUMA
/* Proximity-domain table must cover at least 256 entries. */
#if MAX_NUMNODES > 256
#define MAX_PXM_DOMAINS MAX_NUMNODES
#else
#define MAX_PXM_DOMAINS (256)
#endif
extern int pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif

/* IA64 always evaluates _PDC. */
static inline bool arch_has_acpi_pdc(void) { return true; }

/* Advertise SMP Enhanced SpeedStep capability in the _PDC buffer. */
static inline void arch_acpi_set_pdc_bits(u32 *buf)
{
	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}

#define acpi_unlazy_tlb(x)

#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu)  \
	for_each_cpu((cpu), &early_cpu_possible_map)

/*
 * Pad early_cpu_possible_map out to at least min_cpus plus reserve_cpus
 * (capped at NR_CPUS), and hand each newly added cpu a node id in
 * round-robin order over the online nodes so every possible cpu has a
 * valid nid before per-cpu areas are set up.
 */
static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
{
	int low_cpu, high_cpu;
	int cpu;
	int next_nid = 0;

	low_cpu = cpumask_weight(&early_cpu_possible_map);

	high_cpu = max(low_cpu, min_cpus);
	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);

	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
		cpumask_set_cpu(cpu, &early_cpu_possible_map);
		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
			node_cpuid[cpu].nid = next_nid;
			next_nid++;
			if (next_nid >= num_online_nodes())
				next_nid = 0;
		}
	}
}

extern void acpi_numa_fixup(void);

#endif /* CONFIG_ACPI_NUMA */

#endif /*__KERNEL__*/

#endif /*_ASM_ACPI_H*/