// SPDX-License-Identifier: GPL-2.0
/*
 * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
 *
 * Author: Jianmin Lv <lvjianmin@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
#include <linux/serial_core.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/loongson.h>

int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
int acpi_noirq;
int acpi_pci_disabled;
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_strict = 1; /* We have no workarounds on LoongArch */
int num_processors;
int disabled_cpus;

u64 acpi_saved_sp;

#define MAX_CORE_PIC 256

#define PREFIX			"ACPI: "

void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
	if (!phys || !size)
		return NULL;

	return early_memremap(phys, size);
}

void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
	if (!map || !size)
		return;

	early_memunmap(map, size);
}

void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	if (!memblock_is_memory(phys))
		return ioremap(phys, size);
	else
		return ioremap_cache(phys, size);
}

void __init acpi_boot_table_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		return;

	/*
	 * Initialize the ACPI boot-time table parser.
	 */
	if (acpi_table_init()) {
		disable_acpi();
		return;
	}
}

#ifdef CONFIG_SMP
/* Map a hardware cpuid (MADT core_id) to a logical CPU number. */
static int set_processor_mask(u32 id, u32 flags)
{
	int cpu, cpuid = id;

	if (num_processors >= nr_cpu_ids) {
		pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached. processor 0x%x ignored.\n",
			nr_cpu_ids, cpuid);
		return -ENODEV;
	}

	if (cpuid == loongson_sysconf.boot_cpu_id)
		cpu = 0;
	else
		cpu = cpumask_next_zero(-1, cpu_present_mask);

	if (flags & ACPI_MADT_ENABLED) {
		num_processors++;
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
		__cpu_number_map[cpuid] = cpu;
		__cpu_logical_map[cpu] = cpuid;
	} else
		disabled_cpus++;

	return cpu;
}
#endif
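
/*
 * Illustrative example (hypothetical firmware values, not from any
 * specific machine): with boot_cpu_id == 0 and MADT CORE_PIC entries
 * for core_ids 0, 1, 4, 5, all flagged ACPI_MADT_ENABLED,
 * set_processor_mask() produces:
 *
 *   core_id 0 -> cpu 0   __cpu_number_map[0] = 0, __cpu_logical_map[0] = 0
 *   core_id 1 -> cpu 1   __cpu_number_map[1] = 1, __cpu_logical_map[1] = 1
 *   core_id 4 -> cpu 2   __cpu_number_map[4] = 2, __cpu_logical_map[2] = 4
 *   core_id 5 -> cpu 3   __cpu_number_map[5] = 3, __cpu_logical_map[3] = 5
 *
 * Logical CPU numbers stay dense even when hardware core ids are sparse;
 * entries without ACPI_MADT_ENABLED only increment disabled_cpus.
 */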

static int __init
acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_core_pic *processor = NULL;

	processor = (struct acpi_madt_core_pic *)header;
	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_SMP
	set_processor_mask(processor->core_id, processor->flags);
#endif

	return 0;
}

static int __init
acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
{
	int core;
	struct acpi_madt_eio_pic *eiointc = NULL;

	eiointc = (struct acpi_madt_eio_pic *)header;
	if (BAD_MADT_ENTRY(eiointc, end))
		return -EINVAL;

	/* The first core of each EIO node acts as an I/O interrupt master. */
	core = eiointc->node * CORES_PER_EIO_NODE;
	set_bit(core, &(loongson_sysconf.cores_io_master));

	return 0;
}

static void __init acpi_process_madt(void)
{
#ifdef CONFIG_SMP
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		__cpu_number_map[i] = -1;
		__cpu_logical_map[i] = -1;
	}
#endif
	acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
			acpi_parse_processor, MAX_CORE_PIC);

	acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
			acpi_parse_eio_master, MAX_IO_PICS);

	loongson_sysconf.nr_cpus = num_processors;
}

int __init acpi_boot_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		return -1;

	loongson_sysconf.boot_cpu_id = read_csr_cpuid();

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	/* Do not enable ACPI SPCR console by default */
	acpi_parse_spcr(earlycon_acpi_spcr_enable, false);

	return 0;
}

#ifdef CONFIG_ACPI_NUMA

static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}
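
/*
 * Illustrative SLIT example (hypothetical values): a two-node system
 * might report locality_count = 2 with the distance matrix
 *
 *	node	0	1
 *	  0	10	20
 *	  1	20	10
 *
 * numa_set_distance() below copies each entry into node_distances[][],
 * rejecting any distance that does not fit in a u8 and any self-distance
 * other than LOCAL_DISTANCE (10).
 */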

/*
 * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
 * I/O localities since SRAT does not list them. I/O localities are
 * not supported at this point.
 */
unsigned int numa_distance_cnt;

static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
	return slit->locality_count;
}

void __init numa_set_distance(int from, int to, int distance)
{
	if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
				from, to, distance);
		return;
	}

	node_distances[from][to] = distance;
}

/* Callback for Proximity Domain -> CPUID mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm, node;

	if (srat_disabled())
		return;
	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain_lo;
	if (acpi_srat_revision >= 2) {
		pxm |= (pa->proximity_domain_hi[0] << 8);
		pxm |= (pa->proximity_domain_hi[1] << 16);
		pxm |= (pa->proximity_domain_hi[2] << 24);
	}
	node = setup_node(pxm);
	if (node < 0) {
		pr_err("SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	if (pa->apic_id >= CONFIG_NR_CPUS) {
		pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
				pxm, pa->apic_id, node);
		return;
	}

	early_numa_add_cpu(pa->apic_id, node);

	set_cpuid_to_node(pa->apic_id, node);
	node_set(node, numa_nodes_parsed);
	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}

void __init acpi_numa_arch_fixup(void) {}
#endif

void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_reserve(addr, size);
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU

#include <acpi/processor.h>

static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
	int nid;

	nid = acpi_get_node(handle);
	if (nid != NUMA_NO_NODE) {
		set_cpuid_to_node(physid, nid);
		node_set(nid, numa_nodes_parsed);
		set_cpu_numa_node(cpu, nid);
		cpumask_set_cpu(cpu, cpumask_of_node(nid));
	}
#endif
	return 0;
}

int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
	int cpu;

	cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
	if (cpu < 0) {
		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
		return cpu;
	}

	acpi_map_cpu2node(handle, cpu, physid);

	*pcpu = cpu;

	return 0;
}
EXPORT_SYMBOL(acpi_map_cpu);

int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
	set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
#endif
	set_cpu_present(cpu, false);
	num_processors--;

	pr_info("cpu%d hot remove!\n", cpu);

	return 0;
}
EXPORT_SYMBOL(acpi_unmap_cpu);

#endif /* CONFIG_ACPI_HOTPLUG_CPU */
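
/*
 * Hotplug flow, for reference (a sketch of the assumed call path, not
 * part of the original file): the generic ACPI processor driver is
 * assumed to call acpi_map_cpu() when a CPU device is hot-added and
 * acpi_unmap_cpu() when it is ejected. acpi_map_cpu() reuses the
 * boot-time set_processor_mask() to allocate a logical CPU number and
 * binds it to a NUMA node via acpi_map_cpu2node(); acpi_unmap_cpu()
 * undoes the present-mask and NUMA bookkeeping so the slot can be
 * reused by a later hot-add.
 */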