#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64, symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE      PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE      0
#endif

/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        pg_data_t *last = NULL;
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                int node = early_cpu_to_node(cpu);

                if (node_online(node) && NODE_DATA(node) &&
                    last && last != NODE_DATA(node))
                        return true;

                last = NODE_DATA(node);
        }
#endif
        return false;
}
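
/*
 * Worked example (hypothetical layout, for illustration only): on a
 * two-node box with cpus 0-1 on node 0 and cpus 2-3 on node 1, the
 * loop above caches NODE_DATA(0) in @last while scanning cpus 0-1,
 * then sees NODE_DATA(1) != @last at cpu 2 and returns true.  If all
 * possible cpus sit on a single online node, @last never changes and
 * the function falls through to return false.
 */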

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: allocation size in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
                                        unsigned long align)
{
        const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int node = early_cpu_to_node(cpu);
        void *ptr;

        if (!node_online(node) || !NODE_DATA(node)) {
                ptr = __alloc_bootmem_nopanic(size, align, goal);
                pr_info("cpu %d has no node %d or node-local memory\n",
                        cpu, node);
                pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
                         cpu, size, __pa(ptr));
        } else {
                ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
                                                   size, align, goal);
                pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
                         cpu, size, node, __pa(ptr));
        }
        return ptr;
#else
        return __alloc_bootmem_nopanic(size, align, goal);
#endif
}
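
/*
 * Note on @goal above: the bootmem allocator treats it as a preferred
 * lower bound on the physical address, so the per cpu areas land above
 * MAX_DMA_ADDRESS when possible, keeping the scarce DMA zone free for
 * callers that actually need it.
 */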

/*
 * Remap allocator
 *
 * This allocator uses PMD page as unit.  A PMD page is allocated for
 * each cpu and each is remapped into vmalloc area using PMD mapping.
 * As PMD page is quite large, only part of it is used for the first
 * chunk.  Unused part is returned to the bootmem allocator.
 *
 * So, the PMD pages are mapped twice - once to the physical mapping
 * and to the vmalloc area for the first percpu chunk.  The double
 * mapping adds one more PMD TLB entry's worth of pressure but is
 * still much better than using only 4k mappings while remaining
 * NUMA friendly.
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
static size_t pcpur_size __initdata;
static void **pcpur_ptrs __initdata;

static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
{
        size_t off = (size_t)pageno << PAGE_SHIFT;

        if (off >= pcpur_size)
                return NULL;

        return virt_to_page(pcpur_ptrs[cpu] + off);
}

static ssize_t __init setup_pcpu_remap(size_t static_size)
{
        static struct vm_struct vm;
        size_t ptrs_size, dyn_size;
        unsigned int cpu;
        ssize_t ret;

        /*
         * If large page isn't supported, there's no benefit in doing
         * this.  Also, on non-NUMA, embedding is better.
         */
        if (!cpu_has_pse || !pcpu_need_numa())
                return -EINVAL;

        /*
         * Currently supports only single page.  Supporting multiple
         * pages won't be too difficult if it ever becomes necessary.
         */
        pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
                               PERCPU_DYNAMIC_RESERVE);
        if (pcpur_size > PMD_SIZE) {
                pr_warning("PERCPU: static data is larger than large page, "
                           "can't use large page\n");
                return -EINVAL;
        }
        dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

        /* allocate pointer array and alloc large pages */
        ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
        pcpur_ptrs = alloc_bootmem(ptrs_size);

        for_each_possible_cpu(cpu) {
                pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
                if (!pcpur_ptrs[cpu])
                        goto enomem;

                /*
                 * Only use pcpur_size bytes and give back the rest.
                 *
                 * Ingo: The 2MB up-rounding bootmem is needed to make
                 * sure the partial 2MB page is still fully RAM - it's
                 * not well-specified to have a PAT-incompatible area
                 * (unmapped RAM, device memory, etc.) in that hole.
                 */
                free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
                             PMD_SIZE - pcpur_size);

                memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
        }

        /* allocate address and map */
        vm.flags = VM_ALLOC;
        vm.size = num_possible_cpus() * PMD_SIZE;
        vm_area_register_early(&vm, PMD_SIZE);

        for_each_possible_cpu(cpu) {
                pmd_t *pmd;

                pmd = populate_extra_pmd((unsigned long)vm.addr
                                         + cpu * PMD_SIZE);
                set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
                                     PAGE_KERNEL_LARGE));
        }

        /* we're ready, commit */
        pr_info("PERCPU: Remapped at %p with large pages, static data %zu bytes\n",
                vm.addr, static_size);

        ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
                                     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
                                     PMD_SIZE, vm.addr, NULL);
        goto out_free_ar;

enomem:
        for_each_possible_cpu(cpu)
                if (pcpur_ptrs[cpu])
                        free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
        ret = -ENOMEM;
out_free_ar:
        free_bootmem(__pa(pcpur_ptrs), ptrs_size);
        return ret;
}
#else
static ssize_t __init setup_pcpu_remap(size_t static_size)
{
        return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * module and dynamic reserves and embedded into the linear physical
 * mapping so that it can use PMD mapping without additional TLB
 * pressure.
 */
static ssize_t __init setup_pcpu_embed(size_t static_size)
{
        size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

        /*
         * If large page isn't supported, there's no benefit in doing
         * this.  Also, embedding allocation doesn't play well with
         * NUMA.
         */
        if (!cpu_has_pse || pcpu_need_numa())
                return -EINVAL;

        return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
                                      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
}
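
/*
 * First chunk layout requested by the allocators above (widths are
 * illustrative, not actual configuration values):
 *
 *   base                                                          top
 *   +-------------+----------------------------+------------------+
 *   | static_size | PERCPU_FIRST_CHUNK_RESERVE | dynamic area     |
 *   +-------------+----------------------------+------------------+
 *
 * The static part is copied from __per_cpu_load, the reserved part
 * serves static percpu variables in modules on x86_64, and the rest
 * is handed to the dynamic percpu allocator.
 */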

/*
 * 4k page allocator
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page and most of initialization is done by the generic
 * setup function.
 */
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_nr_static_pages __initdata;

static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
        if (pageno < pcpu4k_nr_static_pages)
                return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
        return NULL;
}

static void __init pcpu4k_populate_pte(unsigned long addr)
{
        populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_4k(size_t static_size)
{
        size_t pages_size;
        unsigned int cpu;
        int i, j;
        ssize_t ret;

        pcpu4k_nr_static_pages = PFN_UP(static_size);

        /* unaligned allocations can't be freed, round up to page size */
        pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
                               * sizeof(pcpu4k_pages[0]));
        pcpu4k_pages = alloc_bootmem(pages_size);

        /* allocate and copy */
        j = 0;
        for_each_possible_cpu(cpu)
                for (i = 0; i < pcpu4k_nr_static_pages; i++) {
                        void *ptr;

                        ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
                        if (!ptr)
                                goto enomem;

                        memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
                        pcpu4k_pages[j++] = virt_to_page(ptr);
                }

        /* we're ready, commit */
        pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
                pcpu4k_nr_static_pages, static_size);

        ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
                                     PERCPU_FIRST_CHUNK_RESERVE, -1,
                                     -1, NULL, pcpu4k_populate_pte);
        goto out_free_ar;

enomem:
        while (--j >= 0)
                free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
        ret = -ENOMEM;
out_free_ar:
        free_bootmem(__pa(pcpu4k_pages), pages_size);
        return ret;
}

static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        struct desc_struct gdt;

        pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
                        0x2 | DESCTYPE_S, 0x8);
        gdt.s = 1;
        write_gdt_entry(get_cpu_gdt_table(cpu),
                        GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}
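
/*
 * On x86_32 the descriptor packed above gives the segment a base equal
 * to the cpu's percpu offset, so percpu accesses can go through a
 * segment register (%fs on 32-bit) instead of adding the offset by
 * hand.  x86_64 achieves the same effect by loading the offset into
 * the GS base MSR, so no GDT entry is needed and the body is empty.
 */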

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack, tss, pgd) as per cpu data.
 * Always point %gs to its beginning.
 */
void __init setup_per_cpu_areas(void)
{
        size_t static_size = __per_cpu_end - __per_cpu_start;
        unsigned int cpu;
        unsigned long delta;
        size_t pcpu_unit_size;
        ssize_t ret;

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        /*
         * Allocate percpu area.  If PSE is supported, try to make use
         * of large page mappings.  Please read comments on top of
         * each allocator for details.
         */
        ret = setup_pcpu_remap(static_size);
        if (ret < 0)
                ret = setup_pcpu_embed(static_size);
        if (ret < 0)
                ret = setup_pcpu_4k(static_size);
        if (ret < 0)
                panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
                      static_size, ret);

        pcpu_unit_size = ret;

        /* alrighty, percpu areas up and running */
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
                per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
                setup_stack_canary_segment(cpu);
                /*
                 * Copy data used in early init routines from the
                 * initial arrays to the per cpu data areas.  These
                 * arrays then become expendable and the *_early_ptr's
                 * are zeroed indicating that the static arrays are
                 * gone.
                 */
#ifdef CONFIG_X86_LOCAL_APIC
                per_cpu(x86_cpu_to_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
                        IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
                /*
                 * Up to this point, the boot CPU has been using .data.init
                 * area.  Reload any changed state for the boot CPU.
                 */
                if (cpu == boot_cpu_id)
                        switch_to_new_gdt(cpu);
        }

        /* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}