/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>
#include <asm/k8.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

bootmem_data_t plat_node_bdata[MAX_NUMNODES];

struct memnode memnode;

#ifdef CONFIG_SMP
int x86_cpu_to_node_map_init[NR_CPUS] = {
	[0 ... NR_CPUS-1] = NUMA_NO_NODE
};
void *x86_cpu_to_node_map_early_ptr;
EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr);
#endif
DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE;
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_node_map);

s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_to_cpumask_map);

int numa_off __initdata;
unsigned long __initdata nodemap_addr;
unsigned long __initdata nodemap_size;

/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 *  1 if OK
 *  0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct bootnode *nodes,
				      int numnodes, int shift)
{
	unsigned long addr, end;
	int i, res = -1;

	memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
	for (i = 0; i < numnodes; i++) {
		addr = nodes[i].start;
		end = nodes[i].end;
		if (addr >= end)
			continue;
		if ((end >> shift) >= memnodemapsize)
			return 0;
		do {
			if (memnodemap[addr >> shift] != NUMA_NO_NODE)
				return -1;
			memnodemap[addr >> shift] = i;
			addr += (1UL << shift);
		} while (addr < end);
		res = 1;
	}
	return res;
}

static int __init allocate_cachealigned_memnodemap(void)
{
	unsigned long addr;

	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
		return 0;

	addr = 0x8000;
	nodemap_size = round_up(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
	nodemap_addr = find_e820_area(addr, end_pfn<<PAGE_SHIFT,
				      nodemap_size, L1_CACHE_BYTES);
	if (nodemap_addr == -1UL) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
	memnodemap = phys_to_virt(nodemap_addr);
	reserve_early(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
	return 0;
}
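/*
 * Illustrative note (not part of the original source): phys_to_nid() is a
 * plain table lookup, memnodemap[addr >> memnode_shift].  For example, with
 * two nodes covering [0, 4GB) and [4GB, 8GB), extract_lsb_from_nodes() below
 * yields a shift of 32 and memnodemapsize = (8GB >> 32) + 1 = 3, which fits
 * in memnode.embedded_map; larger maps are carved out of e820 memory by
 * allocate_cachealigned_memnodemap() above.
 */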
/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
					 int numnodes)
{
	int i, nodes_used = 0;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < numnodes; i++) {
		start = nodes[i].start;
		end = nodes[i].end;
		if (start >= end)
			continue;
		bitfield |= start;
		nodes_used++;
		if (end > memtop)
			memtop = end;
	}
	if (nodes_used <= 1)
		i = 63;
	else
		i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	memnodemapsize = (memtop >> i)+1;
	return i;
}

int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
{
	int shift;

	shift = extract_lsb_from_nodes(nodes, numnodes);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);

	if (populate_memnodemap(nodes, numnodes, shift) != 1) {
		printk(KERN_INFO "Your memory is not aligned; you need to "
		       "rebuild your kernel with a bigger NODEMAPSIZE, "
		       "shift=%d\n", shift);
		return -1;
	}
	return shift;
}

int early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}

static void * __init early_node_mem(int nodeid, unsigned long start,
				    unsigned long end, unsigned long size,
				    unsigned long align)
{
	unsigned long mem = find_e820_area(start, end, size, align);
	void *ptr;

	if (mem != -1L)
		return __va(mem);

	ptr = __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
	if (ptr == NULL) {
		printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
		       size, nodeid);
		return NULL;
	}
	return ptr;
}
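/*
 * Sizing note (illustrative, not part of the original source): bootmem needs
 * one bit per page of the node, so for a 4GB node (2^20 4K pages)
 * bootmem_bootmap_pages() below requests a 128KB bitmap, i.e. 32 pages, which
 * setup_node_bootmem() tries to place right after the node's pg_data_t.
 */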
/* Initialize bootmem allocator for a node */
void __init setup_node_bootmem(int nodeid, unsigned long start,
			       unsigned long end)
{
	unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size;
	unsigned long bootmap_start, nodedata_phys;
	void *bootmap;
	const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);

	start = round_up(start, ZONE_ALIGN);

	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
	       start, end);

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
					   SMP_CACHE_BYTES);
	if (node_data[nodeid] == NULL)
		return;
	nodedata_phys = __pa(node_data[nodeid]);
	printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
	       nodedata_phys + pgdat_size - 1);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid];
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;

	/* Find a place for the bootmem map */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
	/*
	 * SMP_CACHE_BYTES could be enough, but init_bootmem_node likes
	 * it aligned to PAGE_SIZE, so use that.
	 */
	bootmap = early_node_mem(nodeid, bootmap_start, end,
				 bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
	if (bootmap == NULL) {
		if (nodedata_phys < start || nodedata_phys >= end)
			free_bootmem(nodedata_phys, pgdat_size);
		node_data[nodeid] = NULL;
		return;
	}
	bootmap_start = __pa(bootmap);

	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
					 bootmap_start >> PAGE_SHIFT,
					 start_pfn, end_pfn);

	printk(KERN_INFO " bootmap [%016lx - %016lx] pages %lx\n",
	       bootmap_start, bootmap_start + bootmap_size - 1,
	       bootmap_pages);

	free_bootmem_with_active_regions(nodeid, end);

	reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size,
			     BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
			     bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);
#ifdef CONFIG_ACPI_NUMA
	srat_reserve_add_area(nodeid);
#endif
	node_set_online(nodeid);
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU.  This breaks the 1:1 cpu->node
 * mapping.  To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet.  We round-robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < NR_CPUS; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}

#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
char *cmdline __initdata;

/*
 * Sets up nid to range from addr to addr + size.  If the end boundary is
 * greater than max_addr, then max_addr is used instead.  The return value is
 * 0 if there is additional memory left for allocation past addr and -1
 * otherwise.  addr is adjusted to be at the end of the node.
 */
static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
				   u64 size, u64 max_addr)
{
	int ret = 0;

	nodes[nid].start = *addr;
	*addr += size;
	if (*addr >= max_addr) {
		*addr = max_addr;
		ret = -1;
	}
	nodes[nid].end = *addr;
	node_set(nid, node_possible_map);
	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
	       nodes[nid].start, nodes[nid].end,
	       (nodes[nid].end - nodes[nid].start) >> 20);
	return ret;
}
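/*
 * Worked example for the equal split below (illustrative, assuming
 * FAKE_NODE_MIN_SIZE is 64MB and a hole-free range): splitting 896MB into
 * four nodes gives size = 224MB, rounded down to 192MB; the 4 * 32MB of
 * leftover yields big = 2, so the first two nodes created get 256MB and
 * the remaining two get 192MB each.
 */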
/*
 * Splits num_nodes nodes up equally starting at node_start.  The return value
 * is the number of nodes split up and addr is adjusted to be at the end of
 * the last node allocated.
 */
static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
				      u64 max_addr, int node_start,
				      int num_nodes)
{
	unsigned int big;
	u64 size;
	int i;

	if (num_nodes <= 0)
		return -1;
	if (num_nodes > MAX_NUMNODES)
		num_nodes = MAX_NUMNODES;
	size = (max_addr - *addr - e820_hole_size(*addr, max_addr)) /
	       num_nodes;
	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the leftovers.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * num_nodes) /
	      FAKE_NODE_MIN_SIZE;

	/* Round down to nearest FAKE_NODE_MIN_SIZE. */
	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		printk(KERN_ERR "Not enough memory for each node.  "
		       "NUMA emulation disabled.\n");
		return -1;
	}

	for (i = node_start; i < num_nodes + node_start; i++) {
		u64 end = *addr + size;

		if (i < big)
			end += FAKE_NODE_MIN_SIZE;
		/*
		 * The final node can have the remaining system RAM.  Other
		 * nodes receive roughly the same amount of available pages.
		 */
		if (i == num_nodes + node_start - 1)
			end = max_addr;
		else
			while (end - *addr - e820_hole_size(*addr, end) <
			       size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > max_addr) {
					end = max_addr;
					break;
				}
			}
		if (setup_node_range(i, nodes, addr, end - *addr, max_addr) < 0)
			break;
	}
	return i - node_start + 1;
}

/*
 * Splits the remaining system RAM into chunks of size.  The remaining memory
 * is always assigned to a final node and can be asymmetric.  Returns the
 * number of nodes split.
 */
static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
				      u64 max_addr, int node_start, u64 size)
{
	int i = node_start;

	size = (size << 20) & FAKE_NODE_MIN_HASH_MASK;
	while (!setup_node_range(i++, nodes, addr, size, max_addr))
		;
	return i - node_start;
}
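/*
 * Usage examples for the parser below (illustrative, derived from the code):
 * "numa=fake=4" splits system RAM into four equally sized nodes;
 * "numa=fake=2*512,1024" creates two 512MB nodes, one 1024MB node, and puts
 * any remaining RAM in one final node.  Sizes are given in megabytes and are
 * rounded down to multiples of FAKE_NODE_MIN_SIZE.
 */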
/*
 * Sets up the system RAM area from start_pfn to end_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
	struct bootnode nodes[MAX_NUMNODES];
	u64 size, addr = start_pfn << PAGE_SHIFT;
	u64 max_addr = end_pfn << PAGE_SHIFT;
	int num_nodes = 0, num = 0, coeff_flag, coeff = -1, i;

	memset(&nodes, 0, sizeof(nodes));
	/*
	 * If the numa=fake command-line is just a single number N, split the
	 * system RAM into N fake nodes.
	 */
	if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
		long n = simple_strtol(cmdline, NULL, 0);

		num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0, n);
		if (num_nodes < 0)
			return num_nodes;
		goto out;
	}

	/* Parse the command line. */
	for (coeff_flag = 0; ; cmdline++) {
		if (*cmdline && isdigit(*cmdline)) {
			num = num * 10 + *cmdline - '0';
			continue;
		}
		if (*cmdline == '*') {
			if (num > 0)
				coeff = num;
			coeff_flag = 1;
		}
		if (!*cmdline || *cmdline == ',') {
			if (!coeff_flag)
				coeff = 1;
			/*
			 * Round down to the nearest FAKE_NODE_MIN_SIZE.
			 * Command-line sizes are in megabytes.
			 */
			size = ((u64)num << 20) & FAKE_NODE_MIN_HASH_MASK;
			if (size)
				for (i = 0; i < coeff; i++, num_nodes++)
					if (setup_node_range(num_nodes, nodes,
						&addr, size, max_addr) < 0)
						goto done;
			if (!*cmdline)
				break;
			coeff_flag = 0;
			coeff = -1;
		}
		num = 0;
	}
done:
	if (!num_nodes)
		return -1;
	/* Fill remainder of system RAM, if appropriate. */
	if (addr < max_addr) {
		if (coeff_flag && coeff < 0) {
			/* Split remaining nodes into num-sized chunks */
			num_nodes += split_nodes_by_size(nodes, &addr, max_addr,
							 num_nodes, num);
			goto out;
		}
		switch (*(cmdline - 1)) {
		case '*':
			/* Split remaining nodes into coeff chunks */
			if (coeff <= 0)
				break;
			num_nodes += split_nodes_equally(nodes, &addr, max_addr,
							 num_nodes, coeff);
			break;
		case ',':
			/* Do not allocate remaining system RAM */
			break;
		default:
			/* Give one final node */
			setup_node_range(num_nodes, nodes, &addr,
					 max_addr - addr, max_addr);
			num_nodes++;
		}
	}
out:
	memnode_shift = compute_hash_shift(nodes, num_nodes);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found.  NUMA emulation "
		       "disabled.\n");
		return -1;
	}

	/*
	 * We need to vacate all active ranges that may have been registered by
	 * SRAT and set acpi_numa to -1 so that srat_disabled() always returns
	 * true.  NUMA emulation has succeeded so we will not scan ACPI nodes.
	 */
	remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
	acpi_numa = -1;
#endif
	for_each_node_mask(i, node_possible_map) {
		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
					     nodes[i].end >> PAGE_SHIFT);
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	}
	acpi_fake_nodes(nodes, num_nodes);
	numa_init_array();
	return 0;
}
#endif /* CONFIG_NUMA_EMU */

void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);

#ifdef CONFIG_NUMA_EMU
	if (cmdline && !numa_emulation(start_pfn, end_pfn))
		return;
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
#endif

#ifdef CONFIG_ACPI_NUMA
	if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
					  end_pfn << PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
#endif

#ifdef CONFIG_K8_NUMA
	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT,
					end_pfn<<PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
#endif
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");

	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       start_pfn << PAGE_SHIFT,
	       end_pfn << PAGE_SHIFT);
	/* setup dummy node covering all memory */
	memnode_shift = 63;
	memnodemap = memnode.embedded_map;
	memnodemap[0] = 0;
	node_set_online(0);
	node_set(0, node_possible_map);
	for (i = 0; i < NR_CPUS; i++)
		numa_set_node(i, 0);
	/* cpumask_of_cpu() may not be available during early startup */
	memset(&node_to_cpumask_map[0], 0, sizeof(node_to_cpumask_map[0]));
	cpu_set(0, node_to_cpumask_map[0]);
	e820_register_active_regions(0, start_pfn, end_pfn);
	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}

__cpuinit void numa_add_cpu(int cpu)
{
	set_bit(cpu,
		(unsigned long *)&node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;
	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;
	else
		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
}

unsigned long __init numa_free_all_bootmem(void)
{
	unsigned long pages = 0;
	int i;

	for_each_online_node(i)
		pages += free_all_bootmem_node(NODE_DATA(i));

	return pages;
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	free_area_init_nodes(max_zone_pfns);
}
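/*
 * Summary of the "numa=" early parameters handled below (descriptive comment
 * added for clarity): "numa=off" disables NUMA setup, "numa=fake=..." hands
 * the rest of the string to the NUMA emulation code above (CONFIG_NUMA_EMU),
 * and with CONFIG_ACPI_NUMA "numa=noacpi" ignores the ACPI SRAT while
 * "numa=hotadd=<percent>" sets hotadd_percent (used by the SRAT code).
 */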
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		cmdline = opt + 5;
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
	if (!strncmp(opt, "hotadd=", 7))
		hotadd_percent = simple_strtoul(opt+7, NULL, 10);
#endif
	return 0;
}
early_param("numa", numa_setup);

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and apicid_to_node[]
 * tables have valid entries for a CPU.  This means we skip cpu_to_node[]
 * initialisation for NUMA emulation and the fake-node case (when running a
 * kernel compiled for NUMA on a non-NUMA box), which is OK because
 * cpu_to_node[] was already initialized in a round-robin manner by
 * numa_init_array() before this call, and that initialization is good enough
 * for the fake NUMA cases.
 */
void __init init_cpu_to_node(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		int node;
		u16 apicid = x86_cpu_to_apicid_init[i];

		if (apicid == BAD_APICID)
			continue;
		node = apicid_to_node[apicid];
		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			continue;
		numa_set_node(i, node);
	}
}