/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/proc_fs.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/io.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/smp.h>
#include <asm/x86_init.h>

DEFINE_PER_CPU(int, x2apic_extra_bits);

static enum uv_system_type uv_system_type;
static u64 gru_start_paddr, gru_end_paddr;
int uv_min_hub_revision_id;
EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);

static inline bool is_GRU_range(u64 start, u64 end)
{
	return start >= gru_start_paddr && end <= gru_end_paddr;
}

static bool uv_is_untracked_pat_range(u64 start, u64 end)
{
	return is_ISA_range(start, end) || is_GRU_range(start, end);
}

static int early_get_nodeid(void)
{
	union uvh_node_id_u node_id;
	unsigned long *mmr;

	mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
	node_id.v = *mmr;
	early_iounmap(mmr, sizeof(*mmr));

	/* Currently, all blades have same revision number */
	uv_min_hub_revision_id = node_id.s.revision;

	return node_id.s.node_id;
}

static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	int nodeid;

	if (!strcmp(oem_id, "SGI")) {
		nodeid = early_get_nodeid();
		x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
		if (!strcmp(oem_table_id, "UVL"))
			uv_system_type = UV_LEGACY_APIC;
		else if (!strcmp(oem_table_id, "UVX"))
			uv_system_type = UV_X2APIC;
		else if (!strcmp(oem_table_id, "UVH")) {
			__get_cpu_var(x2apic_extra_bits) =
				nodeid << (UV_APIC_PNODE_SHIFT - 1);
			uv_system_type = UV_NON_UNIQUE_APIC;
			return 1;
		}
	}
	return 0;
}

enum uv_system_type get_uv_system_type(void)
{
	return uv_system_type;
}

int is_uv_system(void)
{
	return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);

DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);

/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */

static const struct cpumask *uv_target_cpus(void)
{
	return cpumask_of(0);
}

static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}
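/*
 * Wake a secondary cpu by writing the hub's IPI_INT MMR directly rather
 * than going through the local APIC ICR: first an INIT message, then,
 * after a short delay, a STARTUP message whose vector field is taken
 * from start_rip.
 */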
static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
#ifdef CONFIG_SMP
	unsigned long val;
	int pnode;

	pnode = uv_apicid_to_pnode(phys_apicid);
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_INIT;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
	mdelay(10);

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	    APIC_DM_STARTUP;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

	atomic_set(&init_deasserted, 1);
#endif
	return 0;
}

static void uv_send_IPI_one(int cpu, int vector)
{
	unsigned long apicid;
	int pnode;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	pnode = uv_apicid_to_pnode(apicid);
	uv_hub_send_ipi(pnode, apicid, vector);
}

static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}

static void uv_send_IPI_allbutself(int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}

static void uv_send_IPI_all(int vector)
{
	uv_send_IPI_mask(cpu_online_mask, vector);
}

static int uv_apic_id_registered(void)
{
	return 1;
}

static void uv_init_apic_ldr(void)
{
}

static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	int cpu = cpumask_first(cpumask);

	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}
	return per_cpu(x86_cpu_to_apicid, cpu);
}
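/*
 * On "UVH" systems (UV_NON_UNIQUE_APIC) the value read from the local APIC
 * is not unique on its own; the per-cpu x2apic_extra_bits set up in
 * uv_acpi_madt_oem_check() and set_x2apic_extra_bits() are OR'd in to form
 * the full apic id.
 */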
static unsigned int x2apic_get_apic_id(unsigned long x)
{
	unsigned int id;

	WARN_ON(preemptible() && num_online_cpus() > 1);
	id = x | __get_cpu_var(x2apic_extra_bits);

	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	/* maskout x2apic_extra_bits ? */
	x = id;
	return x;
}

static unsigned int uv_read_apic_id(void)
{

	return x2apic_get_apic_id(apic_read(APIC_ID));
}

static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
	return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}
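/*
 * APIC driver selected when uv_acpi_madt_oem_check() returns 1 (i.e. only
 * for the "UVH" OEM table). Interrupts use fixed, physical-mode delivery,
 * and register access goes through the x2APIC MSR accessors.
 */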
struct apic __refdata apic_x2apic_uv_x = {

	.name				= "UV large system",
	.probe				= NULL,
	.acpi_madt_oem_check		= uv_acpi_madt_oem_check,
	.apic_id_registered		= uv_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* physical */

	.target_cpus			= uv_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= uv_vector_allocation_domain,
	.init_apic_ldr			= uv_init_apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.apicid_to_node			= NULL,
	.cpu_to_logical_apicid		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= uv_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid		= uv_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= uv_cpu_mask_to_apicid_and,

	.send_IPI_mask			= uv_send_IPI_mask,
	.send_IPI_mask_allbutself	= uv_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= uv_send_IPI_allbutself,
	.send_IPI_all			= uv_send_IPI_all,
	.send_IPI_self			= uv_send_IPI_self,

	.wakeup_secondary_cpu		= uv_wakeup_secondary,
	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

static __cpuinit void set_x2apic_extra_bits(int pnode)
{
	__get_cpu_var(x2apic_extra_bits) = (pnode << 6);
}

/*
 * Called on boot cpu.
 */
static __init int boot_pnode_to_blade(int pnode)
{
	int blade;

	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		if (pnode == uv_blade_info[blade].pnode)
			return blade;
	BUG();
}

struct redir_addr {
	unsigned long redirect;
	unsigned long alias;
};

#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT

static __initdata struct redir_addr redir_addrs[] = {
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
};

static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
	union uvh_si_alias0_overlay_config_u alias;
	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
	int i;

	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
		if (alias.s.enable && alias.s.base == 0) {
			*size = (1UL << alias.s.m_alias);
			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
			return;
		}
	}
	*base = *size = 0;
}

enum map_type {map_wb, map_uc};
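/*
 * map_high() maps one hub overlay window. Roughly:
 *
 *	paddr = base << pshift;
 *	bytes = (1UL << bshift) * (max_pnode + 1);
 *
 * i.e. one bshift-sized slice per possible pnode, mapped write-back (GRU)
 * or uncached (MMR, MMIOH) according to map_type.
 */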
static __init void map_high(char *id, unsigned long base, int pshift,
			int bshift, int max_pnode, enum map_type map_type)
{
	unsigned long bytes, paddr;

	paddr = base << pshift;
	bytes = (1UL << bshift) * (max_pnode + 1);
	printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
						paddr + bytes);
	if (map_type == map_uc)
		init_extra_mapping_uc(paddr, bytes);
	else
		init_extra_mapping_wb(paddr, bytes);

}
static __init void map_gru_high(int max_pnode)
{
	union uvh_rh_gam_gru_overlay_config_mmr_u gru;
	int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;

	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
	if (gru.s.enable) {
		map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
		gru_start_paddr = ((u64)gru.s.base << shift);
		gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);

	}
}

static __init void map_mmr_high(int max_pnode)
{
	union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
	int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
	if (mmr.s.enable)
		map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
}

static __init void map_mmioh_high(int max_pnode)
{
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
	if (mmioh.s.enable)
		map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
			 max_pnode, map_uc);
}

static __init void map_low_mmrs(void)
{
	init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
	init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}

static __init void uv_rtc_init(void)
{
	long status;
	u64 ticks_per_sec;

	status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
					&ticks_per_sec);
	if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
		printk(KERN_WARNING
			"unable to determine platform RTC clock frequency, "
			"guessing.\n");
		/* BIOS gives wrong value for clock freq., so guess */
		sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
	} else
		sn_rtc_cycles_per_second = ticks_per_sec;
}

/*
 * percpu heartbeat timer
 */
static void uv_heartbeat(unsigned long ignored)
{
	struct timer_list *timer = &uv_hub_info->scir.timer;
	unsigned char bits = uv_hub_info->scir.state;

	/* flip heartbeat bit */
	bits ^= SCIR_CPU_HEARTBEAT;

	/* is this cpu idle? */
	if (idle_cpu(raw_smp_processor_id()))
		bits &= ~SCIR_CPU_ACTIVITY;
	else
		bits |= SCIR_CPU_ACTIVITY;

	/* update system controller interface reg */
	uv_set_scir_bits(bits);

	/* enable next timer period */
	mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}

static void __cpuinit uv_heartbeat_enable(int cpu)
{
	if (!uv_cpu_hub_info(cpu)->scir.enabled) {
		struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;

		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
		setup_timer(timer, uv_heartbeat, cpu);
		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
		add_timer_on(timer, cpu);
		uv_cpu_hub_info(cpu)->scir.enabled = 1;
	}

	/* check boot cpu */
	if (!uv_cpu_hub_info(0)->scir.enabled)
		uv_heartbeat_enable(0);
}

#ifdef CONFIG_HOTPLUG_CPU
static void __cpuinit uv_heartbeat_disable(int cpu)
{
	if (uv_cpu_hub_info(cpu)->scir.enabled) {
		uv_cpu_hub_info(cpu)->scir.enabled = 0;
		del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
	}
	uv_set_cpu_scir_bits(cpu, 0xff);
}

/*
 * cpu hotplug notifier
 */
static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		uv_heartbeat_enable(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uv_heartbeat_disable(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static __init void uv_scir_register_cpu_notifier(void)
{
	hotcpu_notifier(uv_scir_cpu_notify, 0);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

static __init int uv_init_heartbeat(void)
{
	int cpu;

	if (is_uv_system())
		for_each_online_cpu(cpu)
			uv_heartbeat_enable(cpu);
	return 0;
}

late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */

/*
 * Called on each cpu to initialize the per_cpu UV data area.
 * FIXME: hotplug not supported yet
 */
void __cpuinit uv_cpu_init(void)
{
	/* CPU 0 initialization will be done via uv_system_init. */
	if (!uv_blade_info)
		return;

	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
		set_x2apic_extra_bits(uv_hub_info->pnode);
}

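/*
 * Boot-time UV setup: read the hub's M/N address map configuration, count
 * the blades present in UVH_NODE_PRESENT_TABLE, allocate the blade/node/cpu
 * lookup tables, fill in per-cpu uv_hub_info, and map the GRU, MMR and
 * MMIOH apertures up to the largest pnode found.
 */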
void __init uv_system_init(void)
{
	union uvh_si_addr_map_config_u m_n_config;
	union uvh_node_id_u node_id;
	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
	int gnode_extra, max_pnode = 0;
	unsigned long mmr_base, present, paddr;
	unsigned short pnode_mask;

	map_low_mmrs();

	m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
	m_val = m_n_config.s.m_skt;
	n_val = m_n_config.s.n_skt;
	mmr_base =
	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
	    ~UV_MMR_ENABLE;
	pnode_mask = (1 << n_val) - 1;
	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
	gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
	gnode_upper = ((unsigned long)gnode_extra << m_val);
	printk(KERN_DEBUG "UV: N %d, M %d, gnode_upper 0x%lx, gnode_extra 0x%x\n",
			n_val, m_val, gnode_upper, gnode_extra);

	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);

	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
		uv_possible_blades +=
		    hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));
	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
	uv_blade_info = kmalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_blade_info);
	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		uv_blade_info[blade].memory_nid = -1;

	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_node_to_blade);
	memset(uv_node_to_blade, 255, bytes);

	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
	BUG_ON(!uv_cpu_to_blade);
	memset(uv_cpu_to_blade, 255, bytes);

	blade = 0;
	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
		present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
		for (j = 0; j < 64; j++) {
			if (!test_bit(j, &present))
				continue;
			uv_blade_info[blade].pnode = (i * 64 + j);
			uv_blade_info[blade].nr_possible_cpus = 0;
			uv_blade_info[blade].nr_online_cpus = 0;
			blade++;
		}
	}

	uv_bios_init();
	uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
			    &sn_coherency_id, &sn_region_size);
	uv_rtc_init();
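	/*
	 * For each present cpu: derive its pnode from the apic id, charge
	 * it to the owning blade, and fill in the per-cpu uv_hub_info
	 * fields. max_pnode is tracked for the aperture mapping below.
	 */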
	for_each_present_cpu(cpu) {
		int apicid = per_cpu(x86_cpu_to_apicid, cpu);

		nid = cpu_to_node(cpu);
		pnode = uv_apicid_to_pnode(apicid);
		blade = boot_pnode_to_blade(pnode);
		lcpu = uv_blade_info[blade].nr_possible_cpus;
		uv_blade_info[blade].nr_possible_cpus++;

		/* Any node on the blade, else will contain -1. */
		uv_blade_info[blade].memory_nid = nid;

		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
		uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
		uv_cpu_hub_info(cpu)->m_val = m_val;
		uv_cpu_hub_info(cpu)->n_val = n_val;
		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
		uv_cpu_hub_info(cpu)->pnode = pnode;
		uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
		uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
		uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
		uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
		uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
		uv_node_to_blade[nid] = blade;
		uv_cpu_to_blade[cpu] = blade;
		max_pnode = max(pnode, max_pnode);

		printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
			cpu, apicid, pnode, nid, lcpu, blade);
	}

	/* Add blade/pnode info for nodes without cpus */
	for_each_online_node(nid) {
		if (uv_node_to_blade[nid] >= 0)
			continue;
		paddr = node_start_pfn(nid) << PAGE_SHIFT;
		paddr = uv_soc_phys_ram_to_gpa(paddr);
		pnode = (paddr >> m_val) & pnode_mask;
		blade = boot_pnode_to_blade(pnode);
		uv_node_to_blade[nid] = blade;
		max_pnode = max(pnode, max_pnode);
	}

	map_gru_high(max_pnode);
	map_mmr_high(max_pnode);
	map_mmioh_high(max_pnode);

	uv_cpu_init();
	uv_scir_register_cpu_notifier();
	proc_mkdir("sgi_uv", NULL);
}