/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/proc_fs.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/init.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/smp.h>

DEFINE_PER_CPU(int, x2apic_extra_bits);

static enum uv_system_type uv_system_type;

static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	if (!strcmp(oem_id, "SGI")) {
		if (!strcmp(oem_table_id, "UVL"))
			uv_system_type = UV_LEGACY_APIC;
		else if (!strcmp(oem_table_id, "UVX"))
			uv_system_type = UV_X2APIC;
		else if (!strcmp(oem_table_id, "UVH")) {
			uv_system_type = UV_NON_UNIQUE_APIC;
			return 1;
		}
	}
	return 0;
}

enum uv_system_type get_uv_system_type(void)
{
	return uv_system_type;
}

int is_uv_system(void)
{
	return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);

DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);

/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
static const struct cpumask *uv_target_cpus(void)
{
	return cpumask_of(0);
}

static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
#ifdef CONFIG_SMP
	unsigned long val;
	int pnode;

	pnode = uv_apicid_to_pnode(phys_apicid);
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	      APIC_DM_INIT;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
	mdelay(10);

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	      APIC_DM_STARTUP;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

	atomic_set(&init_deasserted, 1);
#endif
	return 0;
}

static void uv_send_IPI_one(int cpu, int vector)
{
	unsigned long val, apicid;
	int pnode;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	pnode = uv_apicid_to_pnode(apicid);

	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      (vector << UVH_IPI_INT_VECTOR_SHFT);

	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}

static void uv_send_IPI_allbutself(int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}

static void uv_send_IPI_all(int vector)
{
	uv_send_IPI_mask(cpu_online_mask, vector);
}

static int uv_apic_id_registered(void)
{
	return 1;
}

static void uv_init_apic_ldr(void)
{
}

static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	int cpu = cpumask_first(cpumask);

	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}
	if (cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);

	return BAD_APICID;
}

static unsigned int x2apic_get_apic_id(unsigned long x)
{
	unsigned int id;

	WARN_ON(preemptible() && num_online_cpus() > 1);
	id = x | __get_cpu_var(x2apic_extra_bits);

	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	/* maskout x2apic_extra_bits ? */
	x = id;
	return x;
}
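/*
 * Note: uv_read_apic_id() below returns the hardware APIC_ID register value
 * with this cpu's x2apic_extra_bits OR'ed in.  On UV_NON_UNIQUE_APIC ("UVH")
 * systems those bits carry the pnode (see set_x2apic_extra_bits()), so
 * callers see the full UV apicid rather than the raw register contents.
 */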
static unsigned int uv_read_apic_id(void)
{
	return x2apic_get_apic_id(apic_read(APIC_ID));
}

static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
	return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}

struct apic apic_x2apic_uv_x = {

	.name				= "UV large system",
	.probe				= NULL,
	.acpi_madt_oem_check		= uv_acpi_madt_oem_check,
	.apic_id_registered		= uv_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 1, /* logical */

	.target_cpus			= uv_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= uv_vector_allocation_domain,
	.init_apic_ldr			= uv_init_apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.apicid_to_node			= NULL,
	.cpu_to_logical_apicid		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= uv_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid		= uv_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= uv_cpu_mask_to_apicid_and,

	.send_IPI_mask			= uv_send_IPI_mask,
	.send_IPI_mask_allbutself	= uv_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= uv_send_IPI_allbutself,
	.send_IPI_all			= uv_send_IPI_all,
	.send_IPI_self			= uv_send_IPI_self,

	.wakeup_secondary_cpu		= uv_wakeup_secondary,
	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

static __cpuinit void set_x2apic_extra_bits(int pnode)
{
	__get_cpu_var(x2apic_extra_bits) = (pnode << 6);
}
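/*
 * The __init helpers below are used by uv_system_init() at boot to map the
 * local/global MMR spaces, look up the blades present, and locate the low
 * memory redirect window and the GRU/CONFIG/MMR/MMIOH overlay windows.
 */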
/*
 * Called on boot cpu.
 */
static __init int boot_pnode_to_blade(int pnode)
{
	int blade;

	for (blade = 0; blade < uv_num_possible_blades(); blade++)
		if (pnode == uv_blade_info[blade].pnode)
			return blade;
	BUG();
}

struct redir_addr {
	unsigned long redirect;
	unsigned long alias;
};

#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT

static __initdata struct redir_addr redir_addrs[] = {
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
};

static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
	union uvh_si_alias0_overlay_config_u alias;
	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
	int i;

	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
		if (alias.s.base == 0) {
			*size = (1UL << alias.s.m_alias);
			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
			return;
		}
	}
	BUG();
}

static __init void map_low_mmrs(void)
{
	init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
	init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}

enum map_type {map_wb, map_uc};

static __init void map_high(char *id, unsigned long base, int shift,
			    int max_pnode, enum map_type map_type)
{
	unsigned long bytes, paddr;

	paddr = base << shift;
	bytes = (1UL << shift) * (max_pnode + 1);
	printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
	       paddr + bytes);
	if (map_type == map_uc)
		init_extra_mapping_uc(paddr, bytes);
	else
		init_extra_mapping_wb(paddr, bytes);
}

static __init void map_gru_high(int max_pnode)
{
	union uvh_rh_gam_gru_overlay_config_mmr_u gru;
	int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;

	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
	if (gru.s.enable)
		map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
}

static __init void map_config_high(int max_pnode)
{
	union uvh_rh_gam_cfg_overlay_config_mmr_u cfg;
	int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT;

	cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR);
	if (cfg.s.enable)
		map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc);
}

static __init void map_mmr_high(int max_pnode)
{
	union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
	int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
	if (mmr.s.enable)
		map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
}

static __init void map_mmioh_high(int max_pnode)
{
	union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
	int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;

	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
	if (mmioh.s.enable)
		map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
}
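/*
 * RTC calibration: ask the UV BIOS for the real-time clock base frequency.
 * If the BIOS call fails, or reports an implausibly small value, fall back
 * to the hard-coded estimate below.
 */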
static __init void uv_rtc_init(void)
{
	long status;
	u64 ticks_per_sec;

	status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
				   &ticks_per_sec);
	if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
		printk(KERN_WARNING
			"unable to determine platform RTC clock frequency, "
			"guessing.\n");
		/* BIOS gives wrong value for clock freq. so guess */
		sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
	} else
		sn_rtc_cycles_per_second = ticks_per_sec;
}

/*
 * percpu heartbeat timer
 */
static void uv_heartbeat(unsigned long ignored)
{
	struct timer_list *timer = &uv_hub_info->scir.timer;
	unsigned char bits = uv_hub_info->scir.state;

	/* flip heartbeat bit */
	bits ^= SCIR_CPU_HEARTBEAT;

	/* is this cpu idle? */
	if (idle_cpu(raw_smp_processor_id()))
		bits &= ~SCIR_CPU_ACTIVITY;
	else
		bits |= SCIR_CPU_ACTIVITY;

	/* update system controller interface reg */
	uv_set_scir_bits(bits);

	/* enable next timer period */
	mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}

static void __cpuinit uv_heartbeat_enable(int cpu)
{
	if (!uv_cpu_hub_info(cpu)->scir.enabled) {
		struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;

		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
		setup_timer(timer, uv_heartbeat, cpu);
		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
		add_timer_on(timer, cpu);
		uv_cpu_hub_info(cpu)->scir.enabled = 1;
	}

	/* check boot cpu */
	if (!uv_cpu_hub_info(0)->scir.enabled)
		uv_heartbeat_enable(0);
}

#ifdef CONFIG_HOTPLUG_CPU
static void __cpuinit uv_heartbeat_disable(int cpu)
{
	if (uv_cpu_hub_info(cpu)->scir.enabled) {
		uv_cpu_hub_info(cpu)->scir.enabled = 0;
		del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
	}
	uv_set_cpu_scir_bits(cpu, 0xff);
}

/*
 * cpu hotplug notifier
 */
static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		uv_heartbeat_enable(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uv_heartbeat_disable(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static __init void uv_scir_register_cpu_notifier(void)
{
	hotcpu_notifier(uv_scir_cpu_notify, 0);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

static __init int uv_init_heartbeat(void)
{
	int cpu;

	if (is_uv_system())
		for_each_online_cpu(cpu)
			uv_heartbeat_enable(cpu);
	return 0;
}

late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */
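/*
 * With CPU hotplug configured, the heartbeat timers are started and stopped
 * by the notifier above as cpus come and go; without it, uv_init_heartbeat()
 * starts them once for all online cpus at late_initcall time.
 */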
/*
 * Called on each cpu to initialize the per_cpu UV data area.
 * FIXME: hotplug not supported yet
 */
void __cpuinit uv_cpu_init(void)
{
	/* CPU 0 initialization will be done via uv_system_init. */
	if (!uv_blade_info)
		return;

	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
		set_x2apic_extra_bits(uv_hub_info->pnode);
}


void __init uv_system_init(void)
{
	union uvh_si_addr_map_config_u m_n_config;
	union uvh_node_id_u node_id;
	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
	int max_pnode = 0;
	unsigned long mmr_base, present;

	map_low_mmrs();

	m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
	m_val = m_n_config.s.m_skt;
	n_val = m_n_config.s.n_skt;
	mmr_base =
	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
	    ~UV_MMR_ENABLE;
	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);

	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
		uv_possible_blades +=
		    hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));
	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
	uv_blade_info = kmalloc(bytes, GFP_KERNEL);

	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
	memset(uv_node_to_blade, 255, bytes);

	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
	memset(uv_cpu_to_blade, 255, bytes);

	blade = 0;
	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
		present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
		for (j = 0; j < 64; j++) {
			if (!test_bit(j, &present))
				continue;
			uv_blade_info[blade].pnode = (i * 64 + j);
			uv_blade_info[blade].nr_possible_cpus = 0;
			uv_blade_info[blade].nr_online_cpus = 0;
			blade++;
		}
	}

	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
	gnode_upper = (((unsigned long)node_id.s.node_id) &
		       ~((1 << n_val) - 1)) << m_val;

	uv_bios_init();
	uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
			    &sn_coherency_id, &sn_region_size);
	uv_rtc_init();

	for_each_present_cpu(cpu) {
		nid = cpu_to_node(cpu);
		pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
		blade = boot_pnode_to_blade(pnode);
		lcpu = uv_blade_info[blade].nr_possible_cpus;
		uv_blade_info[blade].nr_possible_cpus++;

		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
		uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
		uv_cpu_hub_info(cpu)->m_val = m_val;
		uv_cpu_hub_info(cpu)->n_val = n_val;
		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
		uv_cpu_hub_info(cpu)->pnode = pnode;
		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
		uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
		uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
		uv_node_to_blade[nid] = blade;
		uv_cpu_to_blade[cpu] = blade;
		max_pnode = max(pnode, max_pnode);

		printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
			"lcpu %d, blade %d\n",
			cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
			lcpu, blade);
	}

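	/*
	 * max_pnode was accumulated in the loop above; size the global
	 * address space windows so they cover every pnode seen.
	 */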
	map_gru_high(max_pnode);
	map_mmr_high(max_pnode);
	map_config_high(max_pnode);
	map_mmioh_high(max_pnode);

	uv_cpu_init();
	uv_scir_register_cpu_notifier();
	proc_mkdir("sgi_uv", NULL);
}