/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV APIC functions (note: not an Intel compatible APIC)
 *
 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
 */
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/proc_fs.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/init.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/smp.h>

DEFINE_PER_CPU(int, x2apic_extra_bits);

static enum uv_system_type uv_system_type;

static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	if (!strcmp(oem_id, "SGI")) {
		if (!strcmp(oem_table_id, "UVL"))
			uv_system_type = UV_LEGACY_APIC;
		else if (!strcmp(oem_table_id, "UVX"))
			uv_system_type = UV_X2APIC;
		else if (!strcmp(oem_table_id, "UVH")) {
			uv_system_type = UV_NON_UNIQUE_APIC;
			return 1;
		}
	}
	return 0;
}

enum uv_system_type get_uv_system_type(void)
{
	return uv_system_type;
}

int is_uv_system(void)
{
	return uv_system_type != UV_NONE;
}
EXPORT_SYMBOL_GPL(is_uv_system);

DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);

struct uv_blade_info *uv_blade_info;
EXPORT_SYMBOL_GPL(uv_blade_info);

short *uv_node_to_blade;
EXPORT_SYMBOL_GPL(uv_node_to_blade);

short *uv_cpu_to_blade;
EXPORT_SYMBOL_GPL(uv_cpu_to_blade);

short uv_possible_blades;
EXPORT_SYMBOL_GPL(uv_possible_blades);

unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);

/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
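/*
 * UV uses fixed (single-destination) IRQ delivery (dest_Fixed in
 * apic_x2apic_uv_x below), so the vector allocation domain for each
 * IRQ is restricted to exactly one CPU.
 */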

static const struct cpumask *uv_target_cpus(void)
{
	return cpumask_of(0);
}

static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
#ifdef CONFIG_SMP
	unsigned long val;
	int pnode;

	/* Assert INIT through the hub's IPI_INT MMR, then give the target
	 * the customary 10ms before the STARTUP IPI. */
	pnode = uv_apicid_to_pnode(phys_apicid);
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	      APIC_DM_INIT;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
	mdelay(10);

	/* STARTUP IPI: the vector field ends up holding start_rip >> 12,
	 * i.e. the 4K page number of the trampoline (start_rip is assumed
	 * page aligned). */
	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
	      (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
	      ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
	      APIC_DM_STARTUP;
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);

	atomic_set(&init_deasserted, 1);
#endif
	return 0;
}

static void uv_send_IPI_one(int cpu, int vector)
{
	unsigned long apicid;
	int pnode;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	pnode = uv_apicid_to_pnode(apicid);
	uv_hub_send_ipi(pnode, apicid, vector);
}

static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}

static void uv_send_IPI_allbutself(int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
	}
}

static void uv_send_IPI_all(int vector)
{
	uv_send_IPI_mask(cpu_online_mask, vector);
}

static int uv_apic_id_registered(void)
{
	return 1;
}

static void uv_init_apic_ldr(void)
{
}

static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	int cpu = cpumask_first(cpumask);

	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}
	if (cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);

	return BAD_APICID;
}
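/*
 * On UV_NON_UNIQUE_APIC systems the hardware APIC ID alone does not
 * identify a CPU; a per-cpu "extra bits" value derived from the pnode
 * (see set_x2apic_extra_bits() below) is OR'd into every APIC ID read.
 */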

static unsigned int x2apic_get_apic_id(unsigned long x)
{
	unsigned int id;

	WARN_ON(preemptible() && num_online_cpus() > 1);
	id = x | __get_cpu_var(x2apic_extra_bits);

	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	/* maskout x2apic_extra_bits ? */
	x = id;
	return x;
}

static unsigned int uv_read_apic_id(void)
{
	return x2apic_get_apic_id(apic_read(APIC_ID));
}

static int uv_phys_pkg_id(int initial_apicid, int index_msb)
{
	return uv_read_apic_id() >> index_msb;
}

static void uv_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}

struct apic apic_x2apic_uv_x = {

	.name = "UV large system",
	.probe = NULL,
	.acpi_madt_oem_check = uv_acpi_madt_oem_check,
	.apic_id_registered = uv_apic_id_registered,

	.irq_delivery_mode = dest_Fixed,
	.irq_dest_mode = 1, /* logical */

	.target_cpus = uv_target_cpus,
	.disable_esr = 0,
	.dest_logical = APIC_DEST_LOGICAL,
	.check_apicid_used = NULL,
	.check_apicid_present = NULL,

	.vector_allocation_domain = uv_vector_allocation_domain,
	.init_apic_ldr = uv_init_apic_ldr,

	.ioapic_phys_id_map = NULL,
	.setup_apic_routing = NULL,
	.multi_timer_check = NULL,
	.apicid_to_node = NULL,
	.cpu_to_logical_apicid = NULL,
	.cpu_present_to_apicid = default_cpu_present_to_apicid,
	.apicid_to_cpu_present = NULL,
	.setup_portio_remap = NULL,
	.check_phys_apicid_present = default_check_phys_apicid_present,
	.enable_apic_mode = NULL,
	.phys_pkg_id = uv_phys_pkg_id,
	.mps_oem_check = NULL,

	.get_apic_id = x2apic_get_apic_id,
	.set_apic_id = set_apic_id,
	.apic_id_mask = 0xFFFFFFFFu,

	.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,

	.send_IPI_mask = uv_send_IPI_mask,
	.send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
	.send_IPI_allbutself = uv_send_IPI_allbutself,
	.send_IPI_all = uv_send_IPI_all,
	.send_IPI_self = uv_send_IPI_self,

	.wakeup_secondary_cpu = uv_wakeup_secondary,
	.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert = NULL,
	.smp_callin_clear_local_apic = NULL,
	.inquire_remote_apic = NULL,

	.read = native_apic_msr_read,
	.write = native_apic_msr_write,
	.icr_read = native_x2apic_icr_read,
	.icr_write = native_x2apic_icr_write,
	.wait_icr_idle = native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};

static __cpuinit void set_x2apic_extra_bits(int pnode)
{
	__get_cpu_var(x2apic_extra_bits) = (pnode << 6);
}
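/*
 * The value stored above is what x2apic_get_apic_id() ORs back into every
 * APIC ID read; the shift of 6 suggests at most 64 APIC IDs per pnode
 * (an inference from the constant, not a documented guarantee).
 */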
309 */ 310 static __init int boot_pnode_to_blade(int pnode) 311 { 312 int blade; 313 314 for (blade = 0; blade < uv_num_possible_blades(); blade++) 315 if (pnode == uv_blade_info[blade].pnode) 316 return blade; 317 BUG(); 318 } 319 320 struct redir_addr { 321 unsigned long redirect; 322 unsigned long alias; 323 }; 324 325 #define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 326 327 static __initdata struct redir_addr redir_addrs[] = { 328 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG}, 329 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG}, 330 {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG}, 331 }; 332 333 static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) 334 { 335 union uvh_si_alias0_overlay_config_u alias; 336 union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect; 337 int i; 338 339 for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) { 340 alias.v = uv_read_local_mmr(redir_addrs[i].alias); 341 if (alias.s.base == 0) { 342 *size = (1UL << alias.s.m_alias); 343 redirect.v = uv_read_local_mmr(redir_addrs[i].redirect); 344 *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT; 345 return; 346 } 347 } 348 BUG(); 349 } 350 351 static __init void map_low_mmrs(void) 352 { 353 init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE); 354 init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE); 355 } 356 357 enum map_type {map_wb, map_uc}; 358 359 static __init void map_high(char *id, unsigned long base, int shift, 360 int max_pnode, enum map_type map_type) 361 { 362 unsigned long bytes, paddr; 363 364 paddr = base << shift; 365 bytes = (1UL << shift) * (max_pnode + 1); 366 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, 367 paddr + bytes); 368 if (map_type == map_uc) 369 init_extra_mapping_uc(paddr, bytes); 370 else 371 init_extra_mapping_wb(paddr, bytes); 372 373 } 374 static __init void map_gru_high(int max_pnode) 375 { 376 union uvh_rh_gam_gru_overlay_config_mmr_u gru; 377 int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT; 378 379 gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR); 380 if (gru.s.enable) 381 map_high("GRU", gru.s.base, shift, max_pnode, map_wb); 382 } 383 384 static __init void map_config_high(int max_pnode) 385 { 386 union uvh_rh_gam_cfg_overlay_config_mmr_u cfg; 387 int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT; 388 389 cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR); 390 if (cfg.s.enable) 391 map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc); 392 } 393 394 static __init void map_mmr_high(int max_pnode) 395 { 396 union uvh_rh_gam_mmr_overlay_config_mmr_u mmr; 397 int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT; 398 399 mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); 400 if (mmr.s.enable) 401 map_high("MMR", mmr.s.base, shift, max_pnode, map_uc); 402 } 403 404 static __init void map_mmioh_high(int max_pnode) 405 { 406 union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; 407 int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT; 408 409 mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); 410 if (mmioh.s.enable) 411 map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc); 412 } 413 414 static __init void uv_rtc_init(void) 415 { 416 long status; 417 u64 ticks_per_sec; 418 419 status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, 420 &ticks_per_sec); 421 if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) { 422 
static __init void uv_rtc_init(void)
{
	long status;
	u64 ticks_per_sec;

	status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
				   &ticks_per_sec);
	if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
		printk(KERN_WARNING
		       "unable to determine platform RTC clock frequency, "
		       "guessing.\n");
		/* BIOS gives wrong value for clock freq. so guess */
		sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
	} else
		sn_rtc_cycles_per_second = ticks_per_sec;
}

/*
 * percpu heartbeat timer
 */
static void uv_heartbeat(unsigned long ignored)
{
	struct timer_list *timer = &uv_hub_info->scir.timer;
	unsigned char bits = uv_hub_info->scir.state;

	/* flip heartbeat bit */
	bits ^= SCIR_CPU_HEARTBEAT;

	/* is this cpu idle? */
	if (idle_cpu(raw_smp_processor_id()))
		bits &= ~SCIR_CPU_ACTIVITY;
	else
		bits |= SCIR_CPU_ACTIVITY;

	/* update system controller interface reg */
	uv_set_scir_bits(bits);

	/* enable next timer period */
	mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
}

static void __cpuinit uv_heartbeat_enable(int cpu)
{
	if (!uv_cpu_hub_info(cpu)->scir.enabled) {
		struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;

		uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
		setup_timer(timer, uv_heartbeat, cpu);
		timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
		add_timer_on(timer, cpu);
		uv_cpu_hub_info(cpu)->scir.enabled = 1;
	}

	/* check boot cpu */
	if (!uv_cpu_hub_info(0)->scir.enabled)
		uv_heartbeat_enable(0);
}

#ifdef CONFIG_HOTPLUG_CPU
static void __cpuinit uv_heartbeat_disable(int cpu)
{
	if (uv_cpu_hub_info(cpu)->scir.enabled) {
		uv_cpu_hub_info(cpu)->scir.enabled = 0;
		del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
	}
	uv_set_cpu_scir_bits(cpu, 0xff);
}

/*
 * cpu hotplug notifier
 */
static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		uv_heartbeat_enable(cpu);
		break;
	case CPU_DOWN_PREPARE:
		uv_heartbeat_disable(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static __init void uv_scir_register_cpu_notifier(void)
{
	hotcpu_notifier(uv_scir_cpu_notify, 0);
}

#else /* !CONFIG_HOTPLUG_CPU */

static __init void uv_scir_register_cpu_notifier(void)
{
}

static __init int uv_init_heartbeat(void)
{
	int cpu;

	if (is_uv_system())
		for_each_online_cpu(cpu)
			uv_heartbeat_enable(cpu);
	return 0;
}

late_initcall(uv_init_heartbeat);

#endif /* !CONFIG_HOTPLUG_CPU */
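/*
 * Note: the SCIR heartbeat above is armed either from the CPU hotplug
 * notifier (CONFIG_HOTPLUG_CPU) or from the late initcall; uv_cpu_init()
 * below does not start it.
 */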

/*
 * Called on each cpu to initialize the per_cpu UV data area.
 * FIXME: hotplug not supported yet
 */
void __cpuinit uv_cpu_init(void)
{
	/* CPU 0 initialization will be done via uv_system_init. */
	if (!uv_blade_info)
		return;

	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;

	if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
		set_x2apic_extra_bits(uv_hub_info->pnode);
}


void __init uv_system_init(void)
{
	union uvh_si_addr_map_config_u m_n_config;
	union uvh_node_id_u node_id;
	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
	int max_pnode = 0;
	unsigned long mmr_base, present;

	map_low_mmrs();

	m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
	m_val = m_n_config.s.m_skt;
	n_val = m_n_config.s.n_skt;
	mmr_base =
	    uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
	    ~UV_MMR_ENABLE;
	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);

	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
		uv_possible_blades +=
		    hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));
	printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

	bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
	uv_blade_info = kmalloc(bytes, GFP_KERNEL);

	get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

	bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
	uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
	memset(uv_node_to_blade, 255, bytes);

	bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
	uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
	memset(uv_cpu_to_blade, 255, bytes);

	blade = 0;
	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
		present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
		for (j = 0; j < 64; j++) {
			if (!test_bit(j, &present))
				continue;
			uv_blade_info[blade].pnode = (i * 64 + j);
			uv_blade_info[blade].nr_possible_cpus = 0;
			uv_blade_info[blade].nr_online_cpus = 0;
			blade++;
		}
	}

	node_id.v = uv_read_local_mmr(UVH_NODE_ID);
	gnode_upper = (((unsigned long)node_id.s.node_id) &
		       ~((1 << n_val) - 1)) << m_val;

	uv_bios_init();
	uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
			    &sn_coherency_id, &sn_region_size);
	uv_rtc_init();

	for_each_present_cpu(cpu) {
		nid = cpu_to_node(cpu);
		pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
		blade = boot_pnode_to_blade(pnode);
		lcpu = uv_blade_info[blade].nr_possible_cpus;
		uv_blade_info[blade].nr_possible_cpus++;

		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
		uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
		uv_cpu_hub_info(cpu)->m_val = m_val;
		uv_cpu_hub_info(cpu)->n_val = n_val;
		uv_cpu_hub_info(cpu)->numa_blade_id = blade;
		uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
		uv_cpu_hub_info(cpu)->pnode = pnode;
		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
		uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
		uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
		uv_node_to_blade[nid] = blade;
		uv_cpu_to_blade[cpu] = blade;
		max_pnode = max(pnode, max_pnode);

		printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
		       "lcpu %d, blade %d\n",
		       cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
		       lcpu, blade);
	}
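
	/*
	 * Map the GRU, MMR, config and MMIOH apertures for every pnode up
	 * to the largest one seen above.
	 */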
	map_gru_high(max_pnode);
	map_mmr_high(max_pnode);
	map_config_high(max_pnode);
	map_mmioh_high(max_pnode);

	uv_cpu_init();
	uv_scir_register_cpu_notifier();
	proc_mkdir("sgi_uv", NULL);
}