1 /* 2 * SGI NMI support routines 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write to the Free Software 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * 18 * Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved. 19 * Copyright (c) Mike Travis 20 */ 21 22 #include <linux/cpu.h> 23 #include <linux/delay.h> 24 #include <linux/kdb.h> 25 #include <linux/kexec.h> 26 #include <linux/kgdb.h> 27 #include <linux/moduleparam.h> 28 #include <linux/nmi.h> 29 #include <linux/sched.h> 30 #include <linux/slab.h> 31 #include <linux/clocksource.h> 32 33 #include <asm/apic.h> 34 #include <asm/current.h> 35 #include <asm/kdebug.h> 36 #include <asm/local64.h> 37 #include <asm/nmi.h> 38 #include <asm/traps.h> 39 #include <asm/uv/uv.h> 40 #include <asm/uv/uv_hub.h> 41 #include <asm/uv/uv_mmrs.h> 42 43 /* 44 * UV handler for NMI 45 * 46 * Handle system-wide NMI events generated by the global 'power nmi' command. 47 * 48 * Basic operation is to field the NMI interrupt on each CPU and wait 49 * until all CPU's have arrived into the nmi handler. If some CPU's do not 50 * make it into the handler, try and force them in with the IPI(NMI) signal. 51 * 52 * We also have to lessen UV Hub MMR accesses as much as possible as this 53 * disrupts the UV Hub's primary mission of directing NumaLink traffic and 54 * can cause system problems to occur. 55 * 56 * To do this we register our primary NMI notifier on the NMI_UNKNOWN 57 * chain. This reduces the number of false NMI calls when the perf 58 * tools are running which generate an enormous number of NMIs per 59 * second (~4M/s for 1024 CPU threads). Our secondary NMI handler is 60 * very short as it only checks that if it has been "pinged" with the 61 * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR. 62 * 63 */ 64 65 static struct uv_hub_nmi_s **uv_hub_nmi_list; 66 67 DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi); 68 69 /* UV hubless values */ 70 #define NMI_CONTROL_PORT 0x70 71 #define NMI_DUMMY_PORT 0x71 72 #define PAD_OWN_GPP_D_0 0x2c 73 #define GPI_NMI_STS_GPP_D_0 0x164 74 #define GPI_NMI_ENA_GPP_D_0 0x174 75 #define STS_GPP_D_0_MASK 0x1 76 #define PAD_CFG_DW0_GPP_D_0 0x4c0 77 #define GPIROUTNMI (1ul << 17) 78 #define PCH_PCR_GPIO_1_BASE 0xfdae0000ul 79 #define PCH_PCR_GPIO_ADDRESS(offset) (int *)((u64)(pch_base) | (u64)(offset)) 80 81 static u64 *pch_base; 82 static unsigned long nmi_mmr; 83 static unsigned long nmi_mmr_clear; 84 static unsigned long nmi_mmr_pending; 85 86 static atomic_t uv_in_nmi; 87 static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1); 88 static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1); 89 static atomic_t uv_nmi_slave_continue; 90 static cpumask_var_t uv_nmi_cpu_mask; 91 92 /* Values for uv_nmi_slave_continue */ 93 #define SLAVE_CLEAR 0 94 #define SLAVE_CONTINUE 1 95 #define SLAVE_EXIT 2 96 97 /* 98 * Default is all stack dumps go to the console and buffer. 99 * Lower level to send to log buffer only. 100 */ 101 static int uv_nmi_loglevel = CONSOLE_LOGLEVEL_DEFAULT; 102 module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644); 103 104 /* 105 * The following values show statistics on how perf events are affecting 106 * this system. 107 */ 108 static int param_get_local64(char *buffer, const struct kernel_param *kp) 109 { 110 return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg)); 111 } 112 113 static int param_set_local64(const char *val, const struct kernel_param *kp) 114 { 115 /* Clear on any write */ 116 local64_set((local64_t *)kp->arg, 0); 117 return 0; 118 } 119 120 static const struct kernel_param_ops param_ops_local64 = { 121 .get = param_get_local64, 122 .set = param_set_local64, 123 }; 124 #define param_check_local64(name, p) __param_check(name, p, local64_t) 125 126 static local64_t uv_nmi_count; 127 module_param_named(nmi_count, uv_nmi_count, local64, 0644); 128 129 static local64_t uv_nmi_misses; 130 module_param_named(nmi_misses, uv_nmi_misses, local64, 0644); 131 132 static local64_t uv_nmi_ping_count; 133 module_param_named(ping_count, uv_nmi_ping_count, local64, 0644); 134 135 static local64_t uv_nmi_ping_misses; 136 module_param_named(ping_misses, uv_nmi_ping_misses, local64, 0644); 137 138 /* 139 * Following values allow tuning for large systems under heavy loading 140 */ 141 static int uv_nmi_initial_delay = 100; 142 module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644); 143 144 static int uv_nmi_slave_delay = 100; 145 module_param_named(slave_delay, uv_nmi_slave_delay, int, 0644); 146 147 static int uv_nmi_loop_delay = 100; 148 module_param_named(loop_delay, uv_nmi_loop_delay, int, 0644); 149 150 static int uv_nmi_trigger_delay = 10000; 151 module_param_named(trigger_delay, uv_nmi_trigger_delay, int, 0644); 152 153 static int uv_nmi_wait_count = 100; 154 module_param_named(wait_count, uv_nmi_wait_count, int, 0644); 155 156 static int uv_nmi_retry_count = 500; 157 module_param_named(retry_count, uv_nmi_retry_count, int, 0644); 158 159 static bool uv_pch_intr_enable = true; 160 static bool uv_pch_intr_now_enabled; 161 module_param_named(pch_intr_enable, uv_pch_intr_enable, bool, 0644); 162 163 static bool uv_pch_init_enable = true; 164 module_param_named(pch_init_enable, uv_pch_init_enable, bool, 0644); 165 166 static int uv_nmi_debug; 167 module_param_named(debug, uv_nmi_debug, int, 0644); 168 169 #define nmi_debug(fmt, ...) \ 170 do { \ 171 if (uv_nmi_debug) \ 172 pr_info(fmt, ##__VA_ARGS__); \ 173 } while (0) 174 175 /* Valid NMI Actions */ 176 #define ACTION_LEN 16 177 static struct nmi_action { 178 char *action; 179 char *desc; 180 } valid_acts[] = { 181 { "kdump", "do kernel crash dump" }, 182 { "dump", "dump process stack for each cpu" }, 183 { "ips", "dump Inst Ptr info for each cpu" }, 184 { "kdb", "enter KDB (needs kgdboc= assignment)" }, 185 { "kgdb", "enter KGDB (needs gdb target remote)" }, 186 { "health", "check if CPUs respond to NMI" }, 187 }; 188 typedef char action_t[ACTION_LEN]; 189 static action_t uv_nmi_action = { "dump" }; 190 191 static int param_get_action(char *buffer, const struct kernel_param *kp) 192 { 193 return sprintf(buffer, "%s\n", uv_nmi_action); 194 } 195 196 static int param_set_action(const char *val, const struct kernel_param *kp) 197 { 198 int i; 199 int n = ARRAY_SIZE(valid_acts); 200 char arg[ACTION_LEN], *p; 201 202 /* (remove possible '\n') */ 203 strncpy(arg, val, ACTION_LEN - 1); 204 arg[ACTION_LEN - 1] = '\0'; 205 p = strchr(arg, '\n'); 206 if (p) 207 *p = '\0'; 208 209 for (i = 0; i < n; i++) 210 if (!strcmp(arg, valid_acts[i].action)) 211 break; 212 213 if (i < n) { 214 strcpy(uv_nmi_action, arg); 215 pr_info("UV: New NMI action:%s\n", uv_nmi_action); 216 return 0; 217 } 218 219 pr_err("UV: Invalid NMI action:%s, valid actions are:\n", arg); 220 for (i = 0; i < n; i++) 221 pr_err("UV: %-8s - %s\n", 222 valid_acts[i].action, valid_acts[i].desc); 223 return -EINVAL; 224 } 225 226 static const struct kernel_param_ops param_ops_action = { 227 .get = param_get_action, 228 .set = param_set_action, 229 }; 230 #define param_check_action(name, p) __param_check(name, p, action_t) 231 232 module_param_named(action, uv_nmi_action, action, 0644); 233 234 static inline bool uv_nmi_action_is(const char *action) 235 { 236 return (strncmp(uv_nmi_action, action, strlen(action)) == 0); 237 } 238 239 /* Setup which NMI support is present in system */ 240 static void uv_nmi_setup_mmrs(void) 241 { 242 if (uv_read_local_mmr(UVH_NMI_MMRX_SUPPORTED)) { 243 uv_write_local_mmr(UVH_NMI_MMRX_REQ, 244 1UL << UVH_NMI_MMRX_REQ_SHIFT); 245 nmi_mmr = UVH_NMI_MMRX; 246 nmi_mmr_clear = UVH_NMI_MMRX_CLEAR; 247 nmi_mmr_pending = 1UL << UVH_NMI_MMRX_SHIFT; 248 pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMRX_TYPE); 249 } else { 250 nmi_mmr = UVH_NMI_MMR; 251 nmi_mmr_clear = UVH_NMI_MMR_CLEAR; 252 nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT; 253 pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE); 254 } 255 } 256 257 /* Read NMI MMR and check if NMI flag was set by BMC. */ 258 static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi) 259 { 260 hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr); 261 atomic_inc(&hub_nmi->read_mmr_count); 262 return !!(hub_nmi->nmi_value & nmi_mmr_pending); 263 } 264 265 static inline void uv_local_mmr_clear_nmi(void) 266 { 267 uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending); 268 } 269 270 /* 271 * UV hubless NMI handler functions 272 */ 273 static inline void uv_reassert_nmi(void) 274 { 275 /* (from arch/x86/include/asm/mach_traps.h) */ 276 outb(0x8f, NMI_CONTROL_PORT); 277 inb(NMI_DUMMY_PORT); /* dummy read */ 278 outb(0x0f, NMI_CONTROL_PORT); 279 inb(NMI_DUMMY_PORT); /* dummy read */ 280 } 281 282 static void uv_init_hubless_pch_io(int offset, int mask, int data) 283 { 284 int *addr = PCH_PCR_GPIO_ADDRESS(offset); 285 int readd = readl(addr); 286 287 if (mask) { /* OR in new data */ 288 int writed = (readd & ~mask) | data; 289 290 nmi_debug("UV:PCH: %p = %x & %x | %x (%x)\n", 291 addr, readd, ~mask, data, writed); 292 writel(writed, addr); 293 } else if (readd & data) { /* clear status bit */ 294 nmi_debug("UV:PCH: %p = %x\n", addr, data); 295 writel(data, addr); 296 } 297 298 (void)readl(addr); /* flush write data */ 299 } 300 301 static void uv_nmi_setup_hubless_intr(void) 302 { 303 uv_pch_intr_now_enabled = uv_pch_intr_enable; 304 305 uv_init_hubless_pch_io( 306 PAD_CFG_DW0_GPP_D_0, GPIROUTNMI, 307 uv_pch_intr_now_enabled ? GPIROUTNMI : 0); 308 309 nmi_debug("UV:NMI: GPP_D_0 interrupt %s\n", 310 uv_pch_intr_now_enabled ? "enabled" : "disabled"); 311 } 312 313 static struct init_nmi { 314 unsigned int offset; 315 unsigned int mask; 316 unsigned int data; 317 } init_nmi[] = { 318 { /* HOSTSW_OWN_GPP_D_0 */ 319 .offset = 0x84, 320 .mask = 0x1, 321 .data = 0x0, /* ACPI Mode */ 322 }, 323 324 /* Clear status: */ 325 { /* GPI_INT_STS_GPP_D_0 */ 326 .offset = 0x104, 327 .mask = 0x0, 328 .data = 0x1, /* Clear Status */ 329 }, 330 { /* GPI_GPE_STS_GPP_D_0 */ 331 .offset = 0x124, 332 .mask = 0x0, 333 .data = 0x1, /* Clear Status */ 334 }, 335 { /* GPI_SMI_STS_GPP_D_0 */ 336 .offset = 0x144, 337 .mask = 0x0, 338 .data = 0x1, /* Clear Status */ 339 }, 340 { /* GPI_NMI_STS_GPP_D_0 */ 341 .offset = 0x164, 342 .mask = 0x0, 343 .data = 0x1, /* Clear Status */ 344 }, 345 346 /* Disable interrupts: */ 347 { /* GPI_INT_EN_GPP_D_0 */ 348 .offset = 0x114, 349 .mask = 0x1, 350 .data = 0x0, /* Disable interrupt generation */ 351 }, 352 { /* GPI_GPE_EN_GPP_D_0 */ 353 .offset = 0x134, 354 .mask = 0x1, 355 .data = 0x0, /* Disable interrupt generation */ 356 }, 357 { /* GPI_SMI_EN_GPP_D_0 */ 358 .offset = 0x154, 359 .mask = 0x1, 360 .data = 0x0, /* Disable interrupt generation */ 361 }, 362 { /* GPI_NMI_EN_GPP_D_0 */ 363 .offset = 0x174, 364 .mask = 0x1, 365 .data = 0x0, /* Disable interrupt generation */ 366 }, 367 368 /* Setup GPP_D_0 Pad Config: */ 369 { /* PAD_CFG_DW0_GPP_D_0 */ 370 .offset = 0x4c0, 371 .mask = 0xffffffff, 372 .data = 0x82020100, 373 /* 374 * 31:30 Pad Reset Config (PADRSTCFG): = 2h # PLTRST# (default) 375 * 376 * 29 RX Pad State Select (RXPADSTSEL): = 0 # Raw RX pad state directly 377 * from RX buffer (default) 378 * 379 * 28 RX Raw Override to '1' (RXRAW1): = 0 # No Override 380 * 381 * 26:25 RX Level/Edge Configuration (RXEVCFG): 382 * = 0h # Level 383 * = 1h # Edge 384 * 385 * 23 RX Invert (RXINV): = 0 # No Inversion (signal active high) 386 * 387 * 20 GPIO Input Route IOxAPIC (GPIROUTIOXAPIC): 388 * = 0 # Routing does not cause peripheral IRQ... 389 * # (we want an NMI not an IRQ) 390 * 391 * 19 GPIO Input Route SCI (GPIROUTSCI): = 0 # Routing does not cause SCI. 392 * 18 GPIO Input Route SMI (GPIROUTSMI): = 0 # Routing does not cause SMI. 393 * 17 GPIO Input Route NMI (GPIROUTNMI): = 1 # Routing can cause NMI. 394 * 395 * 11:10 Pad Mode (PMODE1/0): = 0h = GPIO control the Pad. 396 * 9 GPIO RX Disable (GPIORXDIS): 397 * = 0 # Enable the input buffer (active low enable) 398 * 399 * 8 GPIO TX Disable (GPIOTXDIS): 400 * = 1 # Disable the output buffer; i.e. Hi-Z 401 * 402 * 1 GPIO RX State (GPIORXSTATE): This is the current internal RX pad state.. 403 * 0 GPIO TX State (GPIOTXSTATE): 404 * = 0 # (Leave at default) 405 */ 406 }, 407 408 /* Pad Config DW1 */ 409 { /* PAD_CFG_DW1_GPP_D_0 */ 410 .offset = 0x4c4, 411 .mask = 0x3c00, 412 .data = 0, /* Termination = none (default) */ 413 }, 414 }; 415 416 static void uv_init_hubless_pch_d0(void) 417 { 418 int i, read; 419 420 read = *PCH_PCR_GPIO_ADDRESS(PAD_OWN_GPP_D_0); 421 if (read != 0) { 422 pr_info("UV: Hubless NMI already configured\n"); 423 return; 424 } 425 426 nmi_debug("UV: Initializing UV Hubless NMI on PCH\n"); 427 for (i = 0; i < ARRAY_SIZE(init_nmi); i++) { 428 uv_init_hubless_pch_io(init_nmi[i].offset, 429 init_nmi[i].mask, 430 init_nmi[i].data); 431 } 432 } 433 434 static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi) 435 { 436 int *pstat = PCH_PCR_GPIO_ADDRESS(GPI_NMI_STS_GPP_D_0); 437 int status = *pstat; 438 439 hub_nmi->nmi_value = status; 440 atomic_inc(&hub_nmi->read_mmr_count); 441 442 if (!(status & STS_GPP_D_0_MASK)) /* Not a UV external NMI */ 443 return 0; 444 445 *pstat = STS_GPP_D_0_MASK; /* Is a UV NMI: clear GPP_D_0 status */ 446 (void)*pstat; /* Flush write */ 447 448 return 1; 449 } 450 451 static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi) 452 { 453 if (hub_nmi->hub_present) 454 return uv_nmi_test_mmr(hub_nmi); 455 456 if (hub_nmi->pch_owner) /* Only PCH owner can check status */ 457 return uv_nmi_test_hubless(hub_nmi); 458 459 return -1; 460 } 461 462 /* 463 * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and 464 * return true. If first CPU in on the system, set global "in_nmi" flag. 465 */ 466 static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi) 467 { 468 int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1); 469 470 if (first) { 471 atomic_set(&hub_nmi->cpu_owner, cpu); 472 if (atomic_add_unless(&uv_in_nmi, 1, 1)) 473 atomic_set(&uv_nmi_cpu, cpu); 474 475 atomic_inc(&hub_nmi->nmi_count); 476 } 477 return first; 478 } 479 480 /* Check if this is a system NMI event */ 481 static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi) 482 { 483 int cpu = smp_processor_id(); 484 int nmi = 0; 485 int nmi_detected = 0; 486 487 local64_inc(&uv_nmi_count); 488 this_cpu_inc(uv_cpu_nmi.queries); 489 490 do { 491 nmi = atomic_read(&hub_nmi->in_nmi); 492 if (nmi) 493 break; 494 495 if (raw_spin_trylock(&hub_nmi->nmi_lock)) { 496 nmi_detected = uv_test_nmi(hub_nmi); 497 498 /* Check flag for UV external NMI */ 499 if (nmi_detected > 0) { 500 uv_set_in_nmi(cpu, hub_nmi); 501 nmi = 1; 502 break; 503 } 504 505 /* A non-PCH node in a hubless system waits for NMI */ 506 else if (nmi_detected < 0) 507 goto slave_wait; 508 509 /* MMR/PCH NMI flag is clear */ 510 raw_spin_unlock(&hub_nmi->nmi_lock); 511 512 } else { 513 514 /* Wait a moment for the HUB NMI locker to set flag */ 515 slave_wait: cpu_relax(); 516 udelay(uv_nmi_slave_delay); 517 518 /* Re-check hub in_nmi flag */ 519 nmi = atomic_read(&hub_nmi->in_nmi); 520 if (nmi) 521 break; 522 } 523 524 /* 525 * Check if this BMC missed setting the MMR NMI flag (or) 526 * UV hubless system where only PCH owner can check flag 527 */ 528 if (!nmi) { 529 nmi = atomic_read(&uv_in_nmi); 530 if (nmi) 531 uv_set_in_nmi(cpu, hub_nmi); 532 } 533 534 /* If we're holding the hub lock, release it now */ 535 if (nmi_detected < 0) 536 raw_spin_unlock(&hub_nmi->nmi_lock); 537 538 } while (0); 539 540 if (!nmi) 541 local64_inc(&uv_nmi_misses); 542 543 return nmi; 544 } 545 546 /* Need to reset the NMI MMR register, but only once per hub. */ 547 static inline void uv_clear_nmi(int cpu) 548 { 549 struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi; 550 551 if (cpu == atomic_read(&hub_nmi->cpu_owner)) { 552 atomic_set(&hub_nmi->cpu_owner, -1); 553 atomic_set(&hub_nmi->in_nmi, 0); 554 if (hub_nmi->hub_present) 555 uv_local_mmr_clear_nmi(); 556 else 557 uv_reassert_nmi(); 558 raw_spin_unlock(&hub_nmi->nmi_lock); 559 } 560 } 561 562 /* Ping non-responding CPU's attemping to force them into the NMI handler */ 563 static void uv_nmi_nr_cpus_ping(void) 564 { 565 int cpu; 566 567 for_each_cpu(cpu, uv_nmi_cpu_mask) 568 uv_cpu_nmi_per(cpu).pinging = 1; 569 570 apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI); 571 } 572 573 /* Clean up flags for CPU's that ignored both NMI and ping */ 574 static void uv_nmi_cleanup_mask(void) 575 { 576 int cpu; 577 578 for_each_cpu(cpu, uv_nmi_cpu_mask) { 579 uv_cpu_nmi_per(cpu).pinging = 0; 580 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT; 581 cpumask_clear_cpu(cpu, uv_nmi_cpu_mask); 582 } 583 } 584 585 /* Loop waiting as CPU's enter NMI handler */ 586 static int uv_nmi_wait_cpus(int first) 587 { 588 int i, j, k, n = num_online_cpus(); 589 int last_k = 0, waiting = 0; 590 int cpu = smp_processor_id(); 591 592 if (first) { 593 cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask); 594 k = 0; 595 } else { 596 k = n - cpumask_weight(uv_nmi_cpu_mask); 597 } 598 599 /* PCH NMI causes only one CPU to respond */ 600 if (first && uv_pch_intr_now_enabled) { 601 cpumask_clear_cpu(cpu, uv_nmi_cpu_mask); 602 return n - k - 1; 603 } 604 605 udelay(uv_nmi_initial_delay); 606 for (i = 0; i < uv_nmi_retry_count; i++) { 607 int loop_delay = uv_nmi_loop_delay; 608 609 for_each_cpu(j, uv_nmi_cpu_mask) { 610 if (uv_cpu_nmi_per(j).state) { 611 cpumask_clear_cpu(j, uv_nmi_cpu_mask); 612 if (++k >= n) 613 break; 614 } 615 } 616 if (k >= n) { /* all in? */ 617 k = n; 618 break; 619 } 620 if (last_k != k) { /* abort if no new CPU's coming in */ 621 last_k = k; 622 waiting = 0; 623 } else if (++waiting > uv_nmi_wait_count) 624 break; 625 626 /* Extend delay if waiting only for CPU 0: */ 627 if (waiting && (n - k) == 1 && 628 cpumask_test_cpu(0, uv_nmi_cpu_mask)) 629 loop_delay *= 100; 630 631 udelay(loop_delay); 632 } 633 atomic_set(&uv_nmi_cpus_in_nmi, k); 634 return n - k; 635 } 636 637 /* Wait until all slave CPU's have entered UV NMI handler */ 638 static void uv_nmi_wait(int master) 639 { 640 /* Indicate this CPU is in: */ 641 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN); 642 643 /* If not the first CPU in (the master), then we are a slave CPU */ 644 if (!master) 645 return; 646 647 do { 648 /* Wait for all other CPU's to gather here */ 649 if (!uv_nmi_wait_cpus(1)) 650 break; 651 652 /* If not all made it in, send IPI NMI to them */ 653 pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n", 654 cpumask_weight(uv_nmi_cpu_mask), 655 cpumask_pr_args(uv_nmi_cpu_mask)); 656 657 uv_nmi_nr_cpus_ping(); 658 659 /* If all CPU's are in, then done */ 660 if (!uv_nmi_wait_cpus(0)) 661 break; 662 663 pr_alert("UV: %d CPUs not in NMI loop: %*pbl\n", 664 cpumask_weight(uv_nmi_cpu_mask), 665 cpumask_pr_args(uv_nmi_cpu_mask)); 666 } while (0); 667 668 pr_alert("UV: %d of %d CPUs in NMI\n", 669 atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus()); 670 } 671 672 /* Dump Instruction Pointer header */ 673 static void uv_nmi_dump_cpu_ip_hdr(void) 674 { 675 pr_info("\nUV: %4s %6s %-32s %s (Note: PID 0 not listed)\n", 676 "CPU", "PID", "COMMAND", "IP"); 677 } 678 679 /* Dump Instruction Pointer info */ 680 static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs) 681 { 682 pr_info("UV: %4d %6d %-32.32s %pS", 683 cpu, current->pid, current->comm, (void *)regs->ip); 684 } 685 686 /* 687 * Dump this CPU's state. If action was set to "kdump" and the crash_kexec 688 * failed, then we provide "dump" as an alternate action. Action "dump" now 689 * also includes the show "ips" (instruction pointers) action whereas the 690 * action "ips" only displays instruction pointers for the non-idle CPU's. 691 * This is an abbreviated form of the "ps" command. 692 */ 693 static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs) 694 { 695 const char *dots = " ................................. "; 696 697 if (cpu == 0) 698 uv_nmi_dump_cpu_ip_hdr(); 699 700 if (current->pid != 0 || !uv_nmi_action_is("ips")) 701 uv_nmi_dump_cpu_ip(cpu, regs); 702 703 if (uv_nmi_action_is("dump")) { 704 pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu); 705 show_regs(regs); 706 } 707 708 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE); 709 } 710 711 /* Trigger a slave CPU to dump it's state */ 712 static void uv_nmi_trigger_dump(int cpu) 713 { 714 int retry = uv_nmi_trigger_delay; 715 716 if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN) 717 return; 718 719 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP; 720 do { 721 cpu_relax(); 722 udelay(10); 723 if (uv_cpu_nmi_per(cpu).state 724 != UV_NMI_STATE_DUMP) 725 return; 726 } while (--retry > 0); 727 728 pr_crit("UV: CPU %d stuck in process dump function\n", cpu); 729 uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE; 730 } 731 732 /* Wait until all CPU's ready to exit */ 733 static void uv_nmi_sync_exit(int master) 734 { 735 atomic_dec(&uv_nmi_cpus_in_nmi); 736 if (master) { 737 while (atomic_read(&uv_nmi_cpus_in_nmi) > 0) 738 cpu_relax(); 739 atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR); 740 } else { 741 while (atomic_read(&uv_nmi_slave_continue)) 742 cpu_relax(); 743 } 744 } 745 746 /* Current "health" check is to check which CPU's are responsive */ 747 static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master) 748 { 749 if (master) { 750 int in = atomic_read(&uv_nmi_cpus_in_nmi); 751 int out = num_online_cpus() - in; 752 753 pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out); 754 atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT); 755 } else { 756 while (!atomic_read(&uv_nmi_slave_continue)) 757 cpu_relax(); 758 } 759 uv_nmi_sync_exit(master); 760 } 761 762 /* Walk through CPU list and dump state of each */ 763 static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master) 764 { 765 if (master) { 766 int tcpu; 767 int ignored = 0; 768 int saved_console_loglevel = console_loglevel; 769 770 pr_alert("UV: tracing %s for %d CPUs from CPU %d\n", 771 uv_nmi_action_is("ips") ? "IPs" : "processes", 772 atomic_read(&uv_nmi_cpus_in_nmi), cpu); 773 774 console_loglevel = uv_nmi_loglevel; 775 atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT); 776 for_each_online_cpu(tcpu) { 777 if (cpumask_test_cpu(tcpu, uv_nmi_cpu_mask)) 778 ignored++; 779 else if (tcpu == cpu) 780 uv_nmi_dump_state_cpu(tcpu, regs); 781 else 782 uv_nmi_trigger_dump(tcpu); 783 } 784 if (ignored) 785 pr_alert("UV: %d CPUs ignored NMI\n", ignored); 786 787 console_loglevel = saved_console_loglevel; 788 pr_alert("UV: process trace complete\n"); 789 } else { 790 while (!atomic_read(&uv_nmi_slave_continue)) 791 cpu_relax(); 792 while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP) 793 cpu_relax(); 794 uv_nmi_dump_state_cpu(cpu, regs); 795 } 796 uv_nmi_sync_exit(master); 797 } 798 799 static void uv_nmi_touch_watchdogs(void) 800 { 801 touch_softlockup_watchdog_sync(); 802 clocksource_touch_watchdog(); 803 rcu_cpu_stall_reset(); 804 touch_nmi_watchdog(); 805 } 806 807 static atomic_t uv_nmi_kexec_failed; 808 809 #if defined(CONFIG_KEXEC_CORE) 810 static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs) 811 { 812 /* Call crash to dump system state */ 813 if (master) { 814 pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu); 815 crash_kexec(regs); 816 817 pr_emerg("UV: crash_kexec unexpectedly returned, "); 818 atomic_set(&uv_nmi_kexec_failed, 1); 819 if (!kexec_crash_image) { 820 pr_cont("crash kernel not loaded\n"); 821 return; 822 } 823 pr_cont("kexec busy, stalling cpus while waiting\n"); 824 } 825 826 /* If crash exec fails the slaves should return, otherwise stall */ 827 while (atomic_read(&uv_nmi_kexec_failed) == 0) 828 mdelay(10); 829 } 830 831 #else /* !CONFIG_KEXEC_CORE */ 832 static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs) 833 { 834 if (master) 835 pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n"); 836 atomic_set(&uv_nmi_kexec_failed, 1); 837 } 838 #endif /* !CONFIG_KEXEC_CORE */ 839 840 #ifdef CONFIG_KGDB 841 #ifdef CONFIG_KGDB_KDB 842 static inline int uv_nmi_kdb_reason(void) 843 { 844 return KDB_REASON_SYSTEM_NMI; 845 } 846 #else /* !CONFIG_KGDB_KDB */ 847 static inline int uv_nmi_kdb_reason(void) 848 { 849 /* Ensure user is expecting to attach gdb remote */ 850 if (uv_nmi_action_is("kgdb")) 851 return 0; 852 853 pr_err("UV: NMI error: KDB is not enabled in this kernel\n"); 854 return -1; 855 } 856 #endif /* CONFIG_KGDB_KDB */ 857 858 /* 859 * Call KGDB/KDB from NMI handler 860 * 861 * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or 862 * 'kdb' has no affect on which is used. See the KGDB documention for further 863 * information. 864 */ 865 static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master) 866 { 867 if (master) { 868 int reason = uv_nmi_kdb_reason(); 869 int ret; 870 871 if (reason < 0) 872 return; 873 874 /* Call KGDB NMI handler as MASTER */ 875 ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason, 876 &uv_nmi_slave_continue); 877 if (ret) { 878 pr_alert("KGDB returned error, is kgdboc set?\n"); 879 atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT); 880 } 881 } else { 882 /* Wait for KGDB signal that it's ready for slaves to enter */ 883 int sig; 884 885 do { 886 cpu_relax(); 887 sig = atomic_read(&uv_nmi_slave_continue); 888 } while (!sig); 889 890 /* Call KGDB as slave */ 891 if (sig == SLAVE_CONTINUE) 892 kgdb_nmicallback(cpu, regs); 893 } 894 uv_nmi_sync_exit(master); 895 } 896 897 #else /* !CONFIG_KGDB */ 898 static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master) 899 { 900 pr_err("UV: NMI error: KGDB is not enabled in this kernel\n"); 901 } 902 #endif /* !CONFIG_KGDB */ 903 904 /* 905 * UV NMI handler 906 */ 907 int uv_handle_nmi(unsigned int reason, struct pt_regs *regs) 908 { 909 struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi; 910 int cpu = smp_processor_id(); 911 int master = 0; 912 unsigned long flags; 913 914 local_irq_save(flags); 915 916 /* If not a UV System NMI, ignore */ 917 if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) { 918 local_irq_restore(flags); 919 return NMI_DONE; 920 } 921 922 /* Indicate we are the first CPU into the NMI handler */ 923 master = (atomic_read(&uv_nmi_cpu) == cpu); 924 925 /* If NMI action is "kdump", then attempt to do it */ 926 if (uv_nmi_action_is("kdump")) { 927 uv_nmi_kdump(cpu, master, regs); 928 929 /* Unexpected return, revert action to "dump" */ 930 if (master) 931 strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action)); 932 } 933 934 /* Pause as all CPU's enter the NMI handler */ 935 uv_nmi_wait(master); 936 937 /* Process actions other than "kdump": */ 938 if (uv_nmi_action_is("health")) { 939 uv_nmi_action_health(cpu, regs, master); 940 } else if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) { 941 uv_nmi_dump_state(cpu, regs, master); 942 } else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb")) { 943 uv_call_kgdb_kdb(cpu, regs, master); 944 } else { 945 if (master) 946 pr_alert("UV: unknown NMI action: %s\n", uv_nmi_action); 947 uv_nmi_sync_exit(master); 948 } 949 950 /* Clear per_cpu "in_nmi" flag */ 951 this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT); 952 953 /* Clear MMR NMI flag on each hub */ 954 uv_clear_nmi(cpu); 955 956 /* Clear global flags */ 957 if (master) { 958 if (cpumask_weight(uv_nmi_cpu_mask)) 959 uv_nmi_cleanup_mask(); 960 atomic_set(&uv_nmi_cpus_in_nmi, -1); 961 atomic_set(&uv_nmi_cpu, -1); 962 atomic_set(&uv_in_nmi, 0); 963 atomic_set(&uv_nmi_kexec_failed, 0); 964 atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR); 965 } 966 967 uv_nmi_touch_watchdogs(); 968 local_irq_restore(flags); 969 970 return NMI_HANDLED; 971 } 972 973 /* 974 * NMI handler for pulling in CPU's when perf events are grabbing our NMI 975 */ 976 static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs) 977 { 978 int ret; 979 980 this_cpu_inc(uv_cpu_nmi.queries); 981 if (!this_cpu_read(uv_cpu_nmi.pinging)) { 982 local64_inc(&uv_nmi_ping_misses); 983 return NMI_DONE; 984 } 985 986 this_cpu_inc(uv_cpu_nmi.pings); 987 local64_inc(&uv_nmi_ping_count); 988 ret = uv_handle_nmi(reason, regs); 989 this_cpu_write(uv_cpu_nmi.pinging, 0); 990 return ret; 991 } 992 993 static void uv_register_nmi_notifier(void) 994 { 995 if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv")) 996 pr_warn("UV: NMI handler failed to register\n"); 997 998 if (register_nmi_handler(NMI_LOCAL, uv_handle_nmi_ping, 0, "uvping")) 999 pr_warn("UV: PING NMI handler failed to register\n"); 1000 } 1001 1002 void uv_nmi_init(void) 1003 { 1004 unsigned int value; 1005 1006 /* 1007 * Unmask NMI on all CPU's 1008 */ 1009 value = apic_read(APIC_LVT1) | APIC_DM_NMI; 1010 value &= ~APIC_LVT_MASKED; 1011 apic_write(APIC_LVT1, value); 1012 } 1013 1014 /* Setup HUB NMI info */ 1015 void __init uv_nmi_setup_common(bool hubbed) 1016 { 1017 int size = sizeof(void *) * (1 << NODES_SHIFT); 1018 int cpu; 1019 1020 uv_hub_nmi_list = kzalloc(size, GFP_KERNEL); 1021 nmi_debug("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size); 1022 BUG_ON(!uv_hub_nmi_list); 1023 size = sizeof(struct uv_hub_nmi_s); 1024 for_each_present_cpu(cpu) { 1025 int nid = cpu_to_node(cpu); 1026 if (uv_hub_nmi_list[nid] == NULL) { 1027 uv_hub_nmi_list[nid] = kzalloc_node(size, 1028 GFP_KERNEL, nid); 1029 BUG_ON(!uv_hub_nmi_list[nid]); 1030 raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock)); 1031 atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1); 1032 uv_hub_nmi_list[nid]->hub_present = hubbed; 1033 uv_hub_nmi_list[nid]->pch_owner = (nid == 0); 1034 } 1035 uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid]; 1036 } 1037 BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL)); 1038 } 1039 1040 /* Setup for UV Hub systems */ 1041 void __init uv_nmi_setup(void) 1042 { 1043 uv_nmi_setup_mmrs(); 1044 uv_nmi_setup_common(true); 1045 uv_register_nmi_notifier(); 1046 pr_info("UV: Hub NMI enabled\n"); 1047 } 1048 1049 /* Setup for UV Hubless systems */ 1050 void __init uv_nmi_setup_hubless(void) 1051 { 1052 uv_nmi_setup_common(false); 1053 pch_base = xlate_dev_mem_ptr(PCH_PCR_GPIO_1_BASE); 1054 nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n", 1055 pch_base, PCH_PCR_GPIO_1_BASE); 1056 if (uv_pch_init_enable) 1057 uv_init_hubless_pch_d0(); 1058 uv_init_hubless_pch_io(GPI_NMI_ENA_GPP_D_0, 1059 STS_GPP_D_0_MASK, STS_GPP_D_0_MASK); 1060 uv_nmi_setup_hubless_intr(); 1061 /* Ensure NMI enabled in Processor Interface Reg: */ 1062 uv_reassert_nmi(); 1063 uv_register_nmi_notifier(); 1064 pr_info("UV: Hubless NMI enabled\n"); 1065 } 1066