1 /* 2 * File: mca.c 3 * Purpose: Generic MCA handling layer 4 * 5 * Updated for latest kernel 6 * Copyright (C) 2003 Hewlett-Packard Co 7 * David Mosberger-Tang <davidm@hpl.hp.com> 8 * 9 * Copyright (C) 2002 Dell Inc. 10 * Copyright (C) Matt Domsch (Matt_Domsch@dell.com) 11 * 12 * Copyright (C) 2002 Intel 13 * Copyright (C) Jenna Hall (jenna.s.hall@intel.com) 14 * 15 * Copyright (C) 2001 Intel 16 * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com) 17 * 18 * Copyright (C) 2000 Intel 19 * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com) 20 * 21 * Copyright (C) 1999, 2004 Silicon Graphics, Inc. 22 * Copyright (C) Vijay Chander(vijay@engr.sgi.com) 23 * 24 * 03/04/15 D. Mosberger Added INIT backtrace support. 25 * 02/03/25 M. Domsch GUID cleanups 26 * 27 * 02/01/04 J. Hall Aligned MCA stack to 16 bytes, added platform vs. CPU 28 * error flag, set SAL default return values, changed 29 * error record structure to linked list, added init call 30 * to sal_get_state_info_size(). 31 * 32 * 01/01/03 F. Lewis Added setup of CMCI and CPEI IRQs, logging of corrected 33 * platform errors, completed code for logging of 34 * corrected & uncorrected machine check errors, and 35 * updated for conformance with Nov. 2000 revision of the 36 * SAL 3.0 spec. 37 * 00/03/29 C. Fleckenstein Fixed PAL/SAL update issues, began MCA bug fixes, logging issues, 38 * added min save state dump, added INIT handler. 39 * 40 * 2003-12-08 Keith Owens <kaos@sgi.com> 41 * smp_call_function() must not be called from interrupt context (can 42 * deadlock on tasklist_lock). Use keventd to call smp_call_function(). 43 * 44 * 2004-02-01 Keith Owens <kaos@sgi.com> 45 * Avoid deadlock when using printk() for MCA and INIT records. 46 * Delete all record printing code, moved to salinfo_decode in user space. 47 * Mark variables and functions static where possible. 48 * Delete dead variables and functions. 49 * Reorder to remove the need for forward declarations and to consolidate 50 * related code. 51 */ 52 #include <linux/config.h> 53 #include <linux/types.h> 54 #include <linux/init.h> 55 #include <linux/sched.h> 56 #include <linux/interrupt.h> 57 #include <linux/irq.h> 58 #include <linux/kallsyms.h> 59 #include <linux/smp_lock.h> 60 #include <linux/bootmem.h> 61 #include <linux/acpi.h> 62 #include <linux/timer.h> 63 #include <linux/module.h> 64 #include <linux/kernel.h> 65 #include <linux/smp.h> 66 #include <linux/workqueue.h> 67 68 #include <asm/delay.h> 69 #include <asm/machvec.h> 70 #include <asm/meminit.h> 71 #include <asm/page.h> 72 #include <asm/ptrace.h> 73 #include <asm/system.h> 74 #include <asm/sal.h> 75 #include <asm/mca.h> 76 77 #include <asm/irq.h> 78 #include <asm/hw_irq.h> 79 80 #if defined(IA64_MCA_DEBUG_INFO) 81 # define IA64_MCA_DEBUG(fmt...) printk(fmt) 82 #else 83 # define IA64_MCA_DEBUG(fmt...) 84 #endif 85 86 /* Used by mca_asm.S */ 87 ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state; 88 ia64_mca_os_to_sal_state_t ia64_os_to_sal_handoff_state; 89 u64 ia64_mca_serialize; 90 DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */ 91 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */ 92 DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */ 93 DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */ 94 95 unsigned long __per_cpu_mca[NR_CPUS]; 96 97 /* In mca_asm.S */ 98 extern void ia64_monarch_init_handler (void); 99 extern void ia64_slave_init_handler (void); 100 101 static ia64_mc_info_t ia64_mc_info; 102 103 #define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */ 104 #define MIN_CPE_POLL_INTERVAL (2*60*HZ) /* 2 minutes */ 105 #define CMC_POLL_INTERVAL (1*60*HZ) /* 1 minute */ 106 #define CPE_HISTORY_LENGTH 5 107 #define CMC_HISTORY_LENGTH 5 108 109 static struct timer_list cpe_poll_timer; 110 static struct timer_list cmc_poll_timer; 111 /* 112 * This variable tells whether we are currently in polling mode. 113 * Start with this in the wrong state so we won't play w/ timers 114 * before the system is ready. 115 */ 116 static int cmc_polling_enabled = 1; 117 118 /* 119 * Clearing this variable prevents CPE polling from getting activated 120 * in mca_late_init. Use it if your system doesn't provide a CPEI, 121 * but encounters problems retrieving CPE logs. This should only be 122 * necessary for debugging. 123 */ 124 static int cpe_poll_enabled = 1; 125 126 extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe); 127 128 static int mca_init; 129 130 /* 131 * IA64_MCA log support 132 */ 133 #define IA64_MAX_LOGS 2 /* Double-buffering for nested MCAs */ 134 #define IA64_MAX_LOG_TYPES 4 /* MCA, INIT, CMC, CPE */ 135 136 typedef struct ia64_state_log_s 137 { 138 spinlock_t isl_lock; 139 int isl_index; 140 unsigned long isl_count; 141 ia64_err_rec_t *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */ 142 } ia64_state_log_t; 143 144 static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES]; 145 146 #define IA64_LOG_ALLOCATE(it, size) \ 147 {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \ 148 (ia64_err_rec_t *)alloc_bootmem(size); \ 149 ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \ 150 (ia64_err_rec_t *)alloc_bootmem(size);} 151 #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock) 152 #define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s) 153 #define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s) 154 #define IA64_LOG_NEXT_INDEX(it) ia64_state_log[it].isl_index 155 #define IA64_LOG_CURR_INDEX(it) 1 - ia64_state_log[it].isl_index 156 #define IA64_LOG_INDEX_INC(it) \ 157 {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \ 158 ia64_state_log[it].isl_count++;} 159 #define IA64_LOG_INDEX_DEC(it) \ 160 ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index 161 #define IA64_LOG_NEXT_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])) 162 #define IA64_LOG_CURR_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])) 163 #define IA64_LOG_COUNT(it) ia64_state_log[it].isl_count 164 165 /* 166 * ia64_log_init 167 * Reset the OS ia64 log buffer 168 * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE}) 169 * Outputs : None 170 */ 171 static void 172 ia64_log_init(int sal_info_type) 173 { 174 u64 max_size = 0; 175 176 IA64_LOG_NEXT_INDEX(sal_info_type) = 0; 177 IA64_LOG_LOCK_INIT(sal_info_type); 178 179 // SAL will tell us the maximum size of any error record of this type 180 max_size = ia64_sal_get_state_info_size(sal_info_type); 181 if (!max_size) 182 /* alloc_bootmem() doesn't like zero-sized allocations! */ 183 return; 184 185 // set up OS data structures to hold error info 186 IA64_LOG_ALLOCATE(sal_info_type, max_size); 187 memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size); 188 memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size); 189 } 190 191 /* 192 * ia64_log_get 193 * 194 * Get the current MCA log from SAL and copy it into the OS log buffer. 195 * 196 * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE}) 197 * irq_safe whether you can use printk at this point 198 * Outputs : size (total record length) 199 * *buffer (ptr to error record) 200 * 201 */ 202 static u64 203 ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe) 204 { 205 sal_log_record_header_t *log_buffer; 206 u64 total_len = 0; 207 int s; 208 209 IA64_LOG_LOCK(sal_info_type); 210 211 /* Get the process state information */ 212 log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type); 213 214 total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer); 215 216 if (total_len) { 217 IA64_LOG_INDEX_INC(sal_info_type); 218 IA64_LOG_UNLOCK(sal_info_type); 219 if (irq_safe) { 220 IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. " 221 "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len); 222 } 223 *buffer = (u8 *) log_buffer; 224 return total_len; 225 } else { 226 IA64_LOG_UNLOCK(sal_info_type); 227 return 0; 228 } 229 } 230 231 /* 232 * ia64_mca_log_sal_error_record 233 * 234 * This function retrieves a specified error record type from SAL 235 * and wakes up any processes waiting for error records. 236 * 237 * Inputs : sal_info_type (Type of error record MCA/CMC/CPE/INIT) 238 */ 239 static void 240 ia64_mca_log_sal_error_record(int sal_info_type) 241 { 242 u8 *buffer; 243 sal_log_record_header_t *rh; 244 u64 size; 245 int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT; 246 #ifdef IA64_MCA_DEBUG_INFO 247 static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" }; 248 #endif 249 250 size = ia64_log_get(sal_info_type, &buffer, irq_safe); 251 if (!size) 252 return; 253 254 salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe); 255 256 if (irq_safe) 257 IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n", 258 smp_processor_id(), 259 sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN"); 260 261 /* Clear logs from corrected errors in case there's no user-level logger */ 262 rh = (sal_log_record_header_t *)buffer; 263 if (rh->severity == sal_log_severity_corrected) 264 ia64_sal_clear_state_info(sal_info_type); 265 } 266 267 /* 268 * platform dependent error handling 269 */ 270 #ifndef PLATFORM_MCA_HANDLERS 271 272 #ifdef CONFIG_ACPI 273 274 static int cpe_vector = -1; 275 276 static irqreturn_t 277 ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs) 278 { 279 static unsigned long cpe_history[CPE_HISTORY_LENGTH]; 280 static int index; 281 static DEFINE_SPINLOCK(cpe_history_lock); 282 283 IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", 284 __FUNCTION__, cpe_irq, smp_processor_id()); 285 286 /* SAL spec states this should run w/ interrupts enabled */ 287 local_irq_enable(); 288 289 /* Get the CPE error record and log it */ 290 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE); 291 292 spin_lock(&cpe_history_lock); 293 if (!cpe_poll_enabled && cpe_vector >= 0) { 294 295 int i, count = 1; /* we know 1 happened now */ 296 unsigned long now = jiffies; 297 298 for (i = 0; i < CPE_HISTORY_LENGTH; i++) { 299 if (now - cpe_history[i] <= HZ) 300 count++; 301 } 302 303 IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH); 304 if (count >= CPE_HISTORY_LENGTH) { 305 306 cpe_poll_enabled = 1; 307 spin_unlock(&cpe_history_lock); 308 disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR)); 309 310 /* 311 * Corrected errors will still be corrected, but 312 * make sure there's a log somewhere that indicates 313 * something is generating more than we can handle. 314 */ 315 printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n"); 316 317 mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL); 318 319 /* lock already released, get out now */ 320 return IRQ_HANDLED; 321 } else { 322 cpe_history[index++] = now; 323 if (index == CPE_HISTORY_LENGTH) 324 index = 0; 325 } 326 } 327 spin_unlock(&cpe_history_lock); 328 return IRQ_HANDLED; 329 } 330 331 #endif /* CONFIG_ACPI */ 332 333 static void 334 show_min_state (pal_min_state_area_t *minstate) 335 { 336 u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri; 337 u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri; 338 339 printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits); 340 printk("pr\t\t%016lx\n", minstate->pmsa_pr); 341 printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0); 342 printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc); 343 printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip); 344 printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr); 345 printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs); 346 printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip); 347 printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr); 348 printk("xfs\t\t%016lx\n", minstate->pmsa_xfs); 349 printk("b1\t\t%016lx ", minstate->pmsa_br1); 350 print_symbol("%s\n", minstate->pmsa_br1); 351 352 printk("\nstatic registers r0-r15:\n"); 353 printk(" r0- 3 %016lx %016lx %016lx %016lx\n", 354 0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]); 355 printk(" r4- 7 %016lx %016lx %016lx %016lx\n", 356 minstate->pmsa_gr[3], minstate->pmsa_gr[4], 357 minstate->pmsa_gr[5], minstate->pmsa_gr[6]); 358 printk(" r8-11 %016lx %016lx %016lx %016lx\n", 359 minstate->pmsa_gr[7], minstate->pmsa_gr[8], 360 minstate->pmsa_gr[9], minstate->pmsa_gr[10]); 361 printk("r12-15 %016lx %016lx %016lx %016lx\n", 362 minstate->pmsa_gr[11], minstate->pmsa_gr[12], 363 minstate->pmsa_gr[13], minstate->pmsa_gr[14]); 364 365 printk("\nbank 0:\n"); 366 printk("r16-19 %016lx %016lx %016lx %016lx\n", 367 minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1], 368 minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]); 369 printk("r20-23 %016lx %016lx %016lx %016lx\n", 370 minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5], 371 minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]); 372 printk("r24-27 %016lx %016lx %016lx %016lx\n", 373 minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9], 374 minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]); 375 printk("r28-31 %016lx %016lx %016lx %016lx\n", 376 minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13], 377 minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]); 378 379 printk("\nbank 1:\n"); 380 printk("r16-19 %016lx %016lx %016lx %016lx\n", 381 minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1], 382 minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]); 383 printk("r20-23 %016lx %016lx %016lx %016lx\n", 384 minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5], 385 minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]); 386 printk("r24-27 %016lx %016lx %016lx %016lx\n", 387 minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9], 388 minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]); 389 printk("r28-31 %016lx %016lx %016lx %016lx\n", 390 minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13], 391 minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]); 392 } 393 394 static void 395 fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw) 396 { 397 u64 *dst_banked, *src_banked, bit, shift, nat_bits; 398 int i; 399 400 /* 401 * First, update the pt-regs and switch-stack structures with the contents stored 402 * in the min-state area: 403 */ 404 if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) { 405 pt->cr_ipsr = ms->pmsa_xpsr; 406 pt->cr_iip = ms->pmsa_xip; 407 pt->cr_ifs = ms->pmsa_xfs; 408 } else { 409 pt->cr_ipsr = ms->pmsa_ipsr; 410 pt->cr_iip = ms->pmsa_iip; 411 pt->cr_ifs = ms->pmsa_ifs; 412 } 413 pt->ar_rsc = ms->pmsa_rsc; 414 pt->pr = ms->pmsa_pr; 415 pt->r1 = ms->pmsa_gr[0]; 416 pt->r2 = ms->pmsa_gr[1]; 417 pt->r3 = ms->pmsa_gr[2]; 418 sw->r4 = ms->pmsa_gr[3]; 419 sw->r5 = ms->pmsa_gr[4]; 420 sw->r6 = ms->pmsa_gr[5]; 421 sw->r7 = ms->pmsa_gr[6]; 422 pt->r8 = ms->pmsa_gr[7]; 423 pt->r9 = ms->pmsa_gr[8]; 424 pt->r10 = ms->pmsa_gr[9]; 425 pt->r11 = ms->pmsa_gr[10]; 426 pt->r12 = ms->pmsa_gr[11]; 427 pt->r13 = ms->pmsa_gr[12]; 428 pt->r14 = ms->pmsa_gr[13]; 429 pt->r15 = ms->pmsa_gr[14]; 430 dst_banked = &pt->r16; /* r16-r31 are contiguous in struct pt_regs */ 431 src_banked = ms->pmsa_bank1_gr; 432 for (i = 0; i < 16; ++i) 433 dst_banked[i] = src_banked[i]; 434 pt->b0 = ms->pmsa_br0; 435 sw->b1 = ms->pmsa_br1; 436 437 /* construct the NaT bits for the pt-regs structure: */ 438 # define PUT_NAT_BIT(dst, addr) \ 439 do { \ 440 bit = nat_bits & 1; nat_bits >>= 1; \ 441 shift = ((unsigned long) addr >> 3) & 0x3f; \ 442 dst = ((dst) & ~(1UL << shift)) | (bit << shift); \ 443 } while (0) 444 445 /* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */ 446 shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f; 447 nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift)); 448 449 PUT_NAT_BIT(sw->caller_unat, &pt->r1); 450 PUT_NAT_BIT(sw->caller_unat, &pt->r2); 451 PUT_NAT_BIT(sw->caller_unat, &pt->r3); 452 PUT_NAT_BIT(sw->ar_unat, &sw->r4); 453 PUT_NAT_BIT(sw->ar_unat, &sw->r5); 454 PUT_NAT_BIT(sw->ar_unat, &sw->r6); 455 PUT_NAT_BIT(sw->ar_unat, &sw->r7); 456 PUT_NAT_BIT(sw->caller_unat, &pt->r8); PUT_NAT_BIT(sw->caller_unat, &pt->r9); 457 PUT_NAT_BIT(sw->caller_unat, &pt->r10); PUT_NAT_BIT(sw->caller_unat, &pt->r11); 458 PUT_NAT_BIT(sw->caller_unat, &pt->r12); PUT_NAT_BIT(sw->caller_unat, &pt->r13); 459 PUT_NAT_BIT(sw->caller_unat, &pt->r14); PUT_NAT_BIT(sw->caller_unat, &pt->r15); 460 nat_bits >>= 16; /* skip over bank0 NaT bits */ 461 PUT_NAT_BIT(sw->caller_unat, &pt->r16); PUT_NAT_BIT(sw->caller_unat, &pt->r17); 462 PUT_NAT_BIT(sw->caller_unat, &pt->r18); PUT_NAT_BIT(sw->caller_unat, &pt->r19); 463 PUT_NAT_BIT(sw->caller_unat, &pt->r20); PUT_NAT_BIT(sw->caller_unat, &pt->r21); 464 PUT_NAT_BIT(sw->caller_unat, &pt->r22); PUT_NAT_BIT(sw->caller_unat, &pt->r23); 465 PUT_NAT_BIT(sw->caller_unat, &pt->r24); PUT_NAT_BIT(sw->caller_unat, &pt->r25); 466 PUT_NAT_BIT(sw->caller_unat, &pt->r26); PUT_NAT_BIT(sw->caller_unat, &pt->r27); 467 PUT_NAT_BIT(sw->caller_unat, &pt->r28); PUT_NAT_BIT(sw->caller_unat, &pt->r29); 468 PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31); 469 } 470 471 static void 472 init_handler_platform (pal_min_state_area_t *ms, 473 struct pt_regs *pt, struct switch_stack *sw) 474 { 475 struct unw_frame_info info; 476 477 /* if a kernel debugger is available call it here else just dump the registers */ 478 479 /* 480 * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be 481 * generated via the BMC's command-line interface, but since the console is on the 482 * same serial line, the user will need some time to switch out of the BMC before 483 * the dump begins. 484 */ 485 printk("Delaying for 5 seconds...\n"); 486 udelay(5*1000000); 487 show_min_state(ms); 488 489 printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm); 490 fetch_min_state(ms, pt, sw); 491 unw_init_from_interruption(&info, current, pt, sw); 492 ia64_do_show_stack(&info, NULL); 493 494 #ifdef CONFIG_SMP 495 /* read_trylock() would be handy... */ 496 if (!tasklist_lock.write_lock) 497 read_lock(&tasklist_lock); 498 #endif 499 { 500 struct task_struct *g, *t; 501 do_each_thread (g, t) { 502 if (t == current) 503 continue; 504 505 printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm); 506 show_stack(t, NULL); 507 } while_each_thread (g, t); 508 } 509 #ifdef CONFIG_SMP 510 if (!tasklist_lock.write_lock) 511 read_unlock(&tasklist_lock); 512 #endif 513 514 printk("\nINIT dump complete. Please reboot now.\n"); 515 while (1); /* hang city if no debugger */ 516 } 517 518 #ifdef CONFIG_ACPI 519 /* 520 * ia64_mca_register_cpev 521 * 522 * Register the corrected platform error vector with SAL. 523 * 524 * Inputs 525 * cpev Corrected Platform Error Vector number 526 * 527 * Outputs 528 * None 529 */ 530 static void 531 ia64_mca_register_cpev (int cpev) 532 { 533 /* Register the CPE interrupt vector with SAL */ 534 struct ia64_sal_retval isrv; 535 536 isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0); 537 if (isrv.status) { 538 printk(KERN_ERR "Failed to register Corrected Platform " 539 "Error interrupt vector with SAL (status %ld)\n", isrv.status); 540 return; 541 } 542 543 IA64_MCA_DEBUG("%s: corrected platform error " 544 "vector %#x registered\n", __FUNCTION__, cpev); 545 } 546 #endif /* CONFIG_ACPI */ 547 548 #endif /* PLATFORM_MCA_HANDLERS */ 549 550 /* 551 * ia64_mca_cmc_vector_setup 552 * 553 * Setup the corrected machine check vector register in the processor. 554 * (The interrupt is masked on boot. ia64_mca_late_init unmask this.) 555 * This function is invoked on a per-processor basis. 556 * 557 * Inputs 558 * None 559 * 560 * Outputs 561 * None 562 */ 563 void 564 ia64_mca_cmc_vector_setup (void) 565 { 566 cmcv_reg_t cmcv; 567 568 cmcv.cmcv_regval = 0; 569 cmcv.cmcv_mask = 1; /* Mask/disable interrupt at first */ 570 cmcv.cmcv_vector = IA64_CMC_VECTOR; 571 ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); 572 573 IA64_MCA_DEBUG("%s: CPU %d corrected " 574 "machine check vector %#x registered.\n", 575 __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR); 576 577 IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n", 578 __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV)); 579 } 580 581 /* 582 * ia64_mca_cmc_vector_disable 583 * 584 * Mask the corrected machine check vector register in the processor. 585 * This function is invoked on a per-processor basis. 586 * 587 * Inputs 588 * dummy(unused) 589 * 590 * Outputs 591 * None 592 */ 593 static void 594 ia64_mca_cmc_vector_disable (void *dummy) 595 { 596 cmcv_reg_t cmcv; 597 598 cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV); 599 600 cmcv.cmcv_mask = 1; /* Mask/disable interrupt */ 601 ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); 602 603 IA64_MCA_DEBUG("%s: CPU %d corrected " 604 "machine check vector %#x disabled.\n", 605 __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector); 606 } 607 608 /* 609 * ia64_mca_cmc_vector_enable 610 * 611 * Unmask the corrected machine check vector register in the processor. 612 * This function is invoked on a per-processor basis. 613 * 614 * Inputs 615 * dummy(unused) 616 * 617 * Outputs 618 * None 619 */ 620 static void 621 ia64_mca_cmc_vector_enable (void *dummy) 622 { 623 cmcv_reg_t cmcv; 624 625 cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV); 626 627 cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */ 628 ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); 629 630 IA64_MCA_DEBUG("%s: CPU %d corrected " 631 "machine check vector %#x enabled.\n", 632 __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector); 633 } 634 635 /* 636 * ia64_mca_cmc_vector_disable_keventd 637 * 638 * Called via keventd (smp_call_function() is not safe in interrupt context) to 639 * disable the cmc interrupt vector. 640 */ 641 static void 642 ia64_mca_cmc_vector_disable_keventd(void *unused) 643 { 644 on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0); 645 } 646 647 /* 648 * ia64_mca_cmc_vector_enable_keventd 649 * 650 * Called via keventd (smp_call_function() is not safe in interrupt context) to 651 * enable the cmc interrupt vector. 652 */ 653 static void 654 ia64_mca_cmc_vector_enable_keventd(void *unused) 655 { 656 on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0); 657 } 658 659 /* 660 * ia64_mca_wakeup_ipi_wait 661 * 662 * Wait for the inter-cpu interrupt to be sent by the 663 * monarch processor once it is done with handling the 664 * MCA. 665 * 666 * Inputs : None 667 * Outputs : None 668 */ 669 static void 670 ia64_mca_wakeup_ipi_wait(void) 671 { 672 int irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6); 673 int irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f); 674 u64 irr = 0; 675 676 do { 677 switch(irr_num) { 678 case 0: 679 irr = ia64_getreg(_IA64_REG_CR_IRR0); 680 break; 681 case 1: 682 irr = ia64_getreg(_IA64_REG_CR_IRR1); 683 break; 684 case 2: 685 irr = ia64_getreg(_IA64_REG_CR_IRR2); 686 break; 687 case 3: 688 irr = ia64_getreg(_IA64_REG_CR_IRR3); 689 break; 690 } 691 cpu_relax(); 692 } while (!(irr & (1UL << irr_bit))) ; 693 } 694 695 /* 696 * ia64_mca_wakeup 697 * 698 * Send an inter-cpu interrupt to wake-up a particular cpu 699 * and mark that cpu to be out of rendez. 700 * 701 * Inputs : cpuid 702 * Outputs : None 703 */ 704 static void 705 ia64_mca_wakeup(int cpu) 706 { 707 platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0); 708 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; 709 710 } 711 712 /* 713 * ia64_mca_wakeup_all 714 * 715 * Wakeup all the cpus which have rendez'ed previously. 716 * 717 * Inputs : None 718 * Outputs : None 719 */ 720 static void 721 ia64_mca_wakeup_all(void) 722 { 723 int cpu; 724 725 /* Clear the Rendez checkin flag for all cpus */ 726 for(cpu = 0; cpu < NR_CPUS; cpu++) { 727 if (!cpu_online(cpu)) 728 continue; 729 if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE) 730 ia64_mca_wakeup(cpu); 731 } 732 733 } 734 735 /* 736 * ia64_mca_rendez_interrupt_handler 737 * 738 * This is handler used to put slave processors into spinloop 739 * while the monarch processor does the mca handling and later 740 * wake each slave up once the monarch is done. 741 * 742 * Inputs : None 743 * Outputs : None 744 */ 745 static irqreturn_t 746 ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs) 747 { 748 unsigned long flags; 749 int cpu = smp_processor_id(); 750 751 /* Mask all interrupts */ 752 local_irq_save(flags); 753 754 ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE; 755 /* Register with the SAL monarch that the slave has 756 * reached SAL 757 */ 758 ia64_sal_mc_rendez(); 759 760 /* Wait for the wakeup IPI from the monarch 761 * This waiting is done by polling on the wakeup-interrupt 762 * vector bit in the processor's IRRs 763 */ 764 ia64_mca_wakeup_ipi_wait(); 765 766 /* Enable all interrupts */ 767 local_irq_restore(flags); 768 return IRQ_HANDLED; 769 } 770 771 /* 772 * ia64_mca_wakeup_int_handler 773 * 774 * The interrupt handler for processing the inter-cpu interrupt to the 775 * slave cpu which was spinning in the rendez loop. 776 * Since this spinning is done by turning off the interrupts and 777 * polling on the wakeup-interrupt bit in the IRR, there is 778 * nothing useful to be done in the handler. 779 * 780 * Inputs : wakeup_irq (Wakeup-interrupt bit) 781 * arg (Interrupt handler specific argument) 782 * ptregs (Exception frame at the time of the interrupt) 783 * Outputs : None 784 * 785 */ 786 static irqreturn_t 787 ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs) 788 { 789 return IRQ_HANDLED; 790 } 791 792 /* 793 * ia64_return_to_sal_check 794 * 795 * This is function called before going back from the OS_MCA handler 796 * to the OS_MCA dispatch code which finally takes the control back 797 * to the SAL. 798 * The main purpose of this routine is to setup the OS_MCA to SAL 799 * return state which can be used by the OS_MCA dispatch code 800 * just before going back to SAL. 801 * 802 * Inputs : None 803 * Outputs : None 804 */ 805 806 static void 807 ia64_return_to_sal_check(int recover) 808 { 809 810 /* Copy over some relevant stuff from the sal_to_os_mca_handoff 811 * so that it can be used at the time of os_mca_to_sal_handoff 812 */ 813 ia64_os_to_sal_handoff_state.imots_sal_gp = 814 ia64_sal_to_os_handoff_state.imsto_sal_gp; 815 816 ia64_os_to_sal_handoff_state.imots_sal_check_ra = 817 ia64_sal_to_os_handoff_state.imsto_sal_check_ra; 818 819 if (recover) 820 ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED; 821 else 822 ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT; 823 824 /* Default = tell SAL to return to same context */ 825 ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT; 826 827 ia64_os_to_sal_handoff_state.imots_new_min_state = 828 (u64 *)ia64_sal_to_os_handoff_state.pal_min_state; 829 830 } 831 832 /* Function pointer for extra MCA recovery */ 833 int (*ia64_mca_ucmc_extension) 834 (void*,ia64_mca_sal_to_os_state_t*,ia64_mca_os_to_sal_state_t*) 835 = NULL; 836 837 int 838 ia64_reg_MCA_extension(void *fn) 839 { 840 if (ia64_mca_ucmc_extension) 841 return 1; 842 843 ia64_mca_ucmc_extension = fn; 844 return 0; 845 } 846 847 void 848 ia64_unreg_MCA_extension(void) 849 { 850 if (ia64_mca_ucmc_extension) 851 ia64_mca_ucmc_extension = NULL; 852 } 853 854 EXPORT_SYMBOL(ia64_reg_MCA_extension); 855 EXPORT_SYMBOL(ia64_unreg_MCA_extension); 856 857 /* 858 * ia64_mca_ucmc_handler 859 * 860 * This is uncorrectable machine check handler called from OS_MCA 861 * dispatch code which is in turn called from SAL_CHECK(). 862 * This is the place where the core of OS MCA handling is done. 863 * Right now the logs are extracted and displayed in a well-defined 864 * format. This handler code is supposed to be run only on the 865 * monarch processor. Once the monarch is done with MCA handling 866 * further MCA logging is enabled by clearing logs. 867 * Monarch also has the duty of sending wakeup-IPIs to pull the 868 * slave processors out of rendezvous spinloop. 869 * 870 * Inputs : None 871 * Outputs : None 872 */ 873 void 874 ia64_mca_ucmc_handler(void) 875 { 876 pal_processor_state_info_t *psp = (pal_processor_state_info_t *) 877 &ia64_sal_to_os_handoff_state.proc_state_param; 878 int recover; 879 880 /* Get the MCA error record and log it */ 881 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA); 882 883 /* TLB error is only exist in this SAL error record */ 884 recover = (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc)) 885 /* other error recovery */ 886 || (ia64_mca_ucmc_extension 887 && ia64_mca_ucmc_extension( 888 IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA), 889 &ia64_sal_to_os_handoff_state, 890 &ia64_os_to_sal_handoff_state)); 891 892 if (recover) { 893 sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA); 894 rh->severity = sal_log_severity_corrected; 895 ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA); 896 } 897 /* 898 * Wakeup all the processors which are spinning in the rendezvous 899 * loop. 900 */ 901 ia64_mca_wakeup_all(); 902 903 /* Return to SAL */ 904 ia64_return_to_sal_check(recover); 905 } 906 907 static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL); 908 static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL); 909 910 /* 911 * ia64_mca_cmc_int_handler 912 * 913 * This is corrected machine check interrupt handler. 914 * Right now the logs are extracted and displayed in a well-defined 915 * format. 916 * 917 * Inputs 918 * interrupt number 919 * client data arg ptr 920 * saved registers ptr 921 * 922 * Outputs 923 * None 924 */ 925 static irqreturn_t 926 ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs) 927 { 928 static unsigned long cmc_history[CMC_HISTORY_LENGTH]; 929 static int index; 930 static DEFINE_SPINLOCK(cmc_history_lock); 931 932 IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", 933 __FUNCTION__, cmc_irq, smp_processor_id()); 934 935 /* SAL spec states this should run w/ interrupts enabled */ 936 local_irq_enable(); 937 938 /* Get the CMC error record and log it */ 939 ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC); 940 941 spin_lock(&cmc_history_lock); 942 if (!cmc_polling_enabled) { 943 int i, count = 1; /* we know 1 happened now */ 944 unsigned long now = jiffies; 945 946 for (i = 0; i < CMC_HISTORY_LENGTH; i++) { 947 if (now - cmc_history[i] <= HZ) 948 count++; 949 } 950 951 IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH); 952 if (count >= CMC_HISTORY_LENGTH) { 953 954 cmc_polling_enabled = 1; 955 spin_unlock(&cmc_history_lock); 956 schedule_work(&cmc_disable_work); 957 958 /* 959 * Corrected errors will still be corrected, but 960 * make sure there's a log somewhere that indicates 961 * something is generating more than we can handle. 962 */ 963 printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n"); 964 965 mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL); 966 967 /* lock already released, get out now */ 968 return IRQ_HANDLED; 969 } else { 970 cmc_history[index++] = now; 971 if (index == CMC_HISTORY_LENGTH) 972 index = 0; 973 } 974 } 975 spin_unlock(&cmc_history_lock); 976 return IRQ_HANDLED; 977 } 978 979 /* 980 * ia64_mca_cmc_int_caller 981 * 982 * Triggered by sw interrupt from CMC polling routine. Calls 983 * real interrupt handler and either triggers a sw interrupt 984 * on the next cpu or does cleanup at the end. 985 * 986 * Inputs 987 * interrupt number 988 * client data arg ptr 989 * saved registers ptr 990 * Outputs 991 * handled 992 */ 993 static irqreturn_t 994 ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs) 995 { 996 static int start_count = -1; 997 unsigned int cpuid; 998 999 cpuid = smp_processor_id(); 1000 1001 /* If first cpu, update count */ 1002 if (start_count == -1) 1003 start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC); 1004 1005 ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs); 1006 1007 for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); 1008 1009 if (cpuid < NR_CPUS) { 1010 platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); 1011 } else { 1012 /* If no log record, switch out of polling mode */ 1013 if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) { 1014 1015 printk(KERN_WARNING "Returning to interrupt driven CMC handler\n"); 1016 schedule_work(&cmc_enable_work); 1017 cmc_polling_enabled = 0; 1018 1019 } else { 1020 1021 mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL); 1022 } 1023 1024 start_count = -1; 1025 } 1026 1027 return IRQ_HANDLED; 1028 } 1029 1030 /* 1031 * ia64_mca_cmc_poll 1032 * 1033 * Poll for Corrected Machine Checks (CMCs) 1034 * 1035 * Inputs : dummy(unused) 1036 * Outputs : None 1037 * 1038 */ 1039 static void 1040 ia64_mca_cmc_poll (unsigned long dummy) 1041 { 1042 /* Trigger a CMC interrupt cascade */ 1043 platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0); 1044 } 1045 1046 /* 1047 * ia64_mca_cpe_int_caller 1048 * 1049 * Triggered by sw interrupt from CPE polling routine. Calls 1050 * real interrupt handler and either triggers a sw interrupt 1051 * on the next cpu or does cleanup at the end. 1052 * 1053 * Inputs 1054 * interrupt number 1055 * client data arg ptr 1056 * saved registers ptr 1057 * Outputs 1058 * handled 1059 */ 1060 #ifdef CONFIG_ACPI 1061 1062 static irqreturn_t 1063 ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs) 1064 { 1065 static int start_count = -1; 1066 static int poll_time = MIN_CPE_POLL_INTERVAL; 1067 unsigned int cpuid; 1068 1069 cpuid = smp_processor_id(); 1070 1071 /* If first cpu, update count */ 1072 if (start_count == -1) 1073 start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE); 1074 1075 ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs); 1076 1077 for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++); 1078 1079 if (cpuid < NR_CPUS) { 1080 platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); 1081 } else { 1082 /* 1083 * If a log was recorded, increase our polling frequency, 1084 * otherwise, backoff or return to interrupt mode. 1085 */ 1086 if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) { 1087 poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2); 1088 } else if (cpe_vector < 0) { 1089 poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2); 1090 } else { 1091 poll_time = MIN_CPE_POLL_INTERVAL; 1092 1093 printk(KERN_WARNING "Returning to interrupt driven CPE handler\n"); 1094 enable_irq(local_vector_to_irq(IA64_CPE_VECTOR)); 1095 cpe_poll_enabled = 0; 1096 } 1097 1098 if (cpe_poll_enabled) 1099 mod_timer(&cpe_poll_timer, jiffies + poll_time); 1100 start_count = -1; 1101 } 1102 1103 return IRQ_HANDLED; 1104 } 1105 1106 #endif /* CONFIG_ACPI */ 1107 1108 /* 1109 * ia64_mca_cpe_poll 1110 * 1111 * Poll for Corrected Platform Errors (CPEs), trigger interrupt 1112 * on first cpu, from there it will trickle through all the cpus. 1113 * 1114 * Inputs : dummy(unused) 1115 * Outputs : None 1116 * 1117 */ 1118 static void 1119 ia64_mca_cpe_poll (unsigned long dummy) 1120 { 1121 /* Trigger a CPE interrupt cascade */ 1122 platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0); 1123 } 1124 1125 /* 1126 * C portion of the OS INIT handler 1127 * 1128 * Called from ia64_monarch_init_handler 1129 * 1130 * Inputs: pointer to pt_regs where processor info was saved. 1131 * 1132 * Returns: 1133 * 0 if SAL must warm boot the System 1134 * 1 if SAL must return to interrupted context using PAL_MC_RESUME 1135 * 1136 */ 1137 void 1138 ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw) 1139 { 1140 pal_min_state_area_t *ms; 1141 1142 oops_in_progress = 1; /* avoid deadlock in printk, but it makes recovery dodgy */ 1143 console_loglevel = 15; /* make sure printks make it to console */ 1144 1145 printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n", 1146 ia64_sal_to_os_handoff_state.proc_state_param); 1147 1148 /* 1149 * Address of minstate area provided by PAL is physical, 1150 * uncacheable (bit 63 set). Convert to Linux virtual 1151 * address in region 6. 1152 */ 1153 ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61)); 1154 1155 init_handler_platform(ms, pt, sw); /* call platform specific routines */ 1156 } 1157 1158 static int __init 1159 ia64_mca_disable_cpe_polling(char *str) 1160 { 1161 cpe_poll_enabled = 0; 1162 return 1; 1163 } 1164 1165 __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling); 1166 1167 static struct irqaction cmci_irqaction = { 1168 .handler = ia64_mca_cmc_int_handler, 1169 .flags = SA_INTERRUPT, 1170 .name = "cmc_hndlr" 1171 }; 1172 1173 static struct irqaction cmcp_irqaction = { 1174 .handler = ia64_mca_cmc_int_caller, 1175 .flags = SA_INTERRUPT, 1176 .name = "cmc_poll" 1177 }; 1178 1179 static struct irqaction mca_rdzv_irqaction = { 1180 .handler = ia64_mca_rendez_int_handler, 1181 .flags = SA_INTERRUPT, 1182 .name = "mca_rdzv" 1183 }; 1184 1185 static struct irqaction mca_wkup_irqaction = { 1186 .handler = ia64_mca_wakeup_int_handler, 1187 .flags = SA_INTERRUPT, 1188 .name = "mca_wkup" 1189 }; 1190 1191 #ifdef CONFIG_ACPI 1192 static struct irqaction mca_cpe_irqaction = { 1193 .handler = ia64_mca_cpe_int_handler, 1194 .flags = SA_INTERRUPT, 1195 .name = "cpe_hndlr" 1196 }; 1197 1198 static struct irqaction mca_cpep_irqaction = { 1199 .handler = ia64_mca_cpe_int_caller, 1200 .flags = SA_INTERRUPT, 1201 .name = "cpe_poll" 1202 }; 1203 #endif /* CONFIG_ACPI */ 1204 1205 /* Do per-CPU MCA-related initialization. */ 1206 1207 void __devinit 1208 ia64_mca_cpu_init(void *cpu_data) 1209 { 1210 void *pal_vaddr; 1211 1212 if (smp_processor_id() == 0) { 1213 void *mca_data; 1214 int cpu; 1215 1216 mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu) 1217 * NR_CPUS); 1218 for (cpu = 0; cpu < NR_CPUS; cpu++) { 1219 __per_cpu_mca[cpu] = __pa(mca_data); 1220 mca_data += sizeof(struct ia64_mca_cpu); 1221 } 1222 } 1223 1224 /* 1225 * The MCA info structure was allocated earlier and its 1226 * physical address saved in __per_cpu_mca[cpu]. Copy that 1227 * address * to ia64_mca_data so we can access it as a per-CPU 1228 * variable. 1229 */ 1230 __get_cpu_var(ia64_mca_data) = __per_cpu_mca[smp_processor_id()]; 1231 1232 /* 1233 * Stash away a copy of the PTE needed to map the per-CPU page. 1234 * We may need it during MCA recovery. 1235 */ 1236 __get_cpu_var(ia64_mca_per_cpu_pte) = 1237 pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)); 1238 1239 /* 1240 * Also, stash away a copy of the PAL address and the PTE 1241 * needed to map it. 1242 */ 1243 pal_vaddr = efi_get_pal_addr(); 1244 if (!pal_vaddr) 1245 return; 1246 __get_cpu_var(ia64_mca_pal_base) = 1247 GRANULEROUNDDOWN((unsigned long) pal_vaddr); 1248 __get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr), 1249 PAGE_KERNEL)); 1250 } 1251 1252 /* 1253 * ia64_mca_init 1254 * 1255 * Do all the system level mca specific initialization. 1256 * 1257 * 1. Register spinloop and wakeup request interrupt vectors 1258 * 1259 * 2. Register OS_MCA handler entry point 1260 * 1261 * 3. Register OS_INIT handler entry point 1262 * 1263 * 4. Initialize MCA/CMC/INIT related log buffers maintained by the OS. 1264 * 1265 * Note that this initialization is done very early before some kernel 1266 * services are available. 1267 * 1268 * Inputs : None 1269 * 1270 * Outputs : None 1271 */ 1272 void __init 1273 ia64_mca_init(void) 1274 { 1275 ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler; 1276 ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler; 1277 ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch; 1278 int i; 1279 s64 rc; 1280 struct ia64_sal_retval isrv; 1281 u64 timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */ 1282 1283 IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__); 1284 1285 /* Clear the Rendez checkin flag for all cpus */ 1286 for(i = 0 ; i < NR_CPUS; i++) 1287 ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; 1288 1289 /* 1290 * Register the rendezvous spinloop and wakeup mechanism with SAL 1291 */ 1292 1293 /* Register the rendezvous interrupt vector with SAL */ 1294 while (1) { 1295 isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT, 1296 SAL_MC_PARAM_MECHANISM_INT, 1297 IA64_MCA_RENDEZ_VECTOR, 1298 timeout, 1299 SAL_MC_PARAM_RZ_ALWAYS); 1300 rc = isrv.status; 1301 if (rc == 0) 1302 break; 1303 if (rc == -2) { 1304 printk(KERN_INFO "Increasing MCA rendezvous timeout from " 1305 "%ld to %ld milliseconds\n", timeout, isrv.v0); 1306 timeout = isrv.v0; 1307 continue; 1308 } 1309 printk(KERN_ERR "Failed to register rendezvous interrupt " 1310 "with SAL (status %ld)\n", rc); 1311 return; 1312 } 1313 1314 /* Register the wakeup interrupt vector with SAL */ 1315 isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP, 1316 SAL_MC_PARAM_MECHANISM_INT, 1317 IA64_MCA_WAKEUP_VECTOR, 1318 0, 0); 1319 rc = isrv.status; 1320 if (rc) { 1321 printk(KERN_ERR "Failed to register wakeup interrupt with SAL " 1322 "(status %ld)\n", rc); 1323 return; 1324 } 1325 1326 IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__); 1327 1328 ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp); 1329 /* 1330 * XXX - disable SAL checksum by setting size to 0; should be 1331 * ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch); 1332 */ 1333 ia64_mc_info.imi_mca_handler_size = 0; 1334 1335 /* Register the os mca handler with SAL */ 1336 if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 1337 ia64_mc_info.imi_mca_handler, 1338 ia64_tpa(mca_hldlr_ptr->gp), 1339 ia64_mc_info.imi_mca_handler_size, 1340 0, 0, 0))) 1341 { 1342 printk(KERN_ERR "Failed to register OS MCA handler with SAL " 1343 "(status %ld)\n", rc); 1344 return; 1345 } 1346 1347 IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__, 1348 ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp)); 1349 1350 /* 1351 * XXX - disable SAL checksum by setting size to 0, should be 1352 * size of the actual init handler in mca_asm.S. 1353 */ 1354 ia64_mc_info.imi_monarch_init_handler = ia64_tpa(mon_init_ptr->fp); 1355 ia64_mc_info.imi_monarch_init_handler_size = 0; 1356 ia64_mc_info.imi_slave_init_handler = ia64_tpa(slave_init_ptr->fp); 1357 ia64_mc_info.imi_slave_init_handler_size = 0; 1358 1359 IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__, 1360 ia64_mc_info.imi_monarch_init_handler); 1361 1362 /* Register the os init handler with SAL */ 1363 if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 1364 ia64_mc_info.imi_monarch_init_handler, 1365 ia64_tpa(ia64_getreg(_IA64_REG_GP)), 1366 ia64_mc_info.imi_monarch_init_handler_size, 1367 ia64_mc_info.imi_slave_init_handler, 1368 ia64_tpa(ia64_getreg(_IA64_REG_GP)), 1369 ia64_mc_info.imi_slave_init_handler_size))) 1370 { 1371 printk(KERN_ERR "Failed to register m/s INIT handlers with SAL " 1372 "(status %ld)\n", rc); 1373 return; 1374 } 1375 1376 IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__); 1377 1378 /* 1379 * Configure the CMCI/P vector and handler. Interrupts for CMC are 1380 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). 1381 */ 1382 register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction); 1383 register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction); 1384 ia64_mca_cmc_vector_setup(); /* Setup vector on BSP */ 1385 1386 /* Setup the MCA rendezvous interrupt vector */ 1387 register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction); 1388 1389 /* Setup the MCA wakeup interrupt vector */ 1390 register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction); 1391 1392 #ifdef CONFIG_ACPI 1393 /* Setup the CPEI/P vector and handler */ 1394 cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI); 1395 register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); 1396 #endif 1397 1398 /* Initialize the areas set aside by the OS to buffer the 1399 * platform/processor error states for MCA/INIT/CMC 1400 * handling. 1401 */ 1402 ia64_log_init(SAL_INFO_TYPE_MCA); 1403 ia64_log_init(SAL_INFO_TYPE_INIT); 1404 ia64_log_init(SAL_INFO_TYPE_CMC); 1405 ia64_log_init(SAL_INFO_TYPE_CPE); 1406 1407 mca_init = 1; 1408 printk(KERN_INFO "MCA related initialization done\n"); 1409 } 1410 1411 /* 1412 * ia64_mca_late_init 1413 * 1414 * Opportunity to setup things that require initialization later 1415 * than ia64_mca_init. Setup a timer to poll for CPEs if the 1416 * platform doesn't support an interrupt driven mechanism. 1417 * 1418 * Inputs : None 1419 * Outputs : Status 1420 */ 1421 static int __init 1422 ia64_mca_late_init(void) 1423 { 1424 if (!mca_init) 1425 return 0; 1426 1427 /* Setup the CMCI/P vector and handler */ 1428 init_timer(&cmc_poll_timer); 1429 cmc_poll_timer.function = ia64_mca_cmc_poll; 1430 1431 /* Unmask/enable the vector */ 1432 cmc_polling_enabled = 0; 1433 schedule_work(&cmc_enable_work); 1434 1435 IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__); 1436 1437 #ifdef CONFIG_ACPI 1438 /* Setup the CPEI/P vector and handler */ 1439 init_timer(&cpe_poll_timer); 1440 cpe_poll_timer.function = ia64_mca_cpe_poll; 1441 1442 { 1443 irq_desc_t *desc; 1444 unsigned int irq; 1445 1446 if (cpe_vector >= 0) { 1447 /* If platform supports CPEI, enable the irq. */ 1448 cpe_poll_enabled = 0; 1449 for (irq = 0; irq < NR_IRQS; ++irq) 1450 if (irq_to_vector(irq) == cpe_vector) { 1451 desc = irq_descp(irq); 1452 desc->status |= IRQ_PER_CPU; 1453 setup_irq(irq, &mca_cpe_irqaction); 1454 } 1455 ia64_mca_register_cpev(cpe_vector); 1456 IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__); 1457 } else { 1458 /* If platform doesn't support CPEI, get the timer going. */ 1459 if (cpe_poll_enabled) { 1460 ia64_mca_cpe_poll(0UL); 1461 IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__); 1462 } 1463 } 1464 } 1465 #endif 1466 1467 return 0; 1468 } 1469 1470 device_initcall(ia64_mca_late_init); 1471