xref: /openbmc/linux/arch/sparc/kernel/traps_64.c (revision dea54fba)
1 /* arch/sparc64/kernel/traps.c
2  *
3  * Copyright (C) 1995,1997,2008,2009,2012 David S. Miller (davem@davemloft.net)
4  * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
5  */
6 
7 /*
8  * I like traps on v9, :))))
9  */
10 
11 #include <linux/extable.h>
12 #include <linux/sched/mm.h>
13 #include <linux/sched/debug.h>
14 #include <linux/linkage.h>
15 #include <linux/kernel.h>
16 #include <linux/signal.h>
17 #include <linux/smp.h>
18 #include <linux/mm.h>
19 #include <linux/init.h>
20 #include <linux/kdebug.h>
21 #include <linux/ftrace.h>
22 #include <linux/reboot.h>
23 #include <linux/gfp.h>
24 #include <linux/context_tracking.h>
25 
26 #include <asm/smp.h>
27 #include <asm/delay.h>
28 #include <asm/ptrace.h>
29 #include <asm/oplib.h>
30 #include <asm/page.h>
31 #include <asm/pgtable.h>
32 #include <asm/unistd.h>
33 #include <linux/uaccess.h>
34 #include <asm/fpumacro.h>
35 #include <asm/lsu.h>
36 #include <asm/dcu.h>
37 #include <asm/estate.h>
38 #include <asm/chafsr.h>
39 #include <asm/sfafsr.h>
40 #include <asm/psrcompat.h>
41 #include <asm/processor.h>
42 #include <asm/timer.h>
43 #include <asm/head.h>
44 #include <asm/prom.h>
45 #include <asm/memctrl.h>
46 #include <asm/cacheflush.h>
47 #include <asm/setup.h>
48 
49 #include "entry.h"
50 #include "kernel.h"
51 #include "kstack.h"
52 
53 /* When an irrecoverable trap occurs at tl > 0, the trap entry
54  * code logs the trap state registers at every level in the trap
55  * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
56  * is as follows:
57  */
58 struct tl1_traplog {
59 	struct {
60 		unsigned long tstate;
61 		unsigned long tpc;
62 		unsigned long tnpc;
63 		unsigned long tt;
64 	} trapstack[4];
65 	unsigned long tl;
66 };
67 
68 static void dump_tl1_traplog(struct tl1_traplog *p)
69 {
70 	int i, limit;
71 
72 	printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
73 	       "dumping track stack.\n", p->tl);
74 
75 	limit = (tlb_type == hypervisor) ? 2 : 4;
76 	for (i = 0; i < limit; i++) {
77 		printk(KERN_EMERG
78 		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
79 		       "TNPC[%016lx] TT[%lx]\n",
80 		       i + 1,
81 		       p->trapstack[i].tstate, p->trapstack[i].tpc,
82 		       p->trapstack[i].tnpc, p->trapstack[i].tt);
83 		printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
84 	}
85 }
86 
87 void bad_trap(struct pt_regs *regs, long lvl)
88 {
89 	char buffer[36];
90 	siginfo_t info;
91 
92 	if (notify_die(DIE_TRAP, "bad trap", regs,
93 		       0, lvl, SIGTRAP) == NOTIFY_STOP)
94 		return;
95 
96 	if (lvl < 0x100) {
97 		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
98 		die_if_kernel(buffer, regs);
99 	}
100 
101 	lvl -= 0x100;
102 	if (regs->tstate & TSTATE_PRIV) {
103 		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
104 		die_if_kernel(buffer, regs);
105 	}
106 	if (test_thread_flag(TIF_32BIT)) {
107 		regs->tpc &= 0xffffffff;
108 		regs->tnpc &= 0xffffffff;
109 	}
110 	info.si_signo = SIGILL;
111 	info.si_errno = 0;
112 	info.si_code = ILL_ILLTRP;
113 	info.si_addr = (void __user *)regs->tpc;
114 	info.si_trapno = lvl;
115 	force_sig_info(SIGILL, &info, current);
116 }
117 
118 void bad_trap_tl1(struct pt_regs *regs, long lvl)
119 {
120 	char buffer[36];
121 
122 	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
123 		       0, lvl, SIGTRAP) == NOTIFY_STOP)
124 		return;
125 
126 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
127 
128 	sprintf (buffer, "Bad trap %lx at tl>0", lvl);
129 	die_if_kernel (buffer, regs);
130 }
131 
132 #ifdef CONFIG_DEBUG_BUGVERBOSE
133 void do_BUG(const char *file, int line)
134 {
135 	bust_spinlocks(1);
136 	printk("kernel BUG at %s:%d!\n", file, line);
137 }
138 EXPORT_SYMBOL(do_BUG);
139 #endif
140 
141 static DEFINE_SPINLOCK(dimm_handler_lock);
142 static dimm_printer_t dimm_handler;
143 
144 static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
145 {
146 	unsigned long flags;
147 	int ret = -ENODEV;
148 
149 	spin_lock_irqsave(&dimm_handler_lock, flags);
150 	if (dimm_handler) {
151 		ret = dimm_handler(synd_code, paddr, buf, buflen);
152 	} else if (tlb_type == spitfire) {
153 		if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
154 			ret = -EINVAL;
155 		else
156 			ret = 0;
157 	} else
158 		ret = -ENODEV;
159 	spin_unlock_irqrestore(&dimm_handler_lock, flags);
160 
161 	return ret;
162 }
163 
164 int register_dimm_printer(dimm_printer_t func)
165 {
166 	unsigned long flags;
167 	int ret = 0;
168 
169 	spin_lock_irqsave(&dimm_handler_lock, flags);
170 	if (!dimm_handler)
171 		dimm_handler = func;
172 	else
173 		ret = -EEXIST;
174 	spin_unlock_irqrestore(&dimm_handler_lock, flags);
175 
176 	return ret;
177 }
178 EXPORT_SYMBOL_GPL(register_dimm_printer);
179 
180 void unregister_dimm_printer(dimm_printer_t func)
181 {
182 	unsigned long flags;
183 
184 	spin_lock_irqsave(&dimm_handler_lock, flags);
185 	if (dimm_handler == func)
186 		dimm_handler = NULL;
187 	spin_unlock_irqrestore(&dimm_handler_lock, flags);
188 }
189 EXPORT_SYMBOL_GPL(unregister_dimm_printer);
190 
191 void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
192 {
193 	enum ctx_state prev_state = exception_enter();
194 	siginfo_t info;
195 
196 	if (notify_die(DIE_TRAP, "instruction access exception", regs,
197 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
198 		goto out;
199 
200 	if (regs->tstate & TSTATE_PRIV) {
201 		printk("spitfire_insn_access_exception: SFSR[%016lx] "
202 		       "SFAR[%016lx], going.\n", sfsr, sfar);
203 		die_if_kernel("Iax", regs);
204 	}
205 	if (test_thread_flag(TIF_32BIT)) {
206 		regs->tpc &= 0xffffffff;
207 		regs->tnpc &= 0xffffffff;
208 	}
209 	info.si_signo = SIGSEGV;
210 	info.si_errno = 0;
211 	info.si_code = SEGV_MAPERR;
212 	info.si_addr = (void __user *)regs->tpc;
213 	info.si_trapno = 0;
214 	force_sig_info(SIGSEGV, &info, current);
215 out:
216 	exception_exit(prev_state);
217 }
218 
219 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
220 {
221 	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
222 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
223 		return;
224 
225 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
226 	spitfire_insn_access_exception(regs, sfsr, sfar);
227 }
228 
229 void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
230 {
231 	unsigned short type = (type_ctx >> 16);
232 	unsigned short ctx  = (type_ctx & 0xffff);
233 	siginfo_t info;
234 
235 	if (notify_die(DIE_TRAP, "instruction access exception", regs,
236 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
237 		return;
238 
239 	if (regs->tstate & TSTATE_PRIV) {
240 		printk("sun4v_insn_access_exception: ADDR[%016lx] "
241 		       "CTX[%04x] TYPE[%04x], going.\n",
242 		       addr, ctx, type);
243 		die_if_kernel("Iax", regs);
244 	}
245 
246 	if (test_thread_flag(TIF_32BIT)) {
247 		regs->tpc &= 0xffffffff;
248 		regs->tnpc &= 0xffffffff;
249 	}
250 	info.si_signo = SIGSEGV;
251 	info.si_errno = 0;
252 	info.si_code = SEGV_MAPERR;
253 	info.si_addr = (void __user *) addr;
254 	info.si_trapno = 0;
255 	force_sig_info(SIGSEGV, &info, current);
256 }
257 
258 void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
259 {
260 	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
261 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
262 		return;
263 
264 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
265 	sun4v_insn_access_exception(regs, addr, type_ctx);
266 }
267 
268 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
269 {
270 	enum ctx_state prev_state = exception_enter();
271 	siginfo_t info;
272 
273 	if (notify_die(DIE_TRAP, "data access exception", regs,
274 		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
275 		goto out;
276 
277 	if (regs->tstate & TSTATE_PRIV) {
278 		/* Test if this comes from uaccess places. */
279 		const struct exception_table_entry *entry;
280 
281 		entry = search_exception_tables(regs->tpc);
282 		if (entry) {
283 			/* Ouch, somebody is trying VM hole tricks on us... */
284 #ifdef DEBUG_EXCEPTIONS
285 			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
286 			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
287 			       regs->tpc, entry->fixup);
288 #endif
289 			regs->tpc = entry->fixup;
290 			regs->tnpc = regs->tpc + 4;
291 			goto out;
292 		}
293 		/* Shit... */
294 		printk("spitfire_data_access_exception: SFSR[%016lx] "
295 		       "SFAR[%016lx], going.\n", sfsr, sfar);
296 		die_if_kernel("Dax", regs);
297 	}
298 
299 	info.si_signo = SIGSEGV;
300 	info.si_errno = 0;
301 	info.si_code = SEGV_MAPERR;
302 	info.si_addr = (void __user *)sfar;
303 	info.si_trapno = 0;
304 	force_sig_info(SIGSEGV, &info, current);
305 out:
306 	exception_exit(prev_state);
307 }
308 
309 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
310 {
311 	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
312 		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
313 		return;
314 
315 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
316 	spitfire_data_access_exception(regs, sfsr, sfar);
317 }
318 
319 void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
320 {
321 	unsigned short type = (type_ctx >> 16);
322 	unsigned short ctx  = (type_ctx & 0xffff);
323 	siginfo_t info;
324 
325 	if (notify_die(DIE_TRAP, "data access exception", regs,
326 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
327 		return;
328 
329 	if (regs->tstate & TSTATE_PRIV) {
330 		/* Test if this comes from uaccess places. */
331 		const struct exception_table_entry *entry;
332 
333 		entry = search_exception_tables(regs->tpc);
334 		if (entry) {
335 			/* Ouch, somebody is trying VM hole tricks on us... */
336 #ifdef DEBUG_EXCEPTIONS
337 			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
338 			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
339 			       regs->tpc, entry->fixup);
340 #endif
341 			regs->tpc = entry->fixup;
342 			regs->tnpc = regs->tpc + 4;
343 			return;
344 		}
345 		printk("sun4v_data_access_exception: ADDR[%016lx] "
346 		       "CTX[%04x] TYPE[%04x], going.\n",
347 		       addr, ctx, type);
348 		die_if_kernel("Dax", regs);
349 	}
350 
351 	if (test_thread_flag(TIF_32BIT)) {
352 		regs->tpc &= 0xffffffff;
353 		regs->tnpc &= 0xffffffff;
354 	}
355 	info.si_signo = SIGSEGV;
356 	info.si_errno = 0;
357 	info.si_code = SEGV_MAPERR;
358 	info.si_addr = (void __user *) addr;
359 	info.si_trapno = 0;
360 	force_sig_info(SIGSEGV, &info, current);
361 }
362 
363 void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
364 {
365 	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
366 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
367 		return;
368 
369 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
370 	sun4v_data_access_exception(regs, addr, type_ctx);
371 }
372 
373 #ifdef CONFIG_PCI
374 #include "pci_impl.h"
375 #endif
376 
377 /* When access exceptions happen, we must do this. */
378 static void spitfire_clean_and_reenable_l1_caches(void)
379 {
380 	unsigned long va;
381 
382 	if (tlb_type != spitfire)
383 		BUG();
384 
385 	/* Clean 'em. */
386 	for (va =  0; va < (PAGE_SIZE << 1); va += 32) {
387 		spitfire_put_icache_tag(va, 0x0);
388 		spitfire_put_dcache_tag(va, 0x0);
389 	}
390 
391 	/* Re-enable in LSU. */
392 	__asm__ __volatile__("flush %%g6\n\t"
393 			     "membar #Sync\n\t"
394 			     "stxa %0, [%%g0] %1\n\t"
395 			     "membar #Sync"
396 			     : /* no outputs */
397 			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
398 				    LSU_CONTROL_IM | LSU_CONTROL_DM),
399 			     "i" (ASI_LSU_CONTROL)
400 			     : "memory");
401 }
402 
403 static void spitfire_enable_estate_errors(void)
404 {
405 	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
406 			     "membar	#Sync"
407 			     : /* no outputs */
408 			     : "r" (ESTATE_ERR_ALL),
409 			       "i" (ASI_ESTATE_ERROR_EN));
410 }
411 
412 static char ecc_syndrome_table[] = {
413 	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
414 	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
415 	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
416 	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
417 	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
418 	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
419 	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
420 	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
421 	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
422 	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
423 	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
424 	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
425 	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
426 	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
427 	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
428 	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
429 	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
430 	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
431 	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
432 	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
433 	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
434 	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
435 	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
436 	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
437 	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
438 	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
439 	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
440 	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
441 	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
442 	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
443 	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
444 	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
445 };
446 
447 static char *syndrome_unknown = "<Unknown>";
448 
449 static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
450 {
451 	unsigned short scode;
452 	char memmod_str[64], *p;
453 
454 	if (udbl & bit) {
455 		scode = ecc_syndrome_table[udbl & 0xff];
456 		if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
457 			p = syndrome_unknown;
458 		else
459 			p = memmod_str;
460 		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
461 		       "Memory Module \"%s\"\n",
462 		       smp_processor_id(), scode, p);
463 	}
464 
465 	if (udbh & bit) {
466 		scode = ecc_syndrome_table[udbh & 0xff];
467 		if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
468 			p = syndrome_unknown;
469 		else
470 			p = memmod_str;
471 		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
472 		       "Memory Module \"%s\"\n",
473 		       smp_processor_id(), scode, p);
474 	}
475 
476 }
477 
478 static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
479 {
480 
481 	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
482 	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
483 	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);
484 
485 	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
486 
487 	/* We always log it, even if someone is listening for this
488 	 * trap.
489 	 */
490 	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
491 		   0, TRAP_TYPE_CEE, SIGTRAP);
492 
493 	/* The Correctable ECC Error trap does not disable I/D caches.  So
494 	 * we only have to restore the ESTATE Error Enable register.
495 	 */
496 	spitfire_enable_estate_errors();
497 }
498 
499 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
500 {
501 	siginfo_t info;
502 
503 	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
504 	       "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
505 	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
506 
507 	/* XXX add more human friendly logging of the error status
508 	 * XXX as is implemented for cheetah
509 	 */
510 
511 	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
512 
513 	/* We always log it, even if someone is listening for this
514 	 * trap.
515 	 */
516 	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
517 		   0, tt, SIGTRAP);
518 
519 	if (regs->tstate & TSTATE_PRIV) {
520 		if (tl1)
521 			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
522 		die_if_kernel("UE", regs);
523 	}
524 
525 	/* XXX need more intelligent processing here, such as is implemented
526 	 * XXX for cheetah errors, in fact if the E-cache still holds the
527 	 * XXX line with bad parity this will loop
528 	 */
529 
530 	spitfire_clean_and_reenable_l1_caches();
531 	spitfire_enable_estate_errors();
532 
533 	if (test_thread_flag(TIF_32BIT)) {
534 		regs->tpc &= 0xffffffff;
535 		regs->tnpc &= 0xffffffff;
536 	}
537 	info.si_signo = SIGBUS;
538 	info.si_errno = 0;
539 	info.si_code = BUS_OBJERR;
540 	info.si_addr = (void *)0;
541 	info.si_trapno = 0;
542 	force_sig_info(SIGBUS, &info, current);
543 }
544 
545 void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
546 {
547 	unsigned long afsr, tt, udbh, udbl;
548 	int tl1;
549 
550 	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
551 	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
552 	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
553 	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
554 	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
555 
556 #ifdef CONFIG_PCI
557 	if (tt == TRAP_TYPE_DAE &&
558 	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
559 		spitfire_clean_and_reenable_l1_caches();
560 		spitfire_enable_estate_errors();
561 
562 		pci_poke_faulted = 1;
563 		regs->tnpc = regs->tpc + 4;
564 		return;
565 	}
566 #endif
567 
568 	if (afsr & SFAFSR_UE)
569 		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
570 
571 	if (tt == TRAP_TYPE_CEE) {
572 		/* Handle the case where we took a CEE trap, but ACK'd
573 		 * only the UE state in the UDB error registers.
574 		 */
575 		if (afsr & SFAFSR_UE) {
576 			if (udbh & UDBE_CE) {
577 				__asm__ __volatile__(
578 					"stxa	%0, [%1] %2\n\t"
579 					"membar	#Sync"
580 					: /* no outputs */
581 					: "r" (udbh & UDBE_CE),
582 					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
583 			}
584 			if (udbl & UDBE_CE) {
585 				__asm__ __volatile__(
586 					"stxa	%0, [%1] %2\n\t"
587 					"membar	#Sync"
588 					: /* no outputs */
589 					: "r" (udbl & UDBE_CE),
590 					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
591 			}
592 		}
593 
594 		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
595 	}
596 }
597 
598 int cheetah_pcache_forced_on;
599 
600 void cheetah_enable_pcache(void)
601 {
602 	unsigned long dcr;
603 
604 	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
605 	       smp_processor_id());
606 
607 	__asm__ __volatile__("ldxa [%%g0] %1, %0"
608 			     : "=r" (dcr)
609 			     : "i" (ASI_DCU_CONTROL_REG));
610 	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
611 	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
612 			     "membar #Sync"
613 			     : /* no outputs */
614 			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
615 }
616 
617 /* Cheetah error trap handling. */
618 static unsigned long ecache_flush_physbase;
619 static unsigned long ecache_flush_linesize;
620 static unsigned long ecache_flush_size;
621 
622 /* This table is ordered in priority of errors and matches the
623  * AFAR overwrite policy as well.
624  */
625 
626 struct afsr_error_table {
627 	unsigned long mask;
628 	const char *name;
629 };
630 
631 static const char CHAFSR_PERR_msg[] =
632 	"System interface protocol error";
633 static const char CHAFSR_IERR_msg[] =
634 	"Internal processor error";
635 static const char CHAFSR_ISAP_msg[] =
636 	"System request parity error on incoming address";
637 static const char CHAFSR_UCU_msg[] =
638 	"Uncorrectable E-cache ECC error for ifetch/data";
639 static const char CHAFSR_UCC_msg[] =
640 	"SW Correctable E-cache ECC error for ifetch/data";
641 static const char CHAFSR_UE_msg[] =
642 	"Uncorrectable system bus data ECC error for read";
643 static const char CHAFSR_EDU_msg[] =
644 	"Uncorrectable E-cache ECC error for stmerge/blkld";
645 static const char CHAFSR_EMU_msg[] =
646 	"Uncorrectable system bus MTAG error";
647 static const char CHAFSR_WDU_msg[] =
648 	"Uncorrectable E-cache ECC error for writeback";
649 static const char CHAFSR_CPU_msg[] =
650 	"Uncorrectable ECC error for copyout";
651 static const char CHAFSR_CE_msg[] =
652 	"HW corrected system bus data ECC error for read";
653 static const char CHAFSR_EDC_msg[] =
654 	"HW corrected E-cache ECC error for stmerge/blkld";
655 static const char CHAFSR_EMC_msg[] =
656 	"HW corrected system bus MTAG ECC error";
657 static const char CHAFSR_WDC_msg[] =
658 	"HW corrected E-cache ECC error for writeback";
659 static const char CHAFSR_CPC_msg[] =
660 	"HW corrected ECC error for copyout";
661 static const char CHAFSR_TO_msg[] =
662 	"Unmapped error from system bus";
663 static const char CHAFSR_BERR_msg[] =
664 	"Bus error response from system bus";
665 static const char CHAFSR_IVC_msg[] =
666 	"HW corrected system bus data ECC error for ivec read";
667 static const char CHAFSR_IVU_msg[] =
668 	"Uncorrectable system bus data ECC error for ivec read";
669 static struct afsr_error_table __cheetah_error_table[] = {
670 	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
671 	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
672 	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
673 	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
674 	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
675 	{	CHAFSR_UE,	CHAFSR_UE_msg		},
676 	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
677 	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
678 	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
679 	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
680 	{	CHAFSR_CE,	CHAFSR_CE_msg		},
681 	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
682 	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
683 	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
684 	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
685 	{	CHAFSR_TO,	CHAFSR_TO_msg		},
686 	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
687 	/* These two do not update the AFAR. */
688 	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
689 	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
690 	{	0,		NULL			},
691 };
692 static const char CHPAFSR_DTO_msg[] =
693 	"System bus unmapped error for prefetch/storequeue-read";
694 static const char CHPAFSR_DBERR_msg[] =
695 	"System bus error for prefetch/storequeue-read";
696 static const char CHPAFSR_THCE_msg[] =
697 	"Hardware corrected E-cache Tag ECC error";
698 static const char CHPAFSR_TSCE_msg[] =
699 	"SW handled correctable E-cache Tag ECC error";
700 static const char CHPAFSR_TUE_msg[] =
701 	"Uncorrectable E-cache Tag ECC error";
702 static const char CHPAFSR_DUE_msg[] =
703 	"System bus uncorrectable data ECC error due to prefetch/store-fill";
704 static struct afsr_error_table __cheetah_plus_error_table[] = {
705 	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
706 	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
707 	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
708 	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
709 	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
710 	{	CHAFSR_UE,	CHAFSR_UE_msg		},
711 	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
712 	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
713 	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
714 	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
715 	{	CHAFSR_CE,	CHAFSR_CE_msg		},
716 	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
717 	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
718 	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
719 	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
720 	{	CHAFSR_TO,	CHAFSR_TO_msg		},
721 	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
722 	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
723 	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
724 	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
725 	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
726 	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
727 	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
728 	/* These two do not update the AFAR. */
729 	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
730 	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
731 	{	0,		NULL			},
732 };
733 static const char JPAFSR_JETO_msg[] =
734 	"System interface protocol error, hw timeout caused";
735 static const char JPAFSR_SCE_msg[] =
736 	"Parity error on system snoop results";
737 static const char JPAFSR_JEIC_msg[] =
738 	"System interface protocol error, illegal command detected";
739 static const char JPAFSR_JEIT_msg[] =
740 	"System interface protocol error, illegal ADTYPE detected";
741 static const char JPAFSR_OM_msg[] =
742 	"Out of range memory error has occurred";
743 static const char JPAFSR_ETP_msg[] =
744 	"Parity error on L2 cache tag SRAM";
745 static const char JPAFSR_UMS_msg[] =
746 	"Error due to unsupported store";
747 static const char JPAFSR_RUE_msg[] =
748 	"Uncorrectable ECC error from remote cache/memory";
749 static const char JPAFSR_RCE_msg[] =
750 	"Correctable ECC error from remote cache/memory";
751 static const char JPAFSR_BP_msg[] =
752 	"JBUS parity error on returned read data";
753 static const char JPAFSR_WBP_msg[] =
754 	"JBUS parity error on data for writeback or block store";
755 static const char JPAFSR_FRC_msg[] =
756 	"Foreign read to DRAM incurring correctable ECC error";
757 static const char JPAFSR_FRU_msg[] =
758 	"Foreign read to DRAM incurring uncorrectable ECC error";
759 static struct afsr_error_table __jalapeno_error_table[] = {
760 	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
761 	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
762 	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
763 	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
764 	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
765 	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
766 	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
767 	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
768 	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
769 	{	CHAFSR_UE,	CHAFSR_UE_msg		},
770 	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
771 	{	JPAFSR_OM,	JPAFSR_OM_msg		},
772 	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
773 	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
774 	{	CHAFSR_CE,	CHAFSR_CE_msg		},
775 	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
776 	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
777 	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
778 	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
779 	{	CHAFSR_TO,	CHAFSR_TO_msg		},
780 	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
781 	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
782 	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
783 	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
784 	{	JPAFSR_BP,	JPAFSR_BP_msg		},
785 	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
786 	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
787 	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
788 	/* These two do not update the AFAR. */
789 	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
790 	{	0,		NULL			},
791 };
792 static struct afsr_error_table *cheetah_error_table;
793 static unsigned long cheetah_afsr_errors;
794 
795 struct cheetah_err_info *cheetah_error_log;
796 
797 static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
798 {
799 	struct cheetah_err_info *p;
800 	int cpu = smp_processor_id();
801 
802 	if (!cheetah_error_log)
803 		return NULL;
804 
805 	p = cheetah_error_log + (cpu * 2);
806 	if ((afsr & CHAFSR_TL1) != 0UL)
807 		p++;
808 
809 	return p;
810 }
811 
812 extern unsigned int tl0_icpe[], tl1_icpe[];
813 extern unsigned int tl0_dcpe[], tl1_dcpe[];
814 extern unsigned int tl0_fecc[], tl1_fecc[];
815 extern unsigned int tl0_cee[], tl1_cee[];
816 extern unsigned int tl0_iae[], tl1_iae[];
817 extern unsigned int tl0_dae[], tl1_dae[];
818 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
819 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
820 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
821 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
822 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
823 
824 void __init cheetah_ecache_flush_init(void)
825 {
826 	unsigned long largest_size, smallest_linesize, order, ver;
827 	int i, sz;
828 
829 	/* Scan all cpu device tree nodes, note two values:
830 	 * 1) largest E-cache size
831 	 * 2) smallest E-cache line size
832 	 */
833 	largest_size = 0UL;
834 	smallest_linesize = ~0UL;
835 
836 	for (i = 0; i < NR_CPUS; i++) {
837 		unsigned long val;
838 
839 		val = cpu_data(i).ecache_size;
840 		if (!val)
841 			continue;
842 
843 		if (val > largest_size)
844 			largest_size = val;
845 
846 		val = cpu_data(i).ecache_line_size;
847 		if (val < smallest_linesize)
848 			smallest_linesize = val;
849 
850 	}
851 
852 	if (largest_size == 0UL || smallest_linesize == ~0UL) {
853 		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
854 			    "parameters.\n");
855 		prom_halt();
856 	}
857 
858 	ecache_flush_size = (2 * largest_size);
859 	ecache_flush_linesize = smallest_linesize;
860 
861 	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
862 
863 	if (ecache_flush_physbase == ~0UL) {
864 		prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
865 			    "contiguous physical memory.\n",
866 			    ecache_flush_size);
867 		prom_halt();
868 	}
869 
870 	/* Now allocate error trap reporting scoreboard. */
871 	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
872 	for (order = 0; order < MAX_ORDER; order++) {
873 		if ((PAGE_SIZE << order) >= sz)
874 			break;
875 	}
876 	cheetah_error_log = (struct cheetah_err_info *)
877 		__get_free_pages(GFP_KERNEL, order);
878 	if (!cheetah_error_log) {
879 		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
880 			    "error logging scoreboard (%d bytes).\n", sz);
881 		prom_halt();
882 	}
883 	memset(cheetah_error_log, 0, PAGE_SIZE << order);
884 
885 	/* Mark all AFSRs as invalid so that the trap handler will
886 	 * log new new information there.
887 	 */
888 	for (i = 0; i < 2 * NR_CPUS; i++)
889 		cheetah_error_log[i].afsr = CHAFSR_INVALID;
890 
891 	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
892 	if ((ver >> 32) == __JALAPENO_ID ||
893 	    (ver >> 32) == __SERRANO_ID) {
894 		cheetah_error_table = &__jalapeno_error_table[0];
895 		cheetah_afsr_errors = JPAFSR_ERRORS;
896 	} else if ((ver >> 32) == 0x003e0015) {
897 		cheetah_error_table = &__cheetah_plus_error_table[0];
898 		cheetah_afsr_errors = CHPAFSR_ERRORS;
899 	} else {
900 		cheetah_error_table = &__cheetah_error_table[0];
901 		cheetah_afsr_errors = CHAFSR_ERRORS;
902 	}
903 
904 	/* Now patch trap tables. */
905 	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
906 	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
907 	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
908 	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
909 	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
910 	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
911 	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
912 	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
913 	if (tlb_type == cheetah_plus) {
914 		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
915 		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
916 		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
917 		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
918 	}
919 	flushi(PAGE_OFFSET);
920 }
921 
922 static void cheetah_flush_ecache(void)
923 {
924 	unsigned long flush_base = ecache_flush_physbase;
925 	unsigned long flush_linesize = ecache_flush_linesize;
926 	unsigned long flush_size = ecache_flush_size;
927 
928 	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
929 			     "   bne,pt	%%xcc, 1b\n\t"
930 			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
931 			     : "=&r" (flush_size)
932 			     : "0" (flush_size), "r" (flush_base),
933 			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
934 }
935 
936 static void cheetah_flush_ecache_line(unsigned long physaddr)
937 {
938 	unsigned long alias;
939 
940 	physaddr &= ~(8UL - 1UL);
941 	physaddr = (ecache_flush_physbase +
942 		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
943 	alias = physaddr + (ecache_flush_size >> 1UL);
944 	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
945 			     "ldxa [%1] %2, %%g0\n\t"
946 			     "membar #Sync"
947 			     : /* no outputs */
948 			     : "r" (physaddr), "r" (alias),
949 			       "i" (ASI_PHYS_USE_EC));
950 }
951 
952 /* Unfortunately, the diagnostic access to the I-cache tags we need to
953  * use to clear the thing interferes with I-cache coherency transactions.
954  *
955  * So we must only flush the I-cache when it is disabled.
956  */
957 static void __cheetah_flush_icache(void)
958 {
959 	unsigned int icache_size, icache_line_size;
960 	unsigned long addr;
961 
962 	icache_size = local_cpu_data().icache_size;
963 	icache_line_size = local_cpu_data().icache_line_size;
964 
965 	/* Clear the valid bits in all the tags. */
966 	for (addr = 0; addr < icache_size; addr += icache_line_size) {
967 		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
968 				     "membar #Sync"
969 				     : /* no outputs */
970 				     : "r" (addr | (2 << 3)),
971 				       "i" (ASI_IC_TAG));
972 	}
973 }
974 
975 static void cheetah_flush_icache(void)
976 {
977 	unsigned long dcu_save;
978 
979 	/* Save current DCU, disable I-cache. */
980 	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
981 			     "or %0, %2, %%g1\n\t"
982 			     "stxa %%g1, [%%g0] %1\n\t"
983 			     "membar #Sync"
984 			     : "=r" (dcu_save)
985 			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
986 			     : "g1");
987 
988 	__cheetah_flush_icache();
989 
990 	/* Restore DCU register */
991 	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
992 			     "membar #Sync"
993 			     : /* no outputs */
994 			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
995 }
996 
997 static void cheetah_flush_dcache(void)
998 {
999 	unsigned int dcache_size, dcache_line_size;
1000 	unsigned long addr;
1001 
1002 	dcache_size = local_cpu_data().dcache_size;
1003 	dcache_line_size = local_cpu_data().dcache_line_size;
1004 
1005 	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
1006 		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
1007 				     "membar #Sync"
1008 				     : /* no outputs */
1009 				     : "r" (addr), "i" (ASI_DCACHE_TAG));
1010 	}
1011 }
1012 
1013 /* In order to make the even parity correct we must do two things.
1014  * First, we clear DC_data_parity and set DC_utag to an appropriate value.
1015  * Next, we clear out all 32-bytes of data for that line.  Data of
1016  * all-zero + tag parity value of zero == correct parity.
1017  */
1018 static void cheetah_plus_zap_dcache_parity(void)
1019 {
1020 	unsigned int dcache_size, dcache_line_size;
1021 	unsigned long addr;
1022 
1023 	dcache_size = local_cpu_data().dcache_size;
1024 	dcache_line_size = local_cpu_data().dcache_line_size;
1025 
1026 	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
1027 		unsigned long tag = (addr >> 14);
1028 		unsigned long line;
1029 
1030 		__asm__ __volatile__("membar	#Sync\n\t"
1031 				     "stxa	%0, [%1] %2\n\t"
1032 				     "membar	#Sync"
1033 				     : /* no outputs */
1034 				     : "r" (tag), "r" (addr),
1035 				       "i" (ASI_DCACHE_UTAG));
1036 		for (line = addr; line < addr + dcache_line_size; line += 8)
1037 			__asm__ __volatile__("membar	#Sync\n\t"
1038 					     "stxa	%%g0, [%0] %1\n\t"
1039 					     "membar	#Sync"
1040 					     : /* no outputs */
1041 					     : "r" (line),
1042 					       "i" (ASI_DCACHE_DATA));
1043 	}
1044 }
1045 
1046 /* Conversion tables used to frob Cheetah AFSR syndrome values into
1047  * something palatable to the memory controller driver get_unumber
1048  * routine.
1049  */
1050 #define MT0	137
1051 #define MT1	138
1052 #define MT2	139
1053 #define NONE	254
1054 #define MTC0	140
1055 #define MTC1	141
1056 #define MTC2	142
1057 #define MTC3	143
1058 #define C0	128
1059 #define C1	129
1060 #define C2	130
1061 #define C3	131
1062 #define C4	132
1063 #define C5	133
1064 #define C6	134
1065 #define C7	135
1066 #define C8	136
1067 #define M2	144
1068 #define M3	145
1069 #define M4	146
1070 #define M	147
1071 static unsigned char cheetah_ecc_syntab[] = {
1072 /*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
1073 /*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
1074 /*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
1075 /*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
1076 /*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
1077 /*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
1078 /*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
1079 /*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
1080 /*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
1081 /*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
1082 /*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
1083 /*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
1084 /*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
1085 /*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
1086 /*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
1087 /*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
1088 /*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
1089 /*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
1090 /*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
1091 /*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
1092 /*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
1093 /*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
1094 /*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
1095 /*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
1096 /*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
1097 /*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
1098 /*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
1099 /*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
1100 /*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
1101 /*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
1102 /*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
1103 /*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
1104 };
1105 static unsigned char cheetah_mtag_syntab[] = {
1106        NONE, MTC0,
1107        MTC1, NONE,
1108        MTC2, NONE,
1109        NONE, MT0,
1110        MTC3, NONE,
1111        NONE, MT1,
1112        NONE, MT2,
1113        NONE, NONE
1114 };
1115 
1116 /* Return the highest priority error conditon mentioned. */
1117 static inline unsigned long cheetah_get_hipri(unsigned long afsr)
1118 {
1119 	unsigned long tmp = 0;
1120 	int i;
1121 
1122 	for (i = 0; cheetah_error_table[i].mask; i++) {
1123 		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1124 			return tmp;
1125 	}
1126 	return tmp;
1127 }
1128 
1129 static const char *cheetah_get_string(unsigned long bit)
1130 {
1131 	int i;
1132 
1133 	for (i = 0; cheetah_error_table[i].mask; i++) {
1134 		if ((bit & cheetah_error_table[i].mask) != 0UL)
1135 			return cheetah_error_table[i].name;
1136 	}
1137 	return "???";
1138 }
1139 
1140 static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
1141 			       unsigned long afsr, unsigned long afar, int recoverable)
1142 {
1143 	unsigned long hipri;
1144 	char unum[256];
1145 
1146 	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
1147 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1148 	       afsr, afar,
1149 	       (afsr & CHAFSR_TL1) ? 1 : 0);
1150 	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
1151 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1152 	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
1153 	printk("%s" "ERROR(%d): ",
1154 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
1155 	printk("TPC<%pS>\n", (void *) regs->tpc);
1156 	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
1157 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1158 	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
1159 	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
1160 	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
1161 	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
1162 	hipri = cheetah_get_hipri(afsr);
1163 	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
1164 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1165 	       hipri, cheetah_get_string(hipri));
1166 
1167 	/* Try to get unumber if relevant. */
1168 #define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
1169 			 CHAFSR_CPC | CHAFSR_CPU | \
1170 			 CHAFSR_UE  | CHAFSR_CE  | \
1171 			 CHAFSR_EDC | CHAFSR_EDU  | \
1172 			 CHAFSR_UCC | CHAFSR_UCU  | \
1173 			 CHAFSR_WDU | CHAFSR_WDC)
1174 #define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
1175 	if (afsr & ESYND_ERRORS) {
1176 		int syndrome;
1177 		int ret;
1178 
1179 		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
1180 		syndrome = cheetah_ecc_syntab[syndrome];
1181 		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
1182 		if (ret != -1)
1183 			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1184 			       (recoverable ? KERN_WARNING : KERN_CRIT),
1185 			       smp_processor_id(), unum);
1186 	} else if (afsr & MSYND_ERRORS) {
1187 		int syndrome;
1188 		int ret;
1189 
1190 		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
1191 		syndrome = cheetah_mtag_syntab[syndrome];
1192 		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
1193 		if (ret != -1)
1194 			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1195 			       (recoverable ? KERN_WARNING : KERN_CRIT),
1196 			       smp_processor_id(), unum);
1197 	}
1198 
1199 	/* Now dump the cache snapshots. */
1200 	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
1201 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1202 	       (int) info->dcache_index,
1203 	       info->dcache_tag,
1204 	       info->dcache_utag,
1205 	       info->dcache_stag);
1206 	printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
1207 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1208 	       info->dcache_data[0],
1209 	       info->dcache_data[1],
1210 	       info->dcache_data[2],
1211 	       info->dcache_data[3]);
1212 	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
1213 	       "u[%016llx] l[%016llx]\n",
1214 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1215 	       (int) info->icache_index,
1216 	       info->icache_tag,
1217 	       info->icache_utag,
1218 	       info->icache_stag,
1219 	       info->icache_upper,
1220 	       info->icache_lower);
1221 	printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
1222 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1223 	       info->icache_data[0],
1224 	       info->icache_data[1],
1225 	       info->icache_data[2],
1226 	       info->icache_data[3]);
1227 	printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
1228 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1229 	       info->icache_data[4],
1230 	       info->icache_data[5],
1231 	       info->icache_data[6],
1232 	       info->icache_data[7]);
1233 	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
1234 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1235 	       (int) info->ecache_index, info->ecache_tag);
1236 	printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
1237 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1238 	       info->ecache_data[0],
1239 	       info->ecache_data[1],
1240 	       info->ecache_data[2],
1241 	       info->ecache_data[3]);
1242 
1243 	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
1244 	while (afsr != 0UL) {
1245 		unsigned long bit = cheetah_get_hipri(afsr);
1246 
1247 		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1248 		       (recoverable ? KERN_WARNING : KERN_CRIT),
1249 		       bit, cheetah_get_string(bit));
1250 
1251 		afsr &= ~bit;
1252 	}
1253 
1254 	if (!recoverable)
1255 		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
1256 }
1257 
1258 static int cheetah_recheck_errors(struct cheetah_err_info *logp)
1259 {
1260 	unsigned long afsr, afar;
1261 	int ret = 0;
1262 
1263 	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1264 			     : "=r" (afsr)
1265 			     : "i" (ASI_AFSR));
1266 	if ((afsr & cheetah_afsr_errors) != 0) {
1267 		if (logp != NULL) {
1268 			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1269 					     : "=r" (afar)
1270 					     : "i" (ASI_AFAR));
1271 			logp->afsr = afsr;
1272 			logp->afar = afar;
1273 		}
1274 		ret = 1;
1275 	}
1276 	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1277 			     "membar #Sync\n\t"
1278 			     : : "r" (afsr), "i" (ASI_AFSR));
1279 
1280 	return ret;
1281 }
1282 
1283 void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1284 {
1285 	struct cheetah_err_info local_snapshot, *p;
1286 	int recoverable;
1287 
1288 	/* Flush E-cache */
1289 	cheetah_flush_ecache();
1290 
1291 	p = cheetah_get_error_log(afsr);
1292 	if (!p) {
1293 		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1294 			    afsr, afar);
1295 		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1296 			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1297 		prom_halt();
1298 	}
1299 
1300 	/* Grab snapshot of logged error. */
1301 	memcpy(&local_snapshot, p, sizeof(local_snapshot));
1302 
1303 	/* If the current trap snapshot does not match what the
1304 	 * trap handler passed along into our args, big trouble.
1305 	 * In such a case, mark the local copy as invalid.
1306 	 *
1307 	 * Else, it matches and we mark the afsr in the non-local
1308 	 * copy as invalid so we may log new error traps there.
1309 	 */
1310 	if (p->afsr != afsr || p->afar != afar)
1311 		local_snapshot.afsr = CHAFSR_INVALID;
1312 	else
1313 		p->afsr = CHAFSR_INVALID;
1314 
1315 	cheetah_flush_icache();
1316 	cheetah_flush_dcache();
1317 
1318 	/* Re-enable I-cache/D-cache */
1319 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1320 			     "or %%g1, %1, %%g1\n\t"
1321 			     "stxa %%g1, [%%g0] %0\n\t"
1322 			     "membar #Sync"
1323 			     : /* no outputs */
1324 			     : "i" (ASI_DCU_CONTROL_REG),
1325 			       "i" (DCU_DC | DCU_IC)
1326 			     : "g1");
1327 
1328 	/* Re-enable error reporting */
1329 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1330 			     "or %%g1, %1, %%g1\n\t"
1331 			     "stxa %%g1, [%%g0] %0\n\t"
1332 			     "membar #Sync"
1333 			     : /* no outputs */
1334 			     : "i" (ASI_ESTATE_ERROR_EN),
1335 			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1336 			     : "g1");
1337 
1338 	/* Decide if we can continue after handling this trap and
1339 	 * logging the error.
1340 	 */
1341 	recoverable = 1;
1342 	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1343 		recoverable = 0;
1344 
1345 	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
1346 	 * error was logged while we had error reporting traps disabled.
1347 	 */
1348 	if (cheetah_recheck_errors(&local_snapshot)) {
1349 		unsigned long new_afsr = local_snapshot.afsr;
1350 
1351 		/* If we got a new asynchronous error, die... */
1352 		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1353 				CHAFSR_WDU | CHAFSR_CPU |
1354 				CHAFSR_IVU | CHAFSR_UE |
1355 				CHAFSR_BERR | CHAFSR_TO))
1356 			recoverable = 0;
1357 	}
1358 
1359 	/* Log errors. */
1360 	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1361 
1362 	if (!recoverable)
1363 		panic("Irrecoverable Fast-ECC error trap.\n");
1364 
1365 	/* Flush E-cache to kick the error trap handlers out. */
1366 	cheetah_flush_ecache();
1367 }
1368 
1369 /* Try to fix a correctable error by pushing the line out from
1370  * the E-cache.  Recheck error reporting registers to see if the
1371  * problem is intermittent.
1372  */
1373 static int cheetah_fix_ce(unsigned long physaddr)
1374 {
1375 	unsigned long orig_estate;
1376 	unsigned long alias1, alias2;
1377 	int ret;
1378 
1379 	/* Make sure correctable error traps are disabled. */
1380 	__asm__ __volatile__("ldxa	[%%g0] %2, %0\n\t"
1381 			     "andn	%0, %1, %%g1\n\t"
1382 			     "stxa	%%g1, [%%g0] %2\n\t"
1383 			     "membar	#Sync"
1384 			     : "=&r" (orig_estate)
1385 			     : "i" (ESTATE_ERROR_CEEN),
1386 			       "i" (ASI_ESTATE_ERROR_EN)
1387 			     : "g1");
1388 
1389 	/* We calculate alias addresses that will force the
1390 	 * cache line in question out of the E-cache.  Then
1391 	 * we bring it back in with an atomic instruction so
1392 	 * that we get it in some modified/exclusive state,
1393 	 * then we displace it again to try and get proper ECC
1394 	 * pushed back into the system.
1395 	 */
1396 	physaddr &= ~(8UL - 1UL);
1397 	alias1 = (ecache_flush_physbase +
1398 		  (physaddr & ((ecache_flush_size >> 1) - 1)));
1399 	alias2 = alias1 + (ecache_flush_size >> 1);
1400 	__asm__ __volatile__("ldxa	[%0] %3, %%g0\n\t"
1401 			     "ldxa	[%1] %3, %%g0\n\t"
1402 			     "casxa	[%2] %3, %%g0, %%g0\n\t"
1403 			     "ldxa	[%0] %3, %%g0\n\t"
1404 			     "ldxa	[%1] %3, %%g0\n\t"
1405 			     "membar	#Sync"
1406 			     : /* no outputs */
1407 			     : "r" (alias1), "r" (alias2),
1408 			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1409 
1410 	/* Did that trigger another error? */
1411 	if (cheetah_recheck_errors(NULL)) {
1412 		/* Try one more time. */
1413 		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1414 				     "membar #Sync"
1415 				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1416 		if (cheetah_recheck_errors(NULL))
1417 			ret = 2;
1418 		else
1419 			ret = 1;
1420 	} else {
1421 		/* No new error, intermittent problem. */
1422 		ret = 0;
1423 	}
1424 
1425 	/* Restore error enables. */
1426 	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
1427 			     "membar	#Sync"
1428 			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1429 
1430 	return ret;
1431 }
1432 
1433 /* Return non-zero if PADDR is a valid physical memory address. */
1434 static int cheetah_check_main_memory(unsigned long paddr)
1435 {
1436 	unsigned long vaddr = PAGE_OFFSET + paddr;
1437 
1438 	if (vaddr > (unsigned long) high_memory)
1439 		return 0;
1440 
1441 	return kern_addr_valid(vaddr);
1442 }
1443 
1444 void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1445 {
1446 	struct cheetah_err_info local_snapshot, *p;
1447 	int recoverable, is_memory;
1448 
1449 	p = cheetah_get_error_log(afsr);
1450 	if (!p) {
1451 		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1452 			    afsr, afar);
1453 		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1454 			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1455 		prom_halt();
1456 	}
1457 
1458 	/* Grab snapshot of logged error. */
1459 	memcpy(&local_snapshot, p, sizeof(local_snapshot));
1460 
1461 	/* If the current trap snapshot does not match what the
1462 	 * trap handler passed along into our args, big trouble.
1463 	 * In such a case, mark the local copy as invalid.
1464 	 *
1465 	 * Else, it matches and we mark the afsr in the non-local
1466 	 * copy as invalid so we may log new error traps there.
1467 	 */
1468 	if (p->afsr != afsr || p->afar != afar)
1469 		local_snapshot.afsr = CHAFSR_INVALID;
1470 	else
1471 		p->afsr = CHAFSR_INVALID;
1472 
1473 	is_memory = cheetah_check_main_memory(afar);
1474 
1475 	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1476 		/* XXX Might want to log the results of this operation
1477 		 * XXX somewhere... -DaveM
1478 		 */
1479 		cheetah_fix_ce(afar);
1480 	}
1481 
1482 	{
1483 		int flush_all, flush_line;
1484 
1485 		flush_all = flush_line = 0;
1486 		if ((afsr & CHAFSR_EDC) != 0UL) {
1487 			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1488 				flush_line = 1;
1489 			else
1490 				flush_all = 1;
1491 		} else if ((afsr & CHAFSR_CPC) != 0UL) {
1492 			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1493 				flush_line = 1;
1494 			else
1495 				flush_all = 1;
1496 		}
1497 
1498 		/* Trap handler only disabled I-cache, flush it. */
1499 		cheetah_flush_icache();
1500 
1501 		/* Re-enable I-cache */
1502 		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1503 				     "or %%g1, %1, %%g1\n\t"
1504 				     "stxa %%g1, [%%g0] %0\n\t"
1505 				     "membar #Sync"
1506 				     : /* no outputs */
1507 				     : "i" (ASI_DCU_CONTROL_REG),
1508 				     "i" (DCU_IC)
1509 				     : "g1");
1510 
1511 		if (flush_all)
1512 			cheetah_flush_ecache();
1513 		else if (flush_line)
1514 			cheetah_flush_ecache_line(afar);
1515 	}
1516 
1517 	/* Re-enable error reporting */
1518 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1519 			     "or %%g1, %1, %%g1\n\t"
1520 			     "stxa %%g1, [%%g0] %0\n\t"
1521 			     "membar #Sync"
1522 			     : /* no outputs */
1523 			     : "i" (ASI_ESTATE_ERROR_EN),
1524 			       "i" (ESTATE_ERROR_CEEN)
1525 			     : "g1");
1526 
1527 	/* Decide if we can continue after handling this trap and
1528 	 * logging the error.
1529 	 */
1530 	recoverable = 1;
1531 	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1532 		recoverable = 0;
1533 
1534 	/* Re-check AFSR/AFAR */
1535 	(void) cheetah_recheck_errors(&local_snapshot);
1536 
1537 	/* Log errors. */
1538 	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1539 
1540 	if (!recoverable)
1541 		panic("Irrecoverable Correctable-ECC error trap.\n");
1542 }
1543 
1544 void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1545 {
1546 	struct cheetah_err_info local_snapshot, *p;
1547 	int recoverable, is_memory;
1548 
1549 #ifdef CONFIG_PCI
1550 	/* Check for the special PCI poke sequence. */
1551 	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1552 		cheetah_flush_icache();
1553 		cheetah_flush_dcache();
1554 
1555 		/* Re-enable I-cache/D-cache */
1556 		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1557 				     "or %%g1, %1, %%g1\n\t"
1558 				     "stxa %%g1, [%%g0] %0\n\t"
1559 				     "membar #Sync"
1560 				     : /* no outputs */
1561 				     : "i" (ASI_DCU_CONTROL_REG),
1562 				       "i" (DCU_DC | DCU_IC)
1563 				     : "g1");
1564 
1565 		/* Re-enable error reporting */
1566 		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1567 				     "or %%g1, %1, %%g1\n\t"
1568 				     "stxa %%g1, [%%g0] %0\n\t"
1569 				     "membar #Sync"
1570 				     : /* no outputs */
1571 				     : "i" (ASI_ESTATE_ERROR_EN),
1572 				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1573 				     : "g1");
1574 
1575 		(void) cheetah_recheck_errors(NULL);
1576 
1577 		pci_poke_faulted = 1;
1578 		regs->tpc += 4;
1579 		regs->tnpc = regs->tpc + 4;
1580 		return;
1581 	}
1582 #endif
1583 
1584 	p = cheetah_get_error_log(afsr);
1585 	if (!p) {
1586 		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1587 			    afsr, afar);
1588 		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1589 			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1590 		prom_halt();
1591 	}
1592 
1593 	/* Grab snapshot of logged error. */
1594 	memcpy(&local_snapshot, p, sizeof(local_snapshot));
1595 
1596 	/* If the current trap snapshot does not match what the
1597 	 * trap handler passed along into our args, big trouble.
1598 	 * In such a case, mark the local copy as invalid.
1599 	 *
1600 	 * Else, it matches and we mark the afsr in the non-local
1601 	 * copy as invalid so we may log new error traps there.
1602 	 */
1603 	if (p->afsr != afsr || p->afar != afar)
1604 		local_snapshot.afsr = CHAFSR_INVALID;
1605 	else
1606 		p->afsr = CHAFSR_INVALID;
1607 
1608 	is_memory = cheetah_check_main_memory(afar);
1609 
1610 	{
1611 		int flush_all, flush_line;
1612 
1613 		flush_all = flush_line = 0;
1614 		if ((afsr & CHAFSR_EDU) != 0UL) {
1615 			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
1616 				flush_line = 1;
1617 			else
1618 				flush_all = 1;
1619 		} else if ((afsr & CHAFSR_BERR) != 0UL) {
1620 			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
1621 				flush_line = 1;
1622 			else
1623 				flush_all = 1;
1624 		}
1625 
1626 		cheetah_flush_icache();
1627 		cheetah_flush_dcache();
1628 
1629 		/* Re-enable I/D caches */
1630 		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1631 				     "or %%g1, %1, %%g1\n\t"
1632 				     "stxa %%g1, [%%g0] %0\n\t"
1633 				     "membar #Sync"
1634 				     : /* no outputs */
1635 				     : "i" (ASI_DCU_CONTROL_REG),
1636 				     "i" (DCU_IC | DCU_DC)
1637 				     : "g1");
1638 
1639 		if (flush_all)
1640 			cheetah_flush_ecache();
1641 		else if (flush_line)
1642 			cheetah_flush_ecache_line(afar);
1643 	}
1644 
1645 	/* Re-enable error reporting */
1646 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1647 			     "or %%g1, %1, %%g1\n\t"
1648 			     "stxa %%g1, [%%g0] %0\n\t"
1649 			     "membar #Sync"
1650 			     : /* no outputs */
1651 			     : "i" (ASI_ESTATE_ERROR_EN),
1652 			     "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1653 			     : "g1");
1654 
1655 	/* Decide if we can continue after handling this trap and
1656 	 * logging the error.
1657 	 */
1658 	recoverable = 1;
1659 	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1660 		recoverable = 0;
1661 
1662 	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
1663 	 * error was logged while we had error reporting traps disabled.
1664 	 */
1665 	if (cheetah_recheck_errors(&local_snapshot)) {
1666 		unsigned long new_afsr = local_snapshot.afsr;
1667 
1668 		/* If we got a new asynchronous error, die... */
1669 		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1670 				CHAFSR_WDU | CHAFSR_CPU |
1671 				CHAFSR_IVU | CHAFSR_UE |
1672 				CHAFSR_BERR | CHAFSR_TO))
1673 			recoverable = 0;
1674 	}
1675 
1676 	/* Log errors. */
1677 	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1678 
1679 	/* "Recoverable" here means we try to yank the page from ever
1680 	 * being newly used again.  This depends upon a few things:
1681 	 * 1) Must be main memory, and AFAR must be valid.
1682 	 * 2) If we trapped from user, OK.
1683 	 * 3) Else, if we trapped from kernel we must find exception
1684 	 *    table entry (ie. we have to have been accessing user
1685 	 *    space).
1686 	 *
1687 	 * If AFAR is not in main memory, or we trapped from kernel
1688 	 * and cannot find an exception table entry, it is unacceptable
1689 	 * to try and continue.
1690 	 */
1691 	if (recoverable && is_memory) {
1692 		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1693 			/* OK, usermode access. */
1694 			recoverable = 1;
1695 		} else {
1696 			const struct exception_table_entry *entry;
1697 
1698 			entry = search_exception_tables(regs->tpc);
1699 			if (entry) {
1700 				/* OK, kernel access to userspace. */
1701 				recoverable = 1;
1702 
1703 			} else {
1704 				/* BAD, privileged state is corrupted. */
1705 				recoverable = 0;
1706 			}
1707 
1708 			if (recoverable) {
1709 				if (pfn_valid(afar >> PAGE_SHIFT))
1710 					get_page(pfn_to_page(afar >> PAGE_SHIFT));
1711 				else
1712 					recoverable = 0;
1713 
1714 				/* Only perform fixup if we still have a
1715 				 * recoverable condition.
1716 				 */
1717 				if (recoverable) {
1718 					regs->tpc = entry->fixup;
1719 					regs->tnpc = regs->tpc + 4;
1720 				}
1721 			}
1722 		}
1723 	} else {
1724 		recoverable = 0;
1725 	}
1726 
1727 	if (!recoverable)
1728 		panic("Irrecoverable deferred error trap.\n");
1729 }
1730 
1731 /* Handle a D/I cache parity error trap.  TYPE is encoded as:
1732  *
1733  * Bit0:	0=dcache,1=icache
1734  * Bit1:	0=recoverable,1=unrecoverable
1735  *
1736  * The hardware has disabled both the I-cache and D-cache in
1737  * the %dcr register.
1738  */
1739 void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1740 {
1741 	if (type & 0x1)
1742 		__cheetah_flush_icache();
1743 	else
1744 		cheetah_plus_zap_dcache_parity();
1745 	cheetah_flush_dcache();
1746 
1747 	/* Re-enable I-cache/D-cache */
1748 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1749 			     "or %%g1, %1, %%g1\n\t"
1750 			     "stxa %%g1, [%%g0] %0\n\t"
1751 			     "membar #Sync"
1752 			     : /* no outputs */
1753 			     : "i" (ASI_DCU_CONTROL_REG),
1754 			       "i" (DCU_DC | DCU_IC)
1755 			     : "g1");
1756 
1757 	if (type & 0x2) {
1758 		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1759 		       smp_processor_id(),
1760 		       (type & 0x1) ? 'I' : 'D',
1761 		       regs->tpc);
1762 		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
1763 		panic("Irrecoverable Cheetah+ parity error.");
1764 	}
1765 
1766 	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1767 	       smp_processor_id(),
1768 	       (type & 0x1) ? 'I' : 'D',
1769 	       regs->tpc);
1770 	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
1771 }
1772 
1773 struct sun4v_error_entry {
1774 	/* Unique error handle */
1775 /*0x00*/u64		err_handle;
1776 
1777 	/* %stick value at the time of the error */
1778 /*0x08*/u64		err_stick;
1779 
1780 /*0x10*/u8		reserved_1[3];
1781 
1782 	/* Error type */
1783 /*0x13*/u8		err_type;
1784 #define SUN4V_ERR_TYPE_UNDEFINED	0
1785 #define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
1786 #define SUN4V_ERR_TYPE_PRECISE_NONRES	2
1787 #define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
1788 #define SUN4V_ERR_TYPE_SHUTDOWN_RQST	4
1789 #define SUN4V_ERR_TYPE_DUMP_CORE	5
1790 #define SUN4V_ERR_TYPE_SP_STATE_CHANGE	6
1791 #define SUN4V_ERR_TYPE_NUM		7
1792 
1793 	/* Error attributes */
1794 /*0x14*/u32		err_attrs;
1795 #define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
1796 #define SUN4V_ERR_ATTRS_MEMORY		0x00000002
1797 #define SUN4V_ERR_ATTRS_PIO		0x00000004
1798 #define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
1799 #define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
1800 #define SUN4V_ERR_ATTRS_SHUTDOWN_RQST	0x00000020
1801 #define SUN4V_ERR_ATTRS_ASR		0x00000040
1802 #define SUN4V_ERR_ATTRS_ASI		0x00000080
1803 #define SUN4V_ERR_ATTRS_PRIV_REG	0x00000100
1804 #define SUN4V_ERR_ATTRS_SPSTATE_MSK	0x00000600
1805 #define SUN4V_ERR_ATTRS_SPSTATE_SHFT	9
1806 #define SUN4V_ERR_ATTRS_MODE_MSK	0x03000000
1807 #define SUN4V_ERR_ATTRS_MODE_SHFT	24
1808 #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000
1809 
1810 #define SUN4V_ERR_SPSTATE_FAULTED	0
1811 #define SUN4V_ERR_SPSTATE_AVAILABLE	1
1812 #define SUN4V_ERR_SPSTATE_NOT_PRESENT	2
1813 
1814 #define SUN4V_ERR_MODE_USER		1
1815 #define SUN4V_ERR_MODE_PRIV		2
1816 
1817 	/* Real address of the memory region or PIO transaction */
1818 /*0x18*/u64		err_raddr;
1819 
1820 	/* Size of the operation triggering the error, in bytes */
1821 /*0x20*/u32		err_size;
1822 
1823 	/* ID of the CPU */
1824 /*0x24*/u16		err_cpu;
1825 
1826 	/* Grace periof for shutdown, in seconds */
1827 /*0x26*/u16		err_secs;
1828 
1829 	/* Value of the %asi register */
1830 /*0x28*/u8		err_asi;
1831 
1832 /*0x29*/u8		reserved_2;
1833 
1834 	/* Value of the ASR register number */
1835 /*0x2a*/u16		err_asr;
1836 #define SUN4V_ERR_ASR_VALID		0x8000
1837 
1838 /*0x2c*/u32		reserved_3;
1839 /*0x30*/u64		reserved_4;
1840 /*0x38*/u64		reserved_5;
1841 };
1842 
1843 static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
1844 static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1845 
1846 static const char *sun4v_err_type_to_str(u8 type)
1847 {
1848 	static const char *types[SUN4V_ERR_TYPE_NUM] = {
1849 		"undefined",
1850 		"uncorrected resumable",
1851 		"precise nonresumable",
1852 		"deferred nonresumable",
1853 		"shutdown request",
1854 		"dump core",
1855 		"SP state change",
1856 	};
1857 
1858 	if (type < SUN4V_ERR_TYPE_NUM)
1859 		return types[type];
1860 
1861 	return "unknown";
1862 }
1863 
1864 static void sun4v_emit_err_attr_strings(u32 attrs)
1865 {
1866 	static const char *attr_names[] = {
1867 		"processor",
1868 		"memory",
1869 		"PIO",
1870 		"int-registers",
1871 		"fpu-registers",
1872 		"shutdown-request",
1873 		"ASR",
1874 		"ASI",
1875 		"priv-reg",
1876 	};
1877 	static const char *sp_states[] = {
1878 		"sp-faulted",
1879 		"sp-available",
1880 		"sp-not-present",
1881 		"sp-state-reserved",
1882 	};
1883 	static const char *modes[] = {
1884 		"mode-reserved0",
1885 		"user",
1886 		"priv",
1887 		"mode-reserved1",
1888 	};
1889 	u32 sp_state, mode;
1890 	int i;
1891 
1892 	for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
1893 		if (attrs & (1U << i)) {
1894 			const char *s = attr_names[i];
1895 
1896 			pr_cont("%s ", s);
1897 		}
1898 	}
1899 
1900 	sp_state = ((attrs & SUN4V_ERR_ATTRS_SPSTATE_MSK) >>
1901 		    SUN4V_ERR_ATTRS_SPSTATE_SHFT);
1902 	pr_cont("%s ", sp_states[sp_state]);
1903 
1904 	mode = ((attrs & SUN4V_ERR_ATTRS_MODE_MSK) >>
1905 		SUN4V_ERR_ATTRS_MODE_SHFT);
1906 	pr_cont("%s ", modes[mode]);
1907 
1908 	if (attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL)
1909 		pr_cont("res-queue-full ");
1910 }
1911 
1912 /* When the report contains a real-address of "-1" it means that the
1913  * hardware did not provide the address.  So we compute the effective
1914  * address of the load or store instruction at regs->tpc and report
1915  * that.  Usually when this happens it's a PIO and in such a case we
1916  * are using physical addresses with bypass ASIs anyways, so what we
1917  * report here is exactly what we want.
1918  */
1919 static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
1920 {
1921 	unsigned int insn;
1922 	u64 addr;
1923 
1924 	if (!(regs->tstate & TSTATE_PRIV))
1925 		return;
1926 
1927 	insn = *(unsigned int *) regs->tpc;
1928 
1929 	addr = compute_effective_address(regs, insn, 0);
1930 
1931 	printk("%s: insn effective address [0x%016llx]\n",
1932 	       pfx, addr);
1933 }
1934 
1935 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
1936 			    int cpu, const char *pfx, atomic_t *ocnt)
1937 {
1938 	u64 *raw_ptr = (u64 *) ent;
1939 	u32 attrs;
1940 	int cnt;
1941 
1942 	printk("%s: Reporting on cpu %d\n", pfx, cpu);
1943 	printk("%s: TPC [0x%016lx] <%pS>\n",
1944 	       pfx, regs->tpc, (void *) regs->tpc);
1945 
1946 	printk("%s: RAW [%016llx:%016llx:%016llx:%016llx\n",
1947 	       pfx, raw_ptr[0], raw_ptr[1], raw_ptr[2], raw_ptr[3]);
1948 	printk("%s:      %016llx:%016llx:%016llx:%016llx]\n",
1949 	       pfx, raw_ptr[4], raw_ptr[5], raw_ptr[6], raw_ptr[7]);
1950 
1951 	printk("%s: handle [0x%016llx] stick [0x%016llx]\n",
1952 	       pfx, ent->err_handle, ent->err_stick);
1953 
1954 	printk("%s: type [%s]\n", pfx, sun4v_err_type_to_str(ent->err_type));
1955 
1956 	attrs = ent->err_attrs;
1957 	printk("%s: attrs [0x%08x] < ", pfx, attrs);
1958 	sun4v_emit_err_attr_strings(attrs);
1959 	pr_cont(">\n");
1960 
1961 	/* Various fields in the error report are only valid if
1962 	 * certain attribute bits are set.
1963 	 */
1964 	if (attrs & (SUN4V_ERR_ATTRS_MEMORY |
1965 		     SUN4V_ERR_ATTRS_PIO |
1966 		     SUN4V_ERR_ATTRS_ASI)) {
1967 		printk("%s: raddr [0x%016llx]\n", pfx, ent->err_raddr);
1968 
1969 		if (ent->err_raddr == ~(u64)0)
1970 			sun4v_report_real_raddr(pfx, regs);
1971 	}
1972 
1973 	if (attrs & (SUN4V_ERR_ATTRS_MEMORY | SUN4V_ERR_ATTRS_ASI))
1974 		printk("%s: size [0x%x]\n", pfx, ent->err_size);
1975 
1976 	if (attrs & (SUN4V_ERR_ATTRS_PROCESSOR |
1977 		     SUN4V_ERR_ATTRS_INT_REGISTERS |
1978 		     SUN4V_ERR_ATTRS_FPU_REGISTERS |
1979 		     SUN4V_ERR_ATTRS_PRIV_REG))
1980 		printk("%s: cpu[%u]\n", pfx, ent->err_cpu);
1981 
1982 	if (attrs & SUN4V_ERR_ATTRS_ASI)
1983 		printk("%s: asi [0x%02x]\n", pfx, ent->err_asi);
1984 
1985 	if ((attrs & (SUN4V_ERR_ATTRS_INT_REGISTERS |
1986 		      SUN4V_ERR_ATTRS_FPU_REGISTERS |
1987 		      SUN4V_ERR_ATTRS_PRIV_REG)) &&
1988 	    (ent->err_asr & SUN4V_ERR_ASR_VALID) != 0)
1989 		printk("%s: reg [0x%04x]\n",
1990 		       pfx, ent->err_asr & ~SUN4V_ERR_ASR_VALID);
1991 
1992 	show_regs(regs);
1993 
1994 	if ((cnt = atomic_read(ocnt)) != 0) {
1995 		atomic_set(ocnt, 0);
1996 		wmb();
1997 		printk("%s: Queue overflowed %d times.\n",
1998 		       pfx, cnt);
1999 	}
2000 }
2001 
2002 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
2003  * Log the event and clear the first word of the entry.
2004  */
2005 void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
2006 {
2007 	enum ctx_state prev_state = exception_enter();
2008 	struct sun4v_error_entry *ent, local_copy;
2009 	struct trap_per_cpu *tb;
2010 	unsigned long paddr;
2011 	int cpu;
2012 
2013 	cpu = get_cpu();
2014 
2015 	tb = &trap_block[cpu];
2016 	paddr = tb->resum_kernel_buf_pa + offset;
2017 	ent = __va(paddr);
2018 
2019 	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
2020 
2021 	/* We have a local copy now, so release the entry.  */
2022 	ent->err_handle = 0;
2023 	wmb();
2024 
2025 	put_cpu();
2026 
2027 	if (local_copy.err_type == SUN4V_ERR_TYPE_SHUTDOWN_RQST) {
2028 		/* We should really take the seconds field of
2029 		 * the error report and use it for the shutdown
2030 		 * invocation, but for now do the same thing we
2031 		 * do for a DS shutdown request.
2032 		 */
2033 		pr_info("Shutdown request, %u seconds...\n",
2034 			local_copy.err_secs);
2035 		orderly_poweroff(true);
2036 		goto out;
2037 	}
2038 
2039 	sun4v_log_error(regs, &local_copy, cpu,
2040 			KERN_ERR "RESUMABLE ERROR",
2041 			&sun4v_resum_oflow_cnt);
2042 out:
2043 	exception_exit(prev_state);
2044 }
2045 
2046 /* If we try to printk() we'll probably make matters worse, by trying
2047  * to retake locks this cpu already holds or causing more errors. So
2048  * just bump a counter, and we'll report these counter bumps above.
2049  */
2050 void sun4v_resum_overflow(struct pt_regs *regs)
2051 {
2052 	atomic_inc(&sun4v_resum_oflow_cnt);
2053 }
2054 
2055 /* Given a set of registers, get the virtual addressi that was being accessed
2056  * by the faulting instructions at tpc.
2057  */
2058 static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
2059 {
2060 	unsigned int insn;
2061 
2062 	if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
2063 		return compute_effective_address(regs, insn,
2064 						 (insn >> 25) & 0x1f);
2065 	}
2066 	return 0;
2067 }
2068 
2069 /* Attempt to handle non-resumable errors generated from userspace.
2070  * Returns true if the signal was handled, false otherwise.
2071  */
2072 bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
2073 				  struct sun4v_error_entry *ent) {
2074 
2075 	unsigned int attrs = ent->err_attrs;
2076 
2077 	if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
2078 		unsigned long addr = ent->err_raddr;
2079 		siginfo_t info;
2080 
2081 		if (addr == ~(u64)0) {
2082 			/* This seems highly unlikely to ever occur */
2083 			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
2084 		} else {
2085 			unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
2086 							      PAGE_SIZE);
2087 
2088 			/* Break the unfortunate news. */
2089 			pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
2090 				 addr);
2091 			pr_emerg("SUN4V NON-RECOVERABLE ERROR:   Claiming %lu ages.\n",
2092 				 page_cnt);
2093 
2094 			while (page_cnt-- > 0) {
2095 				if (pfn_valid(addr >> PAGE_SHIFT))
2096 					get_page(pfn_to_page(addr >> PAGE_SHIFT));
2097 				addr += PAGE_SIZE;
2098 			}
2099 		}
2100 		info.si_signo = SIGKILL;
2101 		info.si_errno = 0;
2102 		info.si_trapno = 0;
2103 		force_sig_info(info.si_signo, &info, current);
2104 
2105 		return true;
2106 	}
2107 	if (attrs & SUN4V_ERR_ATTRS_PIO) {
2108 		siginfo_t info;
2109 
2110 		info.si_signo = SIGBUS;
2111 		info.si_code = BUS_ADRERR;
2112 		info.si_addr = (void __user *)sun4v_get_vaddr(regs);
2113 		force_sig_info(info.si_signo, &info, current);
2114 
2115 		return true;
2116 	}
2117 
2118 	/* Default to doing nothing */
2119 	return false;
2120 }
2121 
2122 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
2123  * Log the event, clear the first word of the entry, and die.
2124  */
2125 void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
2126 {
2127 	struct sun4v_error_entry *ent, local_copy;
2128 	struct trap_per_cpu *tb;
2129 	unsigned long paddr;
2130 	int cpu;
2131 
2132 	cpu = get_cpu();
2133 
2134 	tb = &trap_block[cpu];
2135 	paddr = tb->nonresum_kernel_buf_pa + offset;
2136 	ent = __va(paddr);
2137 
2138 	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
2139 
2140 	/* We have a local copy now, so release the entry.  */
2141 	ent->err_handle = 0;
2142 	wmb();
2143 
2144 	put_cpu();
2145 
2146 	if (!(regs->tstate & TSTATE_PRIV) &&
2147 	    sun4v_nonresum_error_user_handled(regs, &local_copy)) {
2148 		/* DON'T PANIC: This userspace error was handled. */
2149 		return;
2150 	}
2151 
2152 #ifdef CONFIG_PCI
2153 	/* Check for the special PCI poke sequence. */
2154 	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
2155 		pci_poke_faulted = 1;
2156 		regs->tpc += 4;
2157 		regs->tnpc = regs->tpc + 4;
2158 		return;
2159 	}
2160 #endif
2161 
2162 	sun4v_log_error(regs, &local_copy, cpu,
2163 			KERN_EMERG "NON-RESUMABLE ERROR",
2164 			&sun4v_nonresum_oflow_cnt);
2165 
2166 	panic("Non-resumable error.");
2167 }
2168 
2169 /* If we try to printk() we'll probably make matters worse, by trying
2170  * to retake locks this cpu already holds or causing more errors. So
2171  * just bump a counter, and we'll report these counter bumps above.
2172  */
2173 void sun4v_nonresum_overflow(struct pt_regs *regs)
2174 {
2175 	/* XXX Actually even this can make not that much sense.  Perhaps
2176 	 * XXX we should just pull the plug and panic directly from here?
2177 	 */
2178 	atomic_inc(&sun4v_nonresum_oflow_cnt);
2179 }
2180 
2181 static void sun4v_tlb_error(struct pt_regs *regs)
2182 {
2183 	die_if_kernel("TLB/TSB error", regs);
2184 }
2185 
2186 unsigned long sun4v_err_itlb_vaddr;
2187 unsigned long sun4v_err_itlb_ctx;
2188 unsigned long sun4v_err_itlb_pte;
2189 unsigned long sun4v_err_itlb_error;
2190 
2191 void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
2192 {
2193 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2194 
2195 	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
2196 	       regs->tpc, tl);
2197 	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
2198 	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
2199 	printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
2200 	       (void *) regs->u_regs[UREG_I7]);
2201 	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
2202 	       "pte[%lx] error[%lx]\n",
2203 	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
2204 	       sun4v_err_itlb_pte, sun4v_err_itlb_error);
2205 
2206 	sun4v_tlb_error(regs);
2207 }
2208 
2209 unsigned long sun4v_err_dtlb_vaddr;
2210 unsigned long sun4v_err_dtlb_ctx;
2211 unsigned long sun4v_err_dtlb_pte;
2212 unsigned long sun4v_err_dtlb_error;
2213 
2214 void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
2215 {
2216 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2217 
2218 	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
2219 	       regs->tpc, tl);
2220 	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
2221 	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
2222 	printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
2223 	       (void *) regs->u_regs[UREG_I7]);
2224 	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
2225 	       "pte[%lx] error[%lx]\n",
2226 	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
2227 	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
2228 
2229 	sun4v_tlb_error(regs);
2230 }
2231 
2232 void hypervisor_tlbop_error(unsigned long err, unsigned long op)
2233 {
2234 	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
2235 	       err, op);
2236 }
2237 
2238 void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
2239 {
2240 	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
2241 	       err, op);
2242 }
2243 
2244 static void do_fpe_common(struct pt_regs *regs)
2245 {
2246 	if (regs->tstate & TSTATE_PRIV) {
2247 		regs->tpc = regs->tnpc;
2248 		regs->tnpc += 4;
2249 	} else {
2250 		unsigned long fsr = current_thread_info()->xfsr[0];
2251 		siginfo_t info;
2252 
2253 		if (test_thread_flag(TIF_32BIT)) {
2254 			regs->tpc &= 0xffffffff;
2255 			regs->tnpc &= 0xffffffff;
2256 		}
2257 		info.si_signo = SIGFPE;
2258 		info.si_errno = 0;
2259 		info.si_addr = (void __user *)regs->tpc;
2260 		info.si_trapno = 0;
2261 		info.si_code = __SI_FAULT;
2262 		if ((fsr & 0x1c000) == (1 << 14)) {
2263 			if (fsr & 0x10)
2264 				info.si_code = FPE_FLTINV;
2265 			else if (fsr & 0x08)
2266 				info.si_code = FPE_FLTOVF;
2267 			else if (fsr & 0x04)
2268 				info.si_code = FPE_FLTUND;
2269 			else if (fsr & 0x02)
2270 				info.si_code = FPE_FLTDIV;
2271 			else if (fsr & 0x01)
2272 				info.si_code = FPE_FLTRES;
2273 		}
2274 		force_sig_info(SIGFPE, &info, current);
2275 	}
2276 }
2277 
2278 void do_fpieee(struct pt_regs *regs)
2279 {
2280 	enum ctx_state prev_state = exception_enter();
2281 
2282 	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
2283 		       0, 0x24, SIGFPE) == NOTIFY_STOP)
2284 		goto out;
2285 
2286 	do_fpe_common(regs);
2287 out:
2288 	exception_exit(prev_state);
2289 }
2290 
2291 void do_fpother(struct pt_regs *regs)
2292 {
2293 	enum ctx_state prev_state = exception_enter();
2294 	struct fpustate *f = FPUSTATE;
2295 	int ret = 0;
2296 
2297 	if (notify_die(DIE_TRAP, "fpu exception other", regs,
2298 		       0, 0x25, SIGFPE) == NOTIFY_STOP)
2299 		goto out;
2300 
2301 	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
2302 	case (2 << 14): /* unfinished_FPop */
2303 	case (3 << 14): /* unimplemented_FPop */
2304 		ret = do_mathemu(regs, f, false);
2305 		break;
2306 	}
2307 	if (ret)
2308 		goto out;
2309 	do_fpe_common(regs);
2310 out:
2311 	exception_exit(prev_state);
2312 }
2313 
2314 void do_tof(struct pt_regs *regs)
2315 {
2316 	enum ctx_state prev_state = exception_enter();
2317 	siginfo_t info;
2318 
2319 	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
2320 		       0, 0x26, SIGEMT) == NOTIFY_STOP)
2321 		goto out;
2322 
2323 	if (regs->tstate & TSTATE_PRIV)
2324 		die_if_kernel("Penguin overflow trap from kernel mode", regs);
2325 	if (test_thread_flag(TIF_32BIT)) {
2326 		regs->tpc &= 0xffffffff;
2327 		regs->tnpc &= 0xffffffff;
2328 	}
2329 	info.si_signo = SIGEMT;
2330 	info.si_errno = 0;
2331 	info.si_code = EMT_TAGOVF;
2332 	info.si_addr = (void __user *)regs->tpc;
2333 	info.si_trapno = 0;
2334 	force_sig_info(SIGEMT, &info, current);
2335 out:
2336 	exception_exit(prev_state);
2337 }
2338 
2339 void do_div0(struct pt_regs *regs)
2340 {
2341 	enum ctx_state prev_state = exception_enter();
2342 	siginfo_t info;
2343 
2344 	if (notify_die(DIE_TRAP, "integer division by zero", regs,
2345 		       0, 0x28, SIGFPE) == NOTIFY_STOP)
2346 		goto out;
2347 
2348 	if (regs->tstate & TSTATE_PRIV)
2349 		die_if_kernel("TL0: Kernel divide by zero.", regs);
2350 	if (test_thread_flag(TIF_32BIT)) {
2351 		regs->tpc &= 0xffffffff;
2352 		regs->tnpc &= 0xffffffff;
2353 	}
2354 	info.si_signo = SIGFPE;
2355 	info.si_errno = 0;
2356 	info.si_code = FPE_INTDIV;
2357 	info.si_addr = (void __user *)regs->tpc;
2358 	info.si_trapno = 0;
2359 	force_sig_info(SIGFPE, &info, current);
2360 out:
2361 	exception_exit(prev_state);
2362 }
2363 
2364 static void instruction_dump(unsigned int *pc)
2365 {
2366 	int i;
2367 
2368 	if ((((unsigned long) pc) & 3))
2369 		return;
2370 
2371 	printk("Instruction DUMP:");
2372 	for (i = -3; i < 6; i++)
2373 		printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
2374 	printk("\n");
2375 }
2376 
2377 static void user_instruction_dump(unsigned int __user *pc)
2378 {
2379 	int i;
2380 	unsigned int buf[9];
2381 
2382 	if ((((unsigned long) pc) & 3))
2383 		return;
2384 
2385 	if (copy_from_user(buf, pc - 3, sizeof(buf)))
2386 		return;
2387 
2388 	printk("Instruction DUMP:");
2389 	for (i = 0; i < 9; i++)
2390 		printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
2391 	printk("\n");
2392 }
2393 
2394 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2395 {
2396 	unsigned long fp, ksp;
2397 	struct thread_info *tp;
2398 	int count = 0;
2399 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2400 	int graph = 0;
2401 #endif
2402 
2403 	ksp = (unsigned long) _ksp;
2404 	if (!tsk)
2405 		tsk = current;
2406 	tp = task_thread_info(tsk);
2407 	if (ksp == 0UL) {
2408 		if (tsk == current)
2409 			asm("mov %%fp, %0" : "=r" (ksp));
2410 		else
2411 			ksp = tp->ksp;
2412 	}
2413 	if (tp == current_thread_info())
2414 		flushw_all();
2415 
2416 	fp = ksp + STACK_BIAS;
2417 
2418 	printk("Call Trace:\n");
2419 	do {
2420 		struct sparc_stackf *sf;
2421 		struct pt_regs *regs;
2422 		unsigned long pc;
2423 
2424 		if (!kstack_valid(tp, fp))
2425 			break;
2426 		sf = (struct sparc_stackf *) fp;
2427 		regs = (struct pt_regs *) (sf + 1);
2428 
2429 		if (kstack_is_trap_frame(tp, regs)) {
2430 			if (!(regs->tstate & TSTATE_PRIV))
2431 				break;
2432 			pc = regs->tpc;
2433 			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
2434 		} else {
2435 			pc = sf->callers_pc;
2436 			fp = (unsigned long)sf->fp + STACK_BIAS;
2437 		}
2438 
2439 		printk(" [%016lx] %pS\n", pc, (void *) pc);
2440 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2441 		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
2442 			int index = tsk->curr_ret_stack;
2443 			if (tsk->ret_stack && index >= graph) {
2444 				pc = tsk->ret_stack[index - graph].ret;
2445 				printk(" [%016lx] %pS\n", pc, (void *) pc);
2446 				graph++;
2447 			}
2448 		}
2449 #endif
2450 	} while (++count < 16);
2451 }
2452 
2453 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2454 {
2455 	unsigned long fp = rw->ins[6];
2456 
2457 	if (!fp)
2458 		return NULL;
2459 
2460 	return (struct reg_window *) (fp + STACK_BIAS);
2461 }
2462 
2463 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
2464 {
2465 	static int die_counter;
2466 	int count = 0;
2467 
2468 	/* Amuse the user. */
2469 	printk(
2470 "              \\|/ ____ \\|/\n"
2471 "              \"@'/ .. \\`@\"\n"
2472 "              /_| \\__/ |_\\\n"
2473 "                 \\__U_/\n");
2474 
2475 	printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
2476 	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
2477 	__asm__ __volatile__("flushw");
2478 	show_regs(regs);
2479 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
2480 	if (regs->tstate & TSTATE_PRIV) {
2481 		struct thread_info *tp = current_thread_info();
2482 		struct reg_window *rw = (struct reg_window *)
2483 			(regs->u_regs[UREG_FP] + STACK_BIAS);
2484 
2485 		/* Stop the back trace when we hit userland or we
2486 		 * find some badly aligned kernel stack.
2487 		 */
2488 		while (rw &&
2489 		       count++ < 30 &&
2490 		       kstack_valid(tp, (unsigned long) rw)) {
2491 			printk("Caller[%016lx]: %pS\n", rw->ins[7],
2492 			       (void *) rw->ins[7]);
2493 
2494 			rw = kernel_stack_up(rw);
2495 		}
2496 		instruction_dump ((unsigned int *) regs->tpc);
2497 	} else {
2498 		if (test_thread_flag(TIF_32BIT)) {
2499 			regs->tpc &= 0xffffffff;
2500 			regs->tnpc &= 0xffffffff;
2501 		}
2502 		user_instruction_dump ((unsigned int __user *) regs->tpc);
2503 	}
2504 	if (panic_on_oops)
2505 		panic("Fatal exception");
2506 	if (regs->tstate & TSTATE_PRIV)
2507 		do_exit(SIGKILL);
2508 	do_exit(SIGSEGV);
2509 }
2510 EXPORT_SYMBOL(die_if_kernel);
2511 
2512 #define VIS_OPCODE_MASK	((0x3 << 30) | (0x3f << 19))
2513 #define VIS_OPCODE_VAL	((0x2 << 30) | (0x36 << 19))
2514 
2515 void do_illegal_instruction(struct pt_regs *regs)
2516 {
2517 	enum ctx_state prev_state = exception_enter();
2518 	unsigned long pc = regs->tpc;
2519 	unsigned long tstate = regs->tstate;
2520 	u32 insn;
2521 	siginfo_t info;
2522 
2523 	if (notify_die(DIE_TRAP, "illegal instruction", regs,
2524 		       0, 0x10, SIGILL) == NOTIFY_STOP)
2525 		goto out;
2526 
2527 	if (tstate & TSTATE_PRIV)
2528 		die_if_kernel("Kernel illegal instruction", regs);
2529 	if (test_thread_flag(TIF_32BIT))
2530 		pc = (u32)pc;
2531 	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
2532 		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
2533 			if (handle_popc(insn, regs))
2534 				goto out;
2535 		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2536 			if (handle_ldf_stq(insn, regs))
2537 				goto out;
2538 		} else if (tlb_type == hypervisor) {
2539 			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
2540 				if (!vis_emul(regs, insn))
2541 					goto out;
2542 			} else {
2543 				struct fpustate *f = FPUSTATE;
2544 
2545 				/* On UltraSPARC T2 and later, FPU insns which
2546 				 * are not implemented in HW signal an illegal
2547 				 * instruction trap and do not set the FP Trap
2548 				 * Trap in the %fsr to unimplemented_FPop.
2549 				 */
2550 				if (do_mathemu(regs, f, true))
2551 					goto out;
2552 			}
2553 		}
2554 	}
2555 	info.si_signo = SIGILL;
2556 	info.si_errno = 0;
2557 	info.si_code = ILL_ILLOPC;
2558 	info.si_addr = (void __user *)pc;
2559 	info.si_trapno = 0;
2560 	force_sig_info(SIGILL, &info, current);
2561 out:
2562 	exception_exit(prev_state);
2563 }
2564 
2565 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2566 {
2567 	enum ctx_state prev_state = exception_enter();
2568 	siginfo_t info;
2569 
2570 	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2571 		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2572 		goto out;
2573 
2574 	if (regs->tstate & TSTATE_PRIV) {
2575 		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2576 		goto out;
2577 	}
2578 	info.si_signo = SIGBUS;
2579 	info.si_errno = 0;
2580 	info.si_code = BUS_ADRALN;
2581 	info.si_addr = (void __user *)sfar;
2582 	info.si_trapno = 0;
2583 	force_sig_info(SIGBUS, &info, current);
2584 out:
2585 	exception_exit(prev_state);
2586 }
2587 
2588 void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2589 {
2590 	siginfo_t info;
2591 
2592 	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2593 		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2594 		return;
2595 
2596 	if (regs->tstate & TSTATE_PRIV) {
2597 		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2598 		return;
2599 	}
2600 	info.si_signo = SIGBUS;
2601 	info.si_errno = 0;
2602 	info.si_code = BUS_ADRALN;
2603 	info.si_addr = (void __user *) addr;
2604 	info.si_trapno = 0;
2605 	force_sig_info(SIGBUS, &info, current);
2606 }
2607 
2608 void do_privop(struct pt_regs *regs)
2609 {
2610 	enum ctx_state prev_state = exception_enter();
2611 	siginfo_t info;
2612 
2613 	if (notify_die(DIE_TRAP, "privileged operation", regs,
2614 		       0, 0x11, SIGILL) == NOTIFY_STOP)
2615 		goto out;
2616 
2617 	if (test_thread_flag(TIF_32BIT)) {
2618 		regs->tpc &= 0xffffffff;
2619 		regs->tnpc &= 0xffffffff;
2620 	}
2621 	info.si_signo = SIGILL;
2622 	info.si_errno = 0;
2623 	info.si_code = ILL_PRVOPC;
2624 	info.si_addr = (void __user *)regs->tpc;
2625 	info.si_trapno = 0;
2626 	force_sig_info(SIGILL, &info, current);
2627 out:
2628 	exception_exit(prev_state);
2629 }
2630 
2631 void do_privact(struct pt_regs *regs)
2632 {
2633 	do_privop(regs);
2634 }
2635 
2636 /* Trap level 1 stuff or other traps we should never see... */
2637 void do_cee(struct pt_regs *regs)
2638 {
2639 	exception_enter();
2640 	die_if_kernel("TL0: Cache Error Exception", regs);
2641 }
2642 
2643 void do_div0_tl1(struct pt_regs *regs)
2644 {
2645 	exception_enter();
2646 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2647 	die_if_kernel("TL1: DIV0 Exception", regs);
2648 }
2649 
2650 void do_fpieee_tl1(struct pt_regs *regs)
2651 {
2652 	exception_enter();
2653 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2654 	die_if_kernel("TL1: FPU IEEE Exception", regs);
2655 }
2656 
2657 void do_fpother_tl1(struct pt_regs *regs)
2658 {
2659 	exception_enter();
2660 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2661 	die_if_kernel("TL1: FPU Other Exception", regs);
2662 }
2663 
2664 void do_ill_tl1(struct pt_regs *regs)
2665 {
2666 	exception_enter();
2667 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2668 	die_if_kernel("TL1: Illegal Instruction Exception", regs);
2669 }
2670 
2671 void do_irq_tl1(struct pt_regs *regs)
2672 {
2673 	exception_enter();
2674 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2675 	die_if_kernel("TL1: IRQ Exception", regs);
2676 }
2677 
2678 void do_lddfmna_tl1(struct pt_regs *regs)
2679 {
2680 	exception_enter();
2681 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2682 	die_if_kernel("TL1: LDDF Exception", regs);
2683 }
2684 
2685 void do_stdfmna_tl1(struct pt_regs *regs)
2686 {
2687 	exception_enter();
2688 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2689 	die_if_kernel("TL1: STDF Exception", regs);
2690 }
2691 
2692 void do_paw(struct pt_regs *regs)
2693 {
2694 	exception_enter();
2695 	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
2696 }
2697 
2698 void do_paw_tl1(struct pt_regs *regs)
2699 {
2700 	exception_enter();
2701 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2702 	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
2703 }
2704 
2705 void do_vaw(struct pt_regs *regs)
2706 {
2707 	exception_enter();
2708 	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
2709 }
2710 
2711 void do_vaw_tl1(struct pt_regs *regs)
2712 {
2713 	exception_enter();
2714 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2715 	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2716 }
2717 
2718 void do_tof_tl1(struct pt_regs *regs)
2719 {
2720 	exception_enter();
2721 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2722 	die_if_kernel("TL1: Tag Overflow Exception", regs);
2723 }
2724 
2725 void do_getpsr(struct pt_regs *regs)
2726 {
2727 	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2728 	regs->tpc   = regs->tnpc;
2729 	regs->tnpc += 4;
2730 	if (test_thread_flag(TIF_32BIT)) {
2731 		regs->tpc &= 0xffffffff;
2732 		regs->tnpc &= 0xffffffff;
2733 	}
2734 }
2735 
2736 u64 cpu_mondo_counter[NR_CPUS] = {0};
2737 struct trap_per_cpu trap_block[NR_CPUS];
2738 EXPORT_SYMBOL(trap_block);
2739 
2740 /* This can get invoked before sched_init() so play it super safe
2741  * and use hard_smp_processor_id().
2742  */
2743 void notrace init_cur_cpu_trap(struct thread_info *t)
2744 {
2745 	int cpu = hard_smp_processor_id();
2746 	struct trap_per_cpu *p = &trap_block[cpu];
2747 
2748 	p->thread = t;
2749 	p->pgd_paddr = 0;
2750 }
2751 
2752 extern void thread_info_offsets_are_bolixed_dave(void);
2753 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
2754 extern void tsb_config_offsets_are_bolixed_dave(void);
2755 
2756 /* Only invoked on boot processor. */
2757 void __init trap_init(void)
2758 {
2759 	/* Compile time sanity check. */
2760 	BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
2761 		     TI_FLAGS != offsetof(struct thread_info, flags) ||
2762 		     TI_CPU != offsetof(struct thread_info, cpu) ||
2763 		     TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
2764 		     TI_KSP != offsetof(struct thread_info, ksp) ||
2765 		     TI_FAULT_ADDR != offsetof(struct thread_info,
2766 					       fault_address) ||
2767 		     TI_KREGS != offsetof(struct thread_info, kregs) ||
2768 		     TI_UTRAPS != offsetof(struct thread_info, utraps) ||
2769 		     TI_REG_WINDOW != offsetof(struct thread_info,
2770 					       reg_window) ||
2771 		     TI_RWIN_SPTRS != offsetof(struct thread_info,
2772 					       rwbuf_stkptrs) ||
2773 		     TI_GSR != offsetof(struct thread_info, gsr) ||
2774 		     TI_XFSR != offsetof(struct thread_info, xfsr) ||
2775 		     TI_PRE_COUNT != offsetof(struct thread_info,
2776 					      preempt_count) ||
2777 		     TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
2778 		     TI_CURRENT_DS != offsetof(struct thread_info,
2779 						current_ds) ||
2780 		     TI_KUNA_REGS != offsetof(struct thread_info,
2781 					      kern_una_regs) ||
2782 		     TI_KUNA_INSN != offsetof(struct thread_info,
2783 					      kern_una_insn) ||
2784 		     TI_FPREGS != offsetof(struct thread_info, fpregs) ||
2785 		     (TI_FPREGS & (64 - 1)));
2786 
2787 	BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
2788 						     thread) ||
2789 		     (TRAP_PER_CPU_PGD_PADDR !=
2790 		      offsetof(struct trap_per_cpu, pgd_paddr)) ||
2791 		     (TRAP_PER_CPU_CPU_MONDO_PA !=
2792 		      offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
2793 		     (TRAP_PER_CPU_DEV_MONDO_PA !=
2794 		      offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
2795 		     (TRAP_PER_CPU_RESUM_MONDO_PA !=
2796 		      offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
2797 		     (TRAP_PER_CPU_RESUM_KBUF_PA !=
2798 		      offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
2799 		     (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
2800 		      offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
2801 		     (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
2802 		      offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
2803 		     (TRAP_PER_CPU_FAULT_INFO !=
2804 		      offsetof(struct trap_per_cpu, fault_info)) ||
2805 		     (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
2806 		      offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
2807 		     (TRAP_PER_CPU_CPU_LIST_PA !=
2808 		      offsetof(struct trap_per_cpu, cpu_list_pa)) ||
2809 		     (TRAP_PER_CPU_TSB_HUGE !=
2810 		      offsetof(struct trap_per_cpu, tsb_huge)) ||
2811 		     (TRAP_PER_CPU_TSB_HUGE_TEMP !=
2812 		      offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
2813 		     (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
2814 		      offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
2815 		     (TRAP_PER_CPU_CPU_MONDO_QMASK !=
2816 		      offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
2817 		     (TRAP_PER_CPU_DEV_MONDO_QMASK !=
2818 		      offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
2819 		     (TRAP_PER_CPU_RESUM_QMASK !=
2820 		      offsetof(struct trap_per_cpu, resum_qmask)) ||
2821 		     (TRAP_PER_CPU_NONRESUM_QMASK !=
2822 		      offsetof(struct trap_per_cpu, nonresum_qmask)) ||
2823 		     (TRAP_PER_CPU_PER_CPU_BASE !=
2824 		      offsetof(struct trap_per_cpu, __per_cpu_base)));
2825 
2826 	BUILD_BUG_ON((TSB_CONFIG_TSB !=
2827 		      offsetof(struct tsb_config, tsb)) ||
2828 		     (TSB_CONFIG_RSS_LIMIT !=
2829 		      offsetof(struct tsb_config, tsb_rss_limit)) ||
2830 		     (TSB_CONFIG_NENTRIES !=
2831 		      offsetof(struct tsb_config, tsb_nentries)) ||
2832 		     (TSB_CONFIG_REG_VAL !=
2833 		      offsetof(struct tsb_config, tsb_reg_val)) ||
2834 		     (TSB_CONFIG_MAP_VADDR !=
2835 		      offsetof(struct tsb_config, tsb_map_vaddr)) ||
2836 		     (TSB_CONFIG_MAP_PTE !=
2837 		      offsetof(struct tsb_config, tsb_map_pte)));
2838 
2839 	/* Attach to the address space of init_task.  On SMP we
2840 	 * do this in smp.c:smp_callin for other cpus.
2841 	 */
2842 	mmgrab(&init_mm);
2843 	current->active_mm = &init_mm;
2844 }
2845