xref: /openbmc/linux/arch/sparc/kernel/traps_64.c (revision b34e08d5)
1 /* arch/sparc64/kernel/traps.c
2  *
3  * Copyright (C) 1995,1997,2008,2009,2012 David S. Miller (davem@davemloft.net)
4  * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
5  */
6 
7 /*
8  * I like traps on v9, :))))
9  */
10 
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/linkage.h>
14 #include <linux/kernel.h>
15 #include <linux/signal.h>
16 #include <linux/smp.h>
17 #include <linux/mm.h>
18 #include <linux/init.h>
19 #include <linux/kdebug.h>
20 #include <linux/ftrace.h>
21 #include <linux/reboot.h>
22 #include <linux/gfp.h>
23 #include <linux/context_tracking.h>
24 
25 #include <asm/smp.h>
26 #include <asm/delay.h>
27 #include <asm/ptrace.h>
28 #include <asm/oplib.h>
29 #include <asm/page.h>
30 #include <asm/pgtable.h>
31 #include <asm/unistd.h>
32 #include <asm/uaccess.h>
33 #include <asm/fpumacro.h>
34 #include <asm/lsu.h>
35 #include <asm/dcu.h>
36 #include <asm/estate.h>
37 #include <asm/chafsr.h>
38 #include <asm/sfafsr.h>
39 #include <asm/psrcompat.h>
40 #include <asm/processor.h>
41 #include <asm/timer.h>
42 #include <asm/head.h>
43 #include <asm/prom.h>
44 #include <asm/memctrl.h>
45 #include <asm/cacheflush.h>
46 
47 #include "entry.h"
48 #include "kstack.h"
49 
50 /* When an irrecoverable trap occurs at tl > 0, the trap entry
51  * code logs the trap state registers at every level in the trap
52  * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
53  * is as follows:
54  */
55 struct tl1_traplog {
56 	struct {
57 		unsigned long tstate;
58 		unsigned long tpc;
59 		unsigned long tnpc;
60 		unsigned long tt;
61 	} trapstack[4];
62 	unsigned long tl;
63 };
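/* trapstack[0] holds the state for trap level 1, trapstack[1] for
 * trap level 2, and so on (note the "i + 1" in the dump below).  The
 * TL1 handlers in this file locate the log with the expression
 * (struct tl1_traplog *)(regs + 1).
 */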
64 
65 static void dump_tl1_traplog(struct tl1_traplog *p)
66 {
67 	int i, limit;
68 
69 	printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
70 	       "dumping trap stack.\n", p->tl);
71 
72 	limit = (tlb_type == hypervisor) ? 2 : 4;
73 	for (i = 0; i < limit; i++) {
74 		printk(KERN_EMERG
75 		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
76 		       "TNPC[%016lx] TT[%lx]\n",
77 		       i + 1,
78 		       p->trapstack[i].tstate, p->trapstack[i].tpc,
79 		       p->trapstack[i].tnpc, p->trapstack[i].tt);
80 		printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
81 	}
82 }
83 
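/* Catch-all for trap vectors with no dedicated handler.  Values of
 * lvl below 0x100 are hardware trap types and are always fatal here;
 * larger values appear to be software trap numbers biased by 0x100
 * (hence the "lvl -= 0x100" below), fatal from privileged mode and
 * otherwise reflected to user space as SIGILL/ILL_ILLTRP with the
 * trap number in si_trapno.
 */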
84 void bad_trap(struct pt_regs *regs, long lvl)
85 {
86 	char buffer[32];
87 	siginfo_t info;
88 
89 	if (notify_die(DIE_TRAP, "bad trap", regs,
90 		       0, lvl, SIGTRAP) == NOTIFY_STOP)
91 		return;
92 
93 	if (lvl < 0x100) {
94 		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
95 		die_if_kernel(buffer, regs);
96 	}
97 
98 	lvl -= 0x100;
99 	if (regs->tstate & TSTATE_PRIV) {
100 		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
101 		die_if_kernel(buffer, regs);
102 	}
103 	if (test_thread_flag(TIF_32BIT)) {
104 		regs->tpc &= 0xffffffff;
105 		regs->tnpc &= 0xffffffff;
106 	}
107 	info.si_signo = SIGILL;
108 	info.si_errno = 0;
109 	info.si_code = ILL_ILLTRP;
110 	info.si_addr = (void __user *)regs->tpc;
111 	info.si_trapno = lvl;
112 	force_sig_info(SIGILL, &info, current);
113 }
114 
115 void bad_trap_tl1(struct pt_regs *regs, long lvl)
116 {
117 	char buffer[32];
118 
119 	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
120 		       0, lvl, SIGTRAP) == NOTIFY_STOP)
121 		return;
122 
123 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
124 
125 	sprintf(buffer, "Bad trap %lx at tl>0", lvl);
126 	die_if_kernel(buffer, regs);
127 }
128 
129 #ifdef CONFIG_DEBUG_BUGVERBOSE
130 void do_BUG(const char *file, int line)
131 {
132 	bust_spinlocks(1);
133 	printk("kernel BUG at %s:%d!\n", file, line);
134 }
135 EXPORT_SYMBOL(do_BUG);
136 #endif
137 
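/* A memory controller driver may register a single callback that
 * turns an ECC syndrome code plus physical address into a printable
 * DIMM label.  sprintf_dimm() consults it when logging ECC errors;
 * on Spitfire-class machines with no driver registered we fall back
 * to the OBP prom_getunumber() service.
 */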
138 static DEFINE_SPINLOCK(dimm_handler_lock);
139 static dimm_printer_t dimm_handler;
140 
141 static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
142 {
143 	unsigned long flags;
144 	int ret = -ENODEV;
145 
146 	spin_lock_irqsave(&dimm_handler_lock, flags);
147 	if (dimm_handler) {
148 		ret = dimm_handler(synd_code, paddr, buf, buflen);
149 	} else if (tlb_type == spitfire) {
150 		if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
151 			ret = -EINVAL;
152 		else
153 			ret = 0;
154 	} else
155 		ret = -ENODEV;
156 	spin_unlock_irqrestore(&dimm_handler_lock, flags);
157 
158 	return ret;
159 }
160 
161 int register_dimm_printer(dimm_printer_t func)
162 {
163 	unsigned long flags;
164 	int ret = 0;
165 
166 	spin_lock_irqsave(&dimm_handler_lock, flags);
167 	if (!dimm_handler)
168 		dimm_handler = func;
169 	else
170 		ret = -EEXIST;
171 	spin_unlock_irqrestore(&dimm_handler_lock, flags);
172 
173 	return ret;
174 }
175 EXPORT_SYMBOL_GPL(register_dimm_printer);
176 
177 void unregister_dimm_printer(dimm_printer_t func)
178 {
179 	unsigned long flags;
180 
181 	spin_lock_irqsave(&dimm_handler_lock, flags);
182 	if (dimm_handler == func)
183 		dimm_handler = NULL;
184 	spin_unlock_irqrestore(&dimm_handler_lock, flags);
185 }
186 EXPORT_SYMBOL_GPL(unregister_dimm_printer);
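/* Minimal usage sketch for a memory controller driver; the handler
 * name and the slot derivation are hypothetical, only the signature
 * and return convention follow the code above:
 *
 *	static int my_mc_print_dimm(int synd_code, unsigned long paddr,
 *				    char *buf, int buflen)
 *	{
 *		return scnprintf(buf, buflen, "slot-%lu", paddr >> 30);
 *	}
 *
 *	if (register_dimm_printer(my_mc_print_dimm))
 *		pr_warn("dimm printer already registered\n");
 *	...
 *	unregister_dimm_printer(my_mc_print_dimm);
 */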
187 
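/* Spitfire-class (UltraSPARC I/II) instruction access exception:
 * fatal if taken from privileged mode, otherwise reflected to the
 * offending process as SIGSEGV at the faulting %tpc.  SFSR/SFAR are
 * the raw fault status/address registers captured at trap entry.
 */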
188 void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
189 {
190 	enum ctx_state prev_state = exception_enter();
191 	siginfo_t info;
192 
193 	if (notify_die(DIE_TRAP, "instruction access exception", regs,
194 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
195 		goto out;
196 
197 	if (regs->tstate & TSTATE_PRIV) {
198 		printk("spitfire_insn_access_exception: SFSR[%016lx] "
199 		       "SFAR[%016lx], going.\n", sfsr, sfar);
200 		die_if_kernel("Iax", regs);
201 	}
202 	if (test_thread_flag(TIF_32BIT)) {
203 		regs->tpc &= 0xffffffff;
204 		regs->tnpc &= 0xffffffff;
205 	}
206 	info.si_signo = SIGSEGV;
207 	info.si_errno = 0;
208 	info.si_code = SEGV_MAPERR;
209 	info.si_addr = (void __user *)regs->tpc;
210 	info.si_trapno = 0;
211 	force_sig_info(SIGSEGV, &info, current);
212 out:
213 	exception_exit(prev_state);
214 }
215 
216 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
217 {
218 	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
219 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
220 		return;
221 
222 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
223 	spitfire_insn_access_exception(regs, sfsr, sfar);
224 }
225 
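/* sun4v (hypervisor) variant: the fault type and the MMU context
 * arrive packed into one word, type in the upper 16 bits and context
 * in the lower 16, which is unpacked below.
 */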
226 void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
227 {
228 	unsigned short type = (type_ctx >> 16);
229 	unsigned short ctx  = (type_ctx & 0xffff);
230 	siginfo_t info;
231 
232 	if (notify_die(DIE_TRAP, "instruction access exception", regs,
233 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
234 		return;
235 
236 	if (regs->tstate & TSTATE_PRIV) {
237 		printk("sun4v_insn_access_exception: ADDR[%016lx] "
238 		       "CTX[%04x] TYPE[%04x], going.\n",
239 		       addr, ctx, type);
240 		die_if_kernel("Iax", regs);
241 	}
242 
243 	if (test_thread_flag(TIF_32BIT)) {
244 		regs->tpc &= 0xffffffff;
245 		regs->tnpc &= 0xffffffff;
246 	}
247 	info.si_signo = SIGSEGV;
248 	info.si_errno = 0;
249 	info.si_code = SEGV_MAPERR;
250 	info.si_addr = (void __user *) addr;
251 	info.si_trapno = 0;
252 	force_sig_info(SIGSEGV, &info, current);
253 }
254 
255 void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
256 {
257 	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
258 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
259 		return;
260 
261 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
262 	sun4v_insn_access_exception(regs, addr, type_ctx);
263 }
264 
265 void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
266 {
267 	enum ctx_state prev_state = exception_enter();
268 	siginfo_t info;
269 
270 	if (notify_die(DIE_TRAP, "data access exception", regs,
271 		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
272 		goto out;
273 
274 	if (regs->tstate & TSTATE_PRIV) {
275 		/* Test if this comes from uaccess places. */
276 		const struct exception_table_entry *entry;
277 
278 		entry = search_exception_tables(regs->tpc);
279 		if (entry) {
280 			/* Ouch, somebody is trying VM hole tricks on us... */
281 #ifdef DEBUG_EXCEPTIONS
282 			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
283 			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
284 			       regs->tpc, entry->fixup);
285 #endif
286 			regs->tpc = entry->fixup;
287 			regs->tnpc = regs->tpc + 4;
288 			goto out;
289 		}
290 		/* Shit... */
291 		printk("spitfire_data_access_exception: SFSR[%016lx] "
292 		       "SFAR[%016lx], going.\n", sfsr, sfar);
293 		die_if_kernel("Dax", regs);
294 	}
295 
296 	info.si_signo = SIGSEGV;
297 	info.si_errno = 0;
298 	info.si_code = SEGV_MAPERR;
299 	info.si_addr = (void __user *)sfar;
300 	info.si_trapno = 0;
301 	force_sig_info(SIGSEGV, &info, current);
302 out:
303 	exception_exit(prev_state);
304 }
305 
306 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
307 {
308 	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
309 		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
310 		return;
311 
312 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
313 	spitfire_data_access_exception(regs, sfsr, sfar);
314 }
315 
316 void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
317 {
318 	unsigned short type = (type_ctx >> 16);
319 	unsigned short ctx  = (type_ctx & 0xffff);
320 	siginfo_t info;
321 
322 	if (notify_die(DIE_TRAP, "data access exception", regs,
323 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
324 		return;
325 
326 	if (regs->tstate & TSTATE_PRIV) {
327 		/* Test if this comes from uaccess places. */
328 		const struct exception_table_entry *entry;
329 
330 		entry = search_exception_tables(regs->tpc);
331 		if (entry) {
332 			/* Ouch, somebody is trying VM hole tricks on us... */
333 #ifdef DEBUG_EXCEPTIONS
334 			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
335 			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
336 			       regs->tpc, entry->fixup);
337 #endif
338 			regs->tpc = entry->fixup;
339 			regs->tnpc = regs->tpc + 4;
340 			return;
341 		}
342 		printk("sun4v_data_access_exception: ADDR[%016lx] "
343 		       "CTX[%04x] TYPE[%04x], going.\n",
344 		       addr, ctx, type);
345 		die_if_kernel("Dax", regs);
346 	}
347 
348 	if (test_thread_flag(TIF_32BIT)) {
349 		regs->tpc &= 0xffffffff;
350 		regs->tnpc &= 0xffffffff;
351 	}
352 	info.si_signo = SIGSEGV;
353 	info.si_errno = 0;
354 	info.si_code = SEGV_MAPERR;
355 	info.si_addr = (void __user *) addr;
356 	info.si_trapno = 0;
357 	force_sig_info(SIGSEGV, &info, current);
358 }
359 
360 void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
361 {
362 	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
363 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
364 		return;
365 
366 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
367 	sun4v_data_access_exception(regs, addr, type_ctx);
368 }
369 
370 #ifdef CONFIG_PCI
371 #include "pci_impl.h"
372 #endif
373 
374 /* When access errors happen, we must clean and re-enable the L1 caches. */
375 static void spitfire_clean_and_reenable_l1_caches(void)
376 {
377 	unsigned long va;
378 
379 	if (tlb_type != spitfire)
380 		BUG();
381 
382 	/* Clean 'em. */
383 	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
384 		spitfire_put_icache_tag(va, 0x0);
385 		spitfire_put_dcache_tag(va, 0x0);
386 	}
387 
388 	/* Re-enable in LSU. */
389 	__asm__ __volatile__("flush %%g6\n\t"
390 			     "membar #Sync\n\t"
391 			     "stxa %0, [%%g0] %1\n\t"
392 			     "membar #Sync"
393 			     : /* no outputs */
394 			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
395 				    LSU_CONTROL_IM | LSU_CONTROL_DM),
396 			     "i" (ASI_LSU_CONTROL)
397 			     : "memory");
398 }
399 
400 static void spitfire_enable_estate_errors(void)
401 {
402 	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
403 			     "membar	#Sync"
404 			     : /* no outputs */
405 			     : "r" (ESTATE_ERR_ALL),
406 			       "i" (ASI_ESTATE_ERROR_EN));
407 }
408 
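/* Maps the low eight bits of a UDB error register (the raw ECC
 * syndrome) to the syndrome code that sprintf_dimm() and the OBP
 * get-unumber service understand.  The dense values around 0x48-0x4b
 * presumably mark syndromes with no single-DIMM attribution.
 */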
409 static char ecc_syndrome_table[] = {
410 	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
411 	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
412 	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
413 	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
414 	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
415 	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
416 	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
417 	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
418 	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
419 	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
420 	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
421 	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
422 	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
423 	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
424 	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
425 	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
426 	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
427 	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
428 	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
429 	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
430 	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
431 	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
432 	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
433 	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
434 	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
435 	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
436 	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
437 	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
438 	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
439 	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
440 	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
441 	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
442 };
443 
444 static char *syndrome_unknown = "<Unknown>";
445 
446 static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
447 {
448 	unsigned short scode;
449 	char memmod_str[64], *p;
450 
451 	if (udbl & bit) {
452 		scode = ecc_syndrome_table[udbl & 0xff];
453 		if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
454 			p = syndrome_unknown;
455 		else
456 			p = memmod_str;
457 		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
458 		       "Memory Module \"%s\"\n",
459 		       smp_processor_id(), scode, p);
460 	}
461 
462 	if (udbh & bit) {
463 		scode = ecc_syndrome_table[udbh & 0xff];
464 		if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
465 			p = syndrome_unknown;
466 		else
467 			p = memmod_str;
468 		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
469 		       "Memory Module \"%s\"\n",
470 		       smp_processor_id(), scode, p);
471 	}
472 
473 }
474 
475 static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
476 {
477 
478 	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
479 	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
480 	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);
481 
482 	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
483 
484 	/* We always log it, even if someone is listening for this
485 	 * trap.
486 	 */
487 	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
488 		   0, TRAP_TYPE_CEE, SIGTRAP);
489 
490 	/* The Correctable ECC Error trap does not disable I/D caches.  So
491 	 * we only have to restore the ESTATE Error Enable register.
492 	 */
493 	spitfire_enable_estate_errors();
494 }
495 
496 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
497 {
498 	siginfo_t info;
499 
500 	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
501 	       "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
502 	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
503 
504 	/* XXX add more human friendly logging of the error status
505 	 * XXX as is implemented for cheetah
506 	 */
507 
508 	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
509 
510 	/* We always log it, even if someone is listening for this
511 	 * trap.
512 	 */
513 	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
514 		   0, tt, SIGTRAP);
515 
516 	if (regs->tstate & TSTATE_PRIV) {
517 		if (tl1)
518 			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
519 		die_if_kernel("UE", regs);
520 	}
521 
522 	/* XXX need more intelligent processing here, such as is implemented
523 	 * XXX for cheetah errors, in fact if the E-cache still holds the
524 	 * XXX line with bad parity this will loop
525 	 */
526 
527 	spitfire_clean_and_reenable_l1_caches();
528 	spitfire_enable_estate_errors();
529 
530 	if (test_thread_flag(TIF_32BIT)) {
531 		regs->tpc &= 0xffffffff;
532 		regs->tnpc &= 0xffffffff;
533 	}
534 	info.si_signo = SIGBUS;
535 	info.si_errno = 0;
536 	info.si_code = BUS_OBJERR;
537 	info.si_addr = (void *)0;
538 	info.si_trapno = 0;
539 	force_sig_info(SIGBUS, &info, current);
540 }
541 
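/* Top-level Spitfire async error handler.  The trap entry code packs
 * the AFSR, the trap type, a TL>1 flag and snapshots of both UDB
 * error registers into the single status_encoded argument, decoded
 * below via the SFSTAT_* masks from <asm/sfafsr.h>.
 */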
542 void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
543 {
544 	unsigned long afsr, tt, udbh, udbl;
545 	int tl1;
546 
547 	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
548 	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
549 	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
550 	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
551 	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
552 
553 #ifdef CONFIG_PCI
554 	if (tt == TRAP_TYPE_DAE &&
555 	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
556 		spitfire_clean_and_reenable_l1_caches();
557 		spitfire_enable_estate_errors();
558 
559 		pci_poke_faulted = 1;
560 		regs->tnpc = regs->tpc + 4;
561 		return;
562 	}
563 #endif
564 
565 	if (afsr & SFAFSR_UE)
566 		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
567 
568 	if (tt == TRAP_TYPE_CEE) {
569 		/* Handle the case where we took a CEE trap, but ACK'd
570 		 * only the UE state in the UDB error registers.
571 		 */
572 		if (afsr & SFAFSR_UE) {
573 			if (udbh & UDBE_CE) {
574 				__asm__ __volatile__(
575 					"stxa	%0, [%1] %2\n\t"
576 					"membar	#Sync"
577 					: /* no outputs */
578 					: "r" (udbh & UDBE_CE),
579 					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
580 			}
581 			if (udbl & UDBE_CE) {
582 				__asm__ __volatile__(
583 					"stxa	%0, [%1] %2\n\t"
584 					"membar	#Sync"
585 					: /* no outputs */
586 					: "r" (udbl & UDBE_CE),
587 					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
588 			}
589 		}
590 
591 		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
592 	}
593 }
594 
595 int cheetah_pcache_forced_on;
596 
597 void cheetah_enable_pcache(void)
598 {
599 	unsigned long dcr;
600 
601 	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
602 	       smp_processor_id());
603 
604 	__asm__ __volatile__("ldxa [%%g0] %1, %0"
605 			     : "=r" (dcr)
606 			     : "i" (ASI_DCU_CONTROL_REG));
607 	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
608 	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
609 			     "membar #Sync"
610 			     : /* no outputs */
611 			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
612 }
613 
614 /* Cheetah error trap handling. */
615 static unsigned long ecache_flush_physbase;
616 static unsigned long ecache_flush_linesize;
617 static unsigned long ecache_flush_size;
618 
619 /* This table is ordered in priority of errors and matches the
620  * AFAR overwrite policy as well.
621  */
622 
623 struct afsr_error_table {
624 	unsigned long mask;
625 	const char *name;
626 };
627 
628 static const char CHAFSR_PERR_msg[] =
629 	"System interface protocol error";
630 static const char CHAFSR_IERR_msg[] =
631 	"Internal processor error";
632 static const char CHAFSR_ISAP_msg[] =
633 	"System request parity error on incoming address";
634 static const char CHAFSR_UCU_msg[] =
635 	"Uncorrectable E-cache ECC error for ifetch/data";
636 static const char CHAFSR_UCC_msg[] =
637 	"SW Correctable E-cache ECC error for ifetch/data";
638 static const char CHAFSR_UE_msg[] =
639 	"Uncorrectable system bus data ECC error for read";
640 static const char CHAFSR_EDU_msg[] =
641 	"Uncorrectable E-cache ECC error for stmerge/blkld";
642 static const char CHAFSR_EMU_msg[] =
643 	"Uncorrectable system bus MTAG error";
644 static const char CHAFSR_WDU_msg[] =
645 	"Uncorrectable E-cache ECC error for writeback";
646 static const char CHAFSR_CPU_msg[] =
647 	"Uncorrectable ECC error for copyout";
648 static const char CHAFSR_CE_msg[] =
649 	"HW corrected system bus data ECC error for read";
650 static const char CHAFSR_EDC_msg[] =
651 	"HW corrected E-cache ECC error for stmerge/blkld";
652 static const char CHAFSR_EMC_msg[] =
653 	"HW corrected system bus MTAG ECC error";
654 static const char CHAFSR_WDC_msg[] =
655 	"HW corrected E-cache ECC error for writeback";
656 static const char CHAFSR_CPC_msg[] =
657 	"HW corrected ECC error for copyout";
658 static const char CHAFSR_TO_msg[] =
659 	"Unmapped error from system bus";
660 static const char CHAFSR_BERR_msg[] =
661 	"Bus error response from system bus";
662 static const char CHAFSR_IVC_msg[] =
663 	"HW corrected system bus data ECC error for ivec read";
664 static const char CHAFSR_IVU_msg[] =
665 	"Uncorrectable system bus data ECC error for ivec read";
666 static struct afsr_error_table __cheetah_error_table[] = {
667 	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
668 	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
669 	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
670 	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
671 	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
672 	{	CHAFSR_UE,	CHAFSR_UE_msg		},
673 	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
674 	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
675 	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
676 	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
677 	{	CHAFSR_CE,	CHAFSR_CE_msg		},
678 	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
679 	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
680 	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
681 	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
682 	{	CHAFSR_TO,	CHAFSR_TO_msg		},
683 	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
684 	/* These two do not update the AFAR. */
685 	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
686 	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
687 	{	0,		NULL			},
688 };
689 static const char CHPAFSR_DTO_msg[] =
690 	"System bus unmapped error for prefetch/storequeue-read";
691 static const char CHPAFSR_DBERR_msg[] =
692 	"System bus error for prefetch/storequeue-read";
693 static const char CHPAFSR_THCE_msg[] =
694 	"Hardware corrected E-cache Tag ECC error";
695 static const char CHPAFSR_TSCE_msg[] =
696 	"SW handled correctable E-cache Tag ECC error";
697 static const char CHPAFSR_TUE_msg[] =
698 	"Uncorrectable E-cache Tag ECC error";
699 static const char CHPAFSR_DUE_msg[] =
700 	"System bus uncorrectable data ECC error due to prefetch/store-fill";
701 static struct afsr_error_table __cheetah_plus_error_table[] = {
702 	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
703 	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
704 	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
705 	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
706 	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
707 	{	CHAFSR_UE,	CHAFSR_UE_msg		},
708 	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
709 	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
710 	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
711 	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
712 	{	CHAFSR_CE,	CHAFSR_CE_msg		},
713 	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
714 	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
715 	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
716 	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
717 	{	CHAFSR_TO,	CHAFSR_TO_msg		},
718 	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
719 	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
720 	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
721 	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
722 	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
723 	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
724 	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
725 	/* These two do not update the AFAR. */
726 	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
727 	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
728 	{	0,		NULL			},
729 };
730 static const char JPAFSR_JETO_msg[] =
731 	"System interface protocol error, hw timeout caused";
732 static const char JPAFSR_SCE_msg[] =
733 	"Parity error on system snoop results";
734 static const char JPAFSR_JEIC_msg[] =
735 	"System interface protocol error, illegal command detected";
736 static const char JPAFSR_JEIT_msg[] =
737 	"System interface protocol error, illegal ADTYPE detected";
738 static const char JPAFSR_OM_msg[] =
739 	"Out of range memory error has occurred";
740 static const char JPAFSR_ETP_msg[] =
741 	"Parity error on L2 cache tag SRAM";
742 static const char JPAFSR_UMS_msg[] =
743 	"Error due to unsupported store";
744 static const char JPAFSR_RUE_msg[] =
745 	"Uncorrectable ECC error from remote cache/memory";
746 static const char JPAFSR_RCE_msg[] =
747 	"Correctable ECC error from remote cache/memory";
748 static const char JPAFSR_BP_msg[] =
749 	"JBUS parity error on returned read data";
750 static const char JPAFSR_WBP_msg[] =
751 	"JBUS parity error on data for writeback or block store";
752 static const char JPAFSR_FRC_msg[] =
753 	"Foreign read to DRAM incurring correctable ECC error";
754 static const char JPAFSR_FRU_msg[] =
755 	"Foreign read to DRAM incurring uncorrectable ECC error";
756 static struct afsr_error_table __jalapeno_error_table[] = {
757 	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
758 	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
759 	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
760 	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
761 	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
762 	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
763 	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
764 	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
765 	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
766 	{	CHAFSR_UE,	CHAFSR_UE_msg		},
767 	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
768 	{	JPAFSR_OM,	JPAFSR_OM_msg		},
769 	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
770 	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
771 	{	CHAFSR_CE,	CHAFSR_CE_msg		},
772 	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
773 	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
774 	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
775 	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
776 	{	CHAFSR_TO,	CHAFSR_TO_msg		},
777 	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
778 	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
779 	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
780 	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
781 	{	JPAFSR_BP,	JPAFSR_BP_msg		},
782 	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
783 	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
784 	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
785 	/* These two do not update the AFAR. */
786 	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
787 	{	0,		NULL			},
788 };
789 static struct afsr_error_table *cheetah_error_table;
790 static unsigned long cheetah_afsr_errors;
791 
792 struct cheetah_err_info *cheetah_error_log;
793 
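/* Two log slots are reserved per cpu: slot 0 for errors taken at
 * TL0 and slot 1 for errors with CHAFSR_TL1 set.
 */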
794 static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
795 {
796 	struct cheetah_err_info *p;
797 	int cpu = smp_processor_id();
798 
799 	if (!cheetah_error_log)
800 		return NULL;
801 
802 	p = cheetah_error_log + (cpu * 2);
803 	if ((afsr & CHAFSR_TL1) != 0UL)
804 		p++;
805 
806 	return p;
807 }
808 
809 extern unsigned int tl0_icpe[], tl1_icpe[];
810 extern unsigned int tl0_dcpe[], tl1_dcpe[];
811 extern unsigned int tl0_fecc[], tl1_fecc[];
812 extern unsigned int tl0_cee[], tl1_cee[];
813 extern unsigned int tl0_iae[], tl1_iae[];
814 extern unsigned int tl0_dae[], tl1_dae[];
815 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
816 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
817 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
818 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
819 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
820 
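/* Boot-time setup: size the E-cache displacement-flush window at
 * twice the largest E-cache found (see cheetah_flush_ecache_line()),
 * allocate the per-cpu error scoreboard, choose the AFSR decode
 * table from the %ver implementation field (0x003e0015 is taken to
 * be the Cheetah+ id), and patch in the error trap vectors.
 */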
821 void __init cheetah_ecache_flush_init(void)
822 {
823 	unsigned long largest_size, smallest_linesize, order, ver;
824 	int i, sz;
825 
826 	/* Scan all cpu device tree nodes, note two values:
827 	 * 1) largest E-cache size
828 	 * 2) smallest E-cache line size
829 	 */
830 	largest_size = 0UL;
831 	smallest_linesize = ~0UL;
832 
833 	for (i = 0; i < NR_CPUS; i++) {
834 		unsigned long val;
835 
836 		val = cpu_data(i).ecache_size;
837 		if (!val)
838 			continue;
839 
840 		if (val > largest_size)
841 			largest_size = val;
842 
843 		val = cpu_data(i).ecache_line_size;
844 		if (val < smallest_linesize)
845 			smallest_linesize = val;
846 
847 	}
848 
849 	if (largest_size == 0UL || smallest_linesize == ~0UL) {
850 		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
851 			    "parameters.\n");
852 		prom_halt();
853 	}
854 
855 	ecache_flush_size = (2 * largest_size);
856 	ecache_flush_linesize = smallest_linesize;
857 
858 	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
859 
860 	if (ecache_flush_physbase == ~0UL) {
861 		prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
862 			    "contiguous physical memory.\n",
863 			    ecache_flush_size);
864 		prom_halt();
865 	}
866 
867 	/* Now allocate error trap reporting scoreboard. */
868 	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
869 	for (order = 0; order < MAX_ORDER; order++) {
870 		if ((PAGE_SIZE << order) >= sz)
871 			break;
872 	}
873 	cheetah_error_log = (struct cheetah_err_info *)
874 		__get_free_pages(GFP_KERNEL, order);
875 	if (!cheetah_error_log) {
876 		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
877 			    "error logging scoreboard (%d bytes).\n", sz);
878 		prom_halt();
879 	}
880 	memset(cheetah_error_log, 0, PAGE_SIZE << order);
881 
882 	/* Mark all AFSRs as invalid so that the trap handler will
883 	 * log new information there.
884 	 */
885 	for (i = 0; i < 2 * NR_CPUS; i++)
886 		cheetah_error_log[i].afsr = CHAFSR_INVALID;
887 
888 	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
889 	if ((ver >> 32) == __JALAPENO_ID ||
890 	    (ver >> 32) == __SERRANO_ID) {
891 		cheetah_error_table = &__jalapeno_error_table[0];
892 		cheetah_afsr_errors = JPAFSR_ERRORS;
893 	} else if ((ver >> 32) == 0x003e0015) {
894 		cheetah_error_table = &__cheetah_plus_error_table[0];
895 		cheetah_afsr_errors = CHPAFSR_ERRORS;
896 	} else {
897 		cheetah_error_table = &__cheetah_error_table[0];
898 		cheetah_afsr_errors = CHAFSR_ERRORS;
899 	}
900 
901 	/* Now patch trap tables. */
902 	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
903 	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
904 	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
905 	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
906 	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
907 	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
908 	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
909 	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
910 	if (tlb_type == cheetah_plus) {
911 		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
912 		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
913 		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
914 		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
915 	}
916 	flushi(PAGE_OFFSET);
917 }
918 
919 static void cheetah_flush_ecache(void)
920 {
921 	unsigned long flush_base = ecache_flush_physbase;
922 	unsigned long flush_linesize = ecache_flush_linesize;
923 	unsigned long flush_size = ecache_flush_size;
924 
925 	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
926 			     "   bne,pt	%%xcc, 1b\n\t"
927 			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
928 			     : "=&r" (flush_size)
929 			     : "0" (flush_size), "r" (flush_base),
930 			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
931 }
932 
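/* Displace one line: assuming a physically indexed, direct-mapped
 * E-cache, reading the two addresses in the flush window that index
 * the same set as PHYSADDR is enough to push the line out.
 */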
933 static void cheetah_flush_ecache_line(unsigned long physaddr)
934 {
935 	unsigned long alias;
936 
937 	physaddr &= ~(8UL - 1UL);
938 	physaddr = (ecache_flush_physbase +
939 		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
940 	alias = physaddr + (ecache_flush_size >> 1UL);
941 	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
942 			     "ldxa [%1] %2, %%g0\n\t"
943 			     "membar #Sync"
944 			     : /* no outputs */
945 			     : "r" (physaddr), "r" (alias),
946 			       "i" (ASI_PHYS_USE_EC));
947 }
948 
949 /* Unfortunately, the diagnostic access to the I-cache tags we need to
950  * use to clear the thing interferes with I-cache coherency transactions.
951  *
952  * So we must only flush the I-cache when it is disabled.
953  */
954 static void __cheetah_flush_icache(void)
955 {
956 	unsigned int icache_size, icache_line_size;
957 	unsigned long addr;
958 
959 	icache_size = local_cpu_data().icache_size;
960 	icache_line_size = local_cpu_data().icache_line_size;
961 
962 	/* Clear the valid bits in all the tags. */
963 	for (addr = 0; addr < icache_size; addr += icache_line_size) {
964 		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
965 				     "membar #Sync"
966 				     : /* no outputs */
967 				     : "r" (addr | (2 << 3)),
968 				       "i" (ASI_IC_TAG));
969 	}
970 }
971 
972 static void cheetah_flush_icache(void)
973 {
974 	unsigned long dcu_save;
975 
976 	/* Save current DCU, disable I-cache. */
977 	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
978 			     "or %0, %2, %%g1\n\t"
979 			     "stxa %%g1, [%%g0] %1\n\t"
980 			     "membar #Sync"
981 			     : "=r" (dcu_save)
982 			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
983 			     : "g1");
984 
985 	__cheetah_flush_icache();
986 
987 	/* Restore DCU register */
988 	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
989 			     "membar #Sync"
990 			     : /* no outputs */
991 			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
992 }
993 
994 static void cheetah_flush_dcache(void)
995 {
996 	unsigned int dcache_size, dcache_line_size;
997 	unsigned long addr;
998 
999 	dcache_size = local_cpu_data().dcache_size;
1000 	dcache_line_size = local_cpu_data().dcache_line_size;
1001 
1002 	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
1003 		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
1004 				     "membar #Sync"
1005 				     : /* no outputs */
1006 				     : "r" (addr), "i" (ASI_DCACHE_TAG));
1007 	}
1008 }
1009 
1010 /* In order to make the even parity correct we must do two things.
1011  * First, we clear DC_data_parity and set DC_utag to an appropriate value.
1012  * Next, we clear out all 32 bytes of data for that line.  Data of
1013  * all-zero + tag parity value of zero == correct parity.
1014  */
1015 static void cheetah_plus_zap_dcache_parity(void)
1016 {
1017 	unsigned int dcache_size, dcache_line_size;
1018 	unsigned long addr;
1019 
1020 	dcache_size = local_cpu_data().dcache_size;
1021 	dcache_line_size = local_cpu_data().dcache_line_size;
1022 
1023 	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
1024 		unsigned long tag = (addr >> 14);
1025 		unsigned long line;
1026 
1027 		__asm__ __volatile__("membar	#Sync\n\t"
1028 				     "stxa	%0, [%1] %2\n\t"
1029 				     "membar	#Sync"
1030 				     : /* no outputs */
1031 				     : "r" (tag), "r" (addr),
1032 				       "i" (ASI_DCACHE_UTAG));
1033 		for (line = addr; line < addr + dcache_line_size; line += 8)
1034 			__asm__ __volatile__("membar	#Sync\n\t"
1035 					     "stxa	%%g0, [%0] %1\n\t"
1036 					     "membar	#Sync"
1037 					     : /* no outputs */
1038 					     : "r" (line),
1039 					       "i" (ASI_DCACHE_DATA));
1040 	}
1041 }
1042 
1043 /* Conversion tables used to frob Cheetah AFSR syndrome values into
1044  * something palatable to the memory controller driver get_unumber
1045  * routine.
1046  */
1047 #define MT0	137
1048 #define MT1	138
1049 #define MT2	139
1050 #define NONE	254
1051 #define MTC0	140
1052 #define MTC1	141
1053 #define MTC2	142
1054 #define MTC3	143
1055 #define C0	128
1056 #define C1	129
1057 #define C2	130
1058 #define C3	131
1059 #define C4	132
1060 #define C5	133
1061 #define C6	134
1062 #define C7	135
1063 #define C8	136
1064 #define M2	144
1065 #define M3	145
1066 #define M4	146
1067 #define M	147
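/* Legend for the tables below, as commonly read for Cheetah: values
 * 0-127 name a single failing data bit, C0-C8 a failing check bit,
 * M/M2/M3/M4 multi-bit errors, MTn and MTCn the MTAG data and check
 * bits, and NONE no error at all.
 */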
1068 static unsigned char cheetah_ecc_syntab[] = {
1069 /*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
1070 /*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
1071 /*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
1072 /*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
1073 /*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
1074 /*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
1075 /*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
1076 /*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
1077 /*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
1078 /*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
1079 /*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
1080 /*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
1081 /*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
1082 /*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
1083 /*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
1084 /*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
1085 /*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
1086 /*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
1087 /*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
1088 /*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
1089 /*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
1090 /*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
1091 /*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
1092 /*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
1093 /*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
1094 /*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
1095 /*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
1096 /*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
1097 /*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
1098 /*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
1099 /*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
1100 /*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
1101 };
1102 static unsigned char cheetah_mtag_syntab[] = {
1103        NONE, MTC0,
1104        MTC1, NONE,
1105        MTC2, NONE,
1106        NONE, MT0,
1107        MTC3, NONE,
1108        NONE, MT1,
1109        NONE, MT2,
1110        NONE, NONE
1111 };
1112 
1113 /* Return the highest priority error condition mentioned. */
1114 static inline unsigned long cheetah_get_hipri(unsigned long afsr)
1115 {
1116 	unsigned long tmp = 0;
1117 	int i;
1118 
1119 	for (i = 0; cheetah_error_table[i].mask; i++) {
1120 		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1121 			return tmp;
1122 	}
1123 	return tmp;
1124 }
1125 
1126 static const char *cheetah_get_string(unsigned long bit)
1127 {
1128 	int i;
1129 
1130 	for (i = 0; cheetah_error_table[i].mask; i++) {
1131 		if ((bit & cheetah_error_table[i].mask) != 0UL)
1132 			return cheetah_error_table[i].name;
1133 	}
1134 	return "???";
1135 }
1136 
1137 static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
1138 			       unsigned long afsr, unsigned long afar, int recoverable)
1139 {
1140 	unsigned long hipri;
1141 	char unum[256];
1142 
1143 	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
1144 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1145 	       afsr, afar,
1146 	       (afsr & CHAFSR_TL1) ? 1 : 0);
1147 	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
1148 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1149 	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
1150 	printk("%s" "ERROR(%d): ",
1151 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
1152 	printk("TPC<%pS>\n", (void *) regs->tpc);
1153 	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
1154 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1155 	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
1156 	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
1157 	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
1158 	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
1159 	hipri = cheetah_get_hipri(afsr);
1160 	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
1161 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1162 	       hipri, cheetah_get_string(hipri));
1163 
1164 	/* Try to get unumber if relevant. */
1165 #define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
1166 			 CHAFSR_CPC | CHAFSR_CPU | \
1167 			 CHAFSR_UE  | CHAFSR_CE  | \
1168 			 CHAFSR_EDC | CHAFSR_EDU  | \
1169 			 CHAFSR_UCC | CHAFSR_UCU  | \
1170 			 CHAFSR_WDU | CHAFSR_WDC)
1171 #define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
1172 	if (afsr & ESYND_ERRORS) {
1173 		int syndrome;
1174 		int ret;
1175 
1176 		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
1177 		syndrome = cheetah_ecc_syntab[syndrome];
1178 		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
1179 		if (ret >= 0)
1180 			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
1181 			       (recoverable ? KERN_WARNING : KERN_CRIT),
1182 			       smp_processor_id(), unum);
1183 	} else if (afsr & MSYND_ERRORS) {
1184 		int syndrome;
1185 		int ret;
1186 
1187 		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
1188 		syndrome = cheetah_mtag_syntab[syndrome];
1189 		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
1190 		if (ret >= 0)
1191 			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
1192 			       (recoverable ? KERN_WARNING : KERN_CRIT),
1193 			       smp_processor_id(), unum);
1194 	}
1195 
1196 	/* Now dump the cache snapshots. */
1197 	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
1198 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1199 	       (int) info->dcache_index,
1200 	       info->dcache_tag,
1201 	       info->dcache_utag,
1202 	       info->dcache_stag);
1203 	printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
1204 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1205 	       info->dcache_data[0],
1206 	       info->dcache_data[1],
1207 	       info->dcache_data[2],
1208 	       info->dcache_data[3]);
1209 	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
1210 	       "u[%016llx] l[%016llx]\n",
1211 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1212 	       (int) info->icache_index,
1213 	       info->icache_tag,
1214 	       info->icache_utag,
1215 	       info->icache_stag,
1216 	       info->icache_upper,
1217 	       info->icache_lower);
1218 	printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
1219 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1220 	       info->icache_data[0],
1221 	       info->icache_data[1],
1222 	       info->icache_data[2],
1223 	       info->icache_data[3]);
1224 	printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
1225 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1226 	       info->icache_data[4],
1227 	       info->icache_data[5],
1228 	       info->icache_data[6],
1229 	       info->icache_data[7]);
1230 	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
1231 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1232 	       (int) info->ecache_index, info->ecache_tag);
1233 	printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
1234 	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
1235 	       info->ecache_data[0],
1236 	       info->ecache_data[1],
1237 	       info->ecache_data[2],
1238 	       info->ecache_data[3]);
1239 
1240 	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
1241 	while (afsr != 0UL) {
1242 		unsigned long bit = cheetah_get_hipri(afsr);
1243 
1244 		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
1245 		       (recoverable ? KERN_WARNING : KERN_CRIT),
1246 		       bit, cheetah_get_string(bit));
1247 
1248 		afsr &= ~bit;
1249 	}
1250 
1251 	if (!recoverable)
1252 		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
1253 }
1254 
1255 static int cheetah_recheck_errors(struct cheetah_err_info *logp)
1256 {
1257 	unsigned long afsr, afar;
1258 	int ret = 0;
1259 
1260 	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1261 			     : "=r" (afsr)
1262 			     : "i" (ASI_AFSR));
1263 	if ((afsr & cheetah_afsr_errors) != 0) {
1264 		if (logp != NULL) {
1265 			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
1266 					     : "=r" (afar)
1267 					     : "i" (ASI_AFAR));
1268 			logp->afsr = afsr;
1269 			logp->afar = afar;
1270 		}
1271 		ret = 1;
1272 	}
1273 	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
1274 			     "membar #Sync\n\t"
1275 			     : : "r" (afsr), "i" (ASI_AFSR));
1276 
1277 	return ret;
1278 }
1279 
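/* Fast-ECC error trap (UCU/UCC): the trap vector disabled the L1
 * caches before calling us.  Displace the whole E-cache, snapshot
 * and invalidate the logged AFSR/AFAR, flush and re-enable the L1
 * caches and error reporting, then panic only for PERR/IERR/ISAP or
 * if a new uncorrectable error raced in while reporting was off.
 */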
1280 void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1281 {
1282 	struct cheetah_err_info local_snapshot, *p;
1283 	int recoverable;
1284 
1285 	/* Flush E-cache */
1286 	cheetah_flush_ecache();
1287 
1288 	p = cheetah_get_error_log(afsr);
1289 	if (!p) {
1290 		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
1291 			    afsr, afar);
1292 		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1293 			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1294 		prom_halt();
1295 	}
1296 
1297 	/* Grab snapshot of logged error. */
1298 	memcpy(&local_snapshot, p, sizeof(local_snapshot));
1299 
1300 	/* If the current trap snapshot does not match what the
1301 	 * trap handler passed along into our args, big trouble.
1302 	 * In such a case, mark the local copy as invalid.
1303 	 *
1304 	 * Else, it matches and we mark the afsr in the non-local
1305 	 * copy as invalid so we may log new error traps there.
1306 	 */
1307 	if (p->afsr != afsr || p->afar != afar)
1308 		local_snapshot.afsr = CHAFSR_INVALID;
1309 	else
1310 		p->afsr = CHAFSR_INVALID;
1311 
1312 	cheetah_flush_icache();
1313 	cheetah_flush_dcache();
1314 
1315 	/* Re-enable I-cache/D-cache */
1316 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1317 			     "or %%g1, %1, %%g1\n\t"
1318 			     "stxa %%g1, [%%g0] %0\n\t"
1319 			     "membar #Sync"
1320 			     : /* no outputs */
1321 			     : "i" (ASI_DCU_CONTROL_REG),
1322 			       "i" (DCU_DC | DCU_IC)
1323 			     : "g1");
1324 
1325 	/* Re-enable error reporting */
1326 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1327 			     "or %%g1, %1, %%g1\n\t"
1328 			     "stxa %%g1, [%%g0] %0\n\t"
1329 			     "membar #Sync"
1330 			     : /* no outputs */
1331 			     : "i" (ASI_ESTATE_ERROR_EN),
1332 			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1333 			     : "g1");
1334 
1335 	/* Decide if we can continue after handling this trap and
1336 	 * logging the error.
1337 	 */
1338 	recoverable = 1;
1339 	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1340 		recoverable = 0;
1341 
1342 	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
1343 	 * error was logged while we had error reporting traps disabled.
1344 	 */
1345 	if (cheetah_recheck_errors(&local_snapshot)) {
1346 		unsigned long new_afsr = local_snapshot.afsr;
1347 
1348 		/* If we got a new asynchronous error, die... */
1349 		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1350 				CHAFSR_WDU | CHAFSR_CPU |
1351 				CHAFSR_IVU | CHAFSR_UE |
1352 				CHAFSR_BERR | CHAFSR_TO))
1353 			recoverable = 0;
1354 	}
1355 
1356 	/* Log errors. */
1357 	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1358 
1359 	if (!recoverable)
1360 		panic("Irrecoverable Fast-ECC error trap.\n");
1361 
1362 	/* Flush E-cache to kick the error trap handlers out. */
1363 	cheetah_flush_ecache();
1364 }
1365 
1366 /* Try to fix a correctable error by pushing the line out from
1367  * the E-cache.  Recheck error reporting registers to see if the
1368  * problem is intermittent.
1369  */
1370 static int cheetah_fix_ce(unsigned long physaddr)
1371 {
1372 	unsigned long orig_estate;
1373 	unsigned long alias1, alias2;
1374 	int ret;
1375 
1376 	/* Make sure correctable error traps are disabled. */
1377 	__asm__ __volatile__("ldxa	[%%g0] %2, %0\n\t"
1378 			     "andn	%0, %1, %%g1\n\t"
1379 			     "stxa	%%g1, [%%g0] %2\n\t"
1380 			     "membar	#Sync"
1381 			     : "=&r" (orig_estate)
1382 			     : "i" (ESTATE_ERROR_CEEN),
1383 			       "i" (ASI_ESTATE_ERROR_EN)
1384 			     : "g1");
1385 
1386 	/* We calculate alias addresses that will force the
1387 	 * cache line in question out of the E-cache.  Then
1388 	 * we bring it back in with an atomic instruction so
1389 	 * that we get it in some modified/exclusive state,
1390 	 * then we displace it again to try and get proper ECC
1391 	 * pushed back into the system.
1392 	 */
1393 	physaddr &= ~(8UL - 1UL);
1394 	alias1 = (ecache_flush_physbase +
1395 		  (physaddr & ((ecache_flush_size >> 1) - 1)));
1396 	alias2 = alias1 + (ecache_flush_size >> 1);
1397 	__asm__ __volatile__("ldxa	[%0] %3, %%g0\n\t"
1398 			     "ldxa	[%1] %3, %%g0\n\t"
1399 			     "casxa	[%2] %3, %%g0, %%g0\n\t"
1400 			     "ldxa	[%0] %3, %%g0\n\t"
1401 			     "ldxa	[%1] %3, %%g0\n\t"
1402 			     "membar	#Sync"
1403 			     : /* no outputs */
1404 			     : "r" (alias1), "r" (alias2),
1405 			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1406 
1407 	/* Did that trigger another error? */
1408 	if (cheetah_recheck_errors(NULL)) {
1409 		/* Try one more time. */
1410 		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
1411 				     "membar #Sync"
1412 				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
1413 		if (cheetah_recheck_errors(NULL))
1414 			ret = 2;
1415 		else
1416 			ret = 1;
1417 	} else {
1418 		/* No new error, intermittent problem. */
1419 		ret = 0;
1420 	}
1421 
1422 	/* Restore error enables. */
1423 	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
1424 			     "membar	#Sync"
1425 			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));
1426 
1427 	return ret;
1428 }
1429 
1430 /* Return non-zero if PADDR is a valid physical memory address. */
1431 static int cheetah_check_main_memory(unsigned long paddr)
1432 {
1433 	unsigned long vaddr = PAGE_OFFSET + paddr;
1434 
1435 	if (vaddr > (unsigned long) high_memory)
1436 		return 0;
1437 
1438 	return kern_addr_valid(vaddr);
1439 }
1440 
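/* Correctable-ECC trap: if the AFAR points at real memory, try to
 * scrub the line via cheetah_fix_ce().  E-cache state is flushed
 * line-by-line or wholesale depending on whether EDC/CPC was the
 * only error bit set; only the I-cache was disabled on entry.
 */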
1441 void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1442 {
1443 	struct cheetah_err_info local_snapshot, *p;
1444 	int recoverable, is_memory;
1445 
1446 	p = cheetah_get_error_log(afsr);
1447 	if (!p) {
1448 		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
1449 			    afsr, afar);
1450 		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1451 			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1452 		prom_halt();
1453 	}
1454 
1455 	/* Grab snapshot of logged error. */
1456 	memcpy(&local_snapshot, p, sizeof(local_snapshot));
1457 
1458 	/* If the current trap snapshot does not match what the
1459 	 * trap handler passed along into our args, big trouble.
1460 	 * In such a case, mark the local copy as invalid.
1461 	 *
1462 	 * Else, it matches and we mark the afsr in the non-local
1463 	 * copy as invalid so we may log new error traps there.
1464 	 */
1465 	if (p->afsr != afsr || p->afar != afar)
1466 		local_snapshot.afsr = CHAFSR_INVALID;
1467 	else
1468 		p->afsr = CHAFSR_INVALID;
1469 
1470 	is_memory = cheetah_check_main_memory(afar);
1471 
1472 	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
1473 		/* XXX Might want to log the results of this operation
1474 		 * XXX somewhere... -DaveM
1475 		 */
1476 		cheetah_fix_ce(afar);
1477 	}
1478 
1479 	{
1480 		int flush_all, flush_line;
1481 
1482 		flush_all = flush_line = 0;
1483 		if ((afsr & CHAFSR_EDC) != 0UL) {
1484 			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
1485 				flush_line = 1;
1486 			else
1487 				flush_all = 1;
1488 		} else if ((afsr & CHAFSR_CPC) != 0UL) {
1489 			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
1490 				flush_line = 1;
1491 			else
1492 				flush_all = 1;
1493 		}
1494 
1495 		/* Trap handler only disabled I-cache, flush it. */
1496 		cheetah_flush_icache();
1497 
1498 		/* Re-enable I-cache */
1499 		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1500 				     "or %%g1, %1, %%g1\n\t"
1501 				     "stxa %%g1, [%%g0] %0\n\t"
1502 				     "membar #Sync"
1503 				     : /* no outputs */
1504 				     : "i" (ASI_DCU_CONTROL_REG),
1505 				     "i" (DCU_IC)
1506 				     : "g1");
1507 
1508 		if (flush_all)
1509 			cheetah_flush_ecache();
1510 		else if (flush_line)
1511 			cheetah_flush_ecache_line(afar);
1512 	}
1513 
1514 	/* Re-enable error reporting */
1515 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1516 			     "or %%g1, %1, %%g1\n\t"
1517 			     "stxa %%g1, [%%g0] %0\n\t"
1518 			     "membar #Sync"
1519 			     : /* no outputs */
1520 			     : "i" (ASI_ESTATE_ERROR_EN),
1521 			       "i" (ESTATE_ERROR_CEEN)
1522 			     : "g1");
1523 
1524 	/* Decide if we can continue after handling this trap and
1525 	 * logging the error.
1526 	 */
1527 	recoverable = 1;
1528 	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1529 		recoverable = 0;
1530 
1531 	/* Re-check AFSR/AFAR */
1532 	(void) cheetah_recheck_errors(&local_snapshot);
1533 
1534 	/* Log errors. */
1535 	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1536 
1537 	if (!recoverable)
1538 		panic("Irrecoverable Correctable-ECC error trap.\n");
1539 }
1540 
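/* Deferred async error trap (UE, BERR, TO and friends).  On top of
 * the usual logging, recovery requires that the fault hit main
 * memory and that we trapped from user mode or can steer a kernel
 * uaccess through its exception-table fixup; the afflicted page is
 * pinned with get_page() so it is never handed out again.
 */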
1541 void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1542 {
1543 	struct cheetah_err_info local_snapshot, *p;
1544 	int recoverable, is_memory;
1545 
1546 #ifdef CONFIG_PCI
1547 	/* Check for the special PCI poke sequence. */
1548 	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
1549 		cheetah_flush_icache();
1550 		cheetah_flush_dcache();
1551 
1552 		/* Re-enable I-cache/D-cache */
1553 		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1554 				     "or %%g1, %1, %%g1\n\t"
1555 				     "stxa %%g1, [%%g0] %0\n\t"
1556 				     "membar #Sync"
1557 				     : /* no outputs */
1558 				     : "i" (ASI_DCU_CONTROL_REG),
1559 				       "i" (DCU_DC | DCU_IC)
1560 				     : "g1");
1561 
1562 		/* Re-enable error reporting */
1563 		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1564 				     "or %%g1, %1, %%g1\n\t"
1565 				     "stxa %%g1, [%%g0] %0\n\t"
1566 				     "membar #Sync"
1567 				     : /* no outputs */
1568 				     : "i" (ASI_ESTATE_ERROR_EN),
1569 				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1570 				     : "g1");
1571 
1572 		(void) cheetah_recheck_errors(NULL);
1573 
1574 		pci_poke_faulted = 1;
1575 		regs->tpc += 4;
1576 		regs->tnpc = regs->tpc + 4;
1577 		return;
1578 	}
1579 #endif
1580 
1581 	p = cheetah_get_error_log(afsr);
1582 	if (!p) {
1583 		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
1584 			    afsr, afar);
1585 		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
1586 			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
1587 		prom_halt();
1588 	}
1589 
1590 	/* Grab snapshot of logged error. */
1591 	memcpy(&local_snapshot, p, sizeof(local_snapshot));
1592 
1593 	/* If the current trap snapshot does not match what the
1594 	 * trap handler passed along into our args, big trouble.
1595 	 * In such a case, mark the local copy as invalid.
1596 	 *
1597 	 * Else, it matches and we mark the afsr in the non-local
1598 	 * copy as invalid so we may log new error traps there.
1599 	 */
1600 	if (p->afsr != afsr || p->afar != afar)
1601 		local_snapshot.afsr = CHAFSR_INVALID;
1602 	else
1603 		p->afsr = CHAFSR_INVALID;
1604 
1605 	is_memory = cheetah_check_main_memory(afar);
1606 
1607 	{
1608 		int flush_all, flush_line;
1609 
1610 		flush_all = flush_line = 0;
1611 		if ((afsr & CHAFSR_EDU) != 0UL) {
1612 			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
1613 				flush_line = 1;
1614 			else
1615 				flush_all = 1;
1616 		} else if ((afsr & CHAFSR_BERR) != 0UL) {
1617 			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
1618 				flush_line = 1;
1619 			else
1620 				flush_all = 1;
1621 		}
1622 
1623 		cheetah_flush_icache();
1624 		cheetah_flush_dcache();
1625 
1626 		/* Re-enable I/D caches */
1627 		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1628 				     "or %%g1, %1, %%g1\n\t"
1629 				     "stxa %%g1, [%%g0] %0\n\t"
1630 				     "membar #Sync"
1631 				     : /* no outputs */
1632 				     : "i" (ASI_DCU_CONTROL_REG),
1633 				     "i" (DCU_IC | DCU_DC)
1634 				     : "g1");
1635 
1636 		if (flush_all)
1637 			cheetah_flush_ecache();
1638 		else if (flush_line)
1639 			cheetah_flush_ecache_line(afar);
1640 	}
1641 
1642 	/* Re-enable error reporting */
1643 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1644 			     "or %%g1, %1, %%g1\n\t"
1645 			     "stxa %%g1, [%%g0] %0\n\t"
1646 			     "membar #Sync"
1647 			     : /* no outputs */
1648 			     : "i" (ASI_ESTATE_ERROR_EN),
1649 			     "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
1650 			     : "g1");
1651 
1652 	/* Decide if we can continue after handling this trap and
1653 	 * logging the error.
1654 	 */
1655 	recoverable = 1;
1656 	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
1657 		recoverable = 0;
1658 
1659 	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
1660 	 * error was logged while we had error reporting traps disabled.
1661 	 */
1662 	if (cheetah_recheck_errors(&local_snapshot)) {
1663 		unsigned long new_afsr = local_snapshot.afsr;
1664 
1665 		/* If we got a new asynchronous error, die... */
1666 		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1667 				CHAFSR_WDU | CHAFSR_CPU |
1668 				CHAFSR_IVU | CHAFSR_UE |
1669 				CHAFSR_BERR | CHAFSR_TO))
1670 			recoverable = 0;
1671 	}
1672 
1673 	/* Log errors. */
1674 	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1675 
1676 	/* "Recoverable" here means we try to yank the page from ever
1677 	 * being newly used again.  This depends upon a few things:
1678 	 * 1) Must be main memory, and AFAR must be valid.
1679 	 * 2) If we trapped from user, OK.
1680 	 * 3) Else, if we trapped from kernel we must find exception
1681 	 *    table entry (ie. we have to have been accessing user
1682 	 *    space).
1683 	 *
1684 	 * If AFAR is not in main memory, or we trapped from kernel
1685 	 * and cannot find an exception table entry, it is unacceptable
1686 	 * to try and continue.
1687 	 */
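	/* For example, a uaccess site such as get_user() pairs each
	 * user-space access instruction with an __ex_table entry
	 * naming a fixup address; search_exception_tables() below
	 * recovers that fixup for %tpc.
	 */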
1688 	if (recoverable && is_memory) {
1689 		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1690 			/* OK, usermode access. */
1691 			recoverable = 1;
1692 		} else {
1693 			const struct exception_table_entry *entry;
1694 
1695 			entry = search_exception_tables(regs->tpc);
1696 			if (entry) {
1697 				/* OK, kernel access to userspace. */
1698 				recoverable = 1;
1699 
1700 			} else {
1701 				/* BAD, privileged state is corrupted. */
1702 				recoverable = 0;
1703 			}
1704 
1705 			if (recoverable) {
1706 				if (pfn_valid(afar >> PAGE_SHIFT))
1707 					get_page(pfn_to_page(afar >> PAGE_SHIFT));
1708 				else
1709 					recoverable = 0;
1710 
1711 				/* Only perform fixup if we still have a
1712 				 * recoverable condition.
1713 				 */
1714 				if (recoverable) {
1715 					regs->tpc = entry->fixup;
1716 					regs->tnpc = regs->tpc + 4;
1717 				}
1718 			}
1719 		}
1720 	} else {
1721 		recoverable = 0;
1722 	}
1723 
1724 	if (!recoverable)
1725 		panic("Irrecoverable deferred error trap.\n");
1726 }
1727 
1728 /* Handle a D/I cache parity error trap.  TYPE is encoded as:
1729  *
1730  * Bit0:	0=dcache,1=icache
1731  * Bit1:	0=recoverable,1=unrecoverable
1732  *
1733  * The hardware has disabled both the I-cache and D-cache in
1734  * the %dcr register.
1735  */
1736 void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1737 {
1738 	if (type & 0x1)
1739 		__cheetah_flush_icache();
1740 	else
1741 		cheetah_plus_zap_dcache_parity();
1742 	cheetah_flush_dcache();
1743 
1744 	/* Re-enable I-cache/D-cache */
1745 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1746 			     "or %%g1, %1, %%g1\n\t"
1747 			     "stxa %%g1, [%%g0] %0\n\t"
1748 			     "membar #Sync"
1749 			     : /* no outputs */
1750 			     : "i" (ASI_DCU_CONTROL_REG),
1751 			       "i" (DCU_DC | DCU_IC)
1752 			     : "g1");
1753 
1754 	if (type & 0x2) {
1755 		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1756 		       smp_processor_id(),
1757 		       (type & 0x1) ? 'I' : 'D',
1758 		       regs->tpc);
1759 		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
1760 		panic("Irrecoverable Cheetah+ parity error.");
1761 	}
1762 
1763 	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1764 	       smp_processor_id(),
1765 	       (type & 0x1) ? 'I' : 'D',
1766 	       regs->tpc);
1767 	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
1768 }
1769 
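/* One entry in the resumable/non-resumable error queues delivered
 * to the kernel by the sun4v hypervisor.  The 0xNN annotations give
 * each field's byte offset within the 64-byte entry.
 */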
1770 struct sun4v_error_entry {
1771 	/* Unique error handle */
1772 /*0x00*/u64		err_handle;
1773 
1774 	/* %stick value at the time of the error */
1775 /*0x08*/u64		err_stick;
1776 
1777 /*0x10*/u8		reserved_1[3];
1778 
1779 	/* Error type */
1780 /*0x13*/u8		err_type;
1781 #define SUN4V_ERR_TYPE_UNDEFINED	0
1782 #define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
1783 #define SUN4V_ERR_TYPE_PRECISE_NONRES	2
1784 #define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
1785 #define SUN4V_ERR_TYPE_SHUTDOWN_RQST	4
1786 #define SUN4V_ERR_TYPE_DUMP_CORE	5
1787 #define SUN4V_ERR_TYPE_SP_STATE_CHANGE	6
1788 #define SUN4V_ERR_TYPE_NUM		7
1789 
1790 	/* Error attributes */
1791 /*0x14*/u32		err_attrs;
1792 #define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
1793 #define SUN4V_ERR_ATTRS_MEMORY		0x00000002
1794 #define SUN4V_ERR_ATTRS_PIO		0x00000004
1795 #define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
1796 #define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
1797 #define SUN4V_ERR_ATTRS_SHUTDOWN_RQST	0x00000020
1798 #define SUN4V_ERR_ATTRS_ASR		0x00000040
1799 #define SUN4V_ERR_ATTRS_ASI		0x00000080
1800 #define SUN4V_ERR_ATTRS_PRIV_REG	0x00000100
1801 #define SUN4V_ERR_ATTRS_SPSTATE_MSK	0x00000600
1802 #define SUN4V_ERR_ATTRS_SPSTATE_SHFT	9
1803 #define SUN4V_ERR_ATTRS_MODE_MSK	0x03000000
1804 #define SUN4V_ERR_ATTRS_MODE_SHFT	24
1805 #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000
1806 
1807 #define SUN4V_ERR_SPSTATE_FAULTED	0
1808 #define SUN4V_ERR_SPSTATE_AVAILABLE	1
1809 #define SUN4V_ERR_SPSTATE_NOT_PRESENT	2
1810 
1811 #define SUN4V_ERR_MODE_USER		1
1812 #define SUN4V_ERR_MODE_PRIV		2
1813 
1814 	/* Real address of the memory region or PIO transaction */
1815 /*0x18*/u64		err_raddr;
1816 
1817 	/* Size of the operation triggering the error, in bytes */
1818 /*0x20*/u32		err_size;
1819 
1820 	/* ID of the CPU */
1821 /*0x24*/u16		err_cpu;
1822 
1823 	/* Grace period for shutdown, in seconds */
1824 /*0x26*/u16		err_secs;
1825 
1826 	/* Value of the %asi register */
1827 /*0x28*/u8		err_asi;
1828 
1829 /*0x29*/u8		reserved_2;
1830 
1831 	/* ASR register number (valid if SUN4V_ERR_ASR_VALID is set) */
1832 /*0x2a*/u16		err_asr;
1833 #define SUN4V_ERR_ASR_VALID		0x8000
1834 
1835 /*0x2c*/u32		reserved_3;
1836 /*0x30*/u64		reserved_4;
1837 /*0x38*/u64		reserved_5;
1838 };
1839 
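/* Queue-overflow event counts, bumped by the overflow handlers
 * below and reported (then cleared) by sun4v_log_error().
 */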
1840 static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
1841 static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1842 
1843 static const char *sun4v_err_type_to_str(u8 type)
1844 {
1845 	static const char *types[SUN4V_ERR_TYPE_NUM] = {
1846 		"undefined",
1847 		"uncorrected resumable",
1848 		"precise nonresumable",
1849 		"deferred nonresumable",
1850 		"shutdown request",
1851 		"dump core",
1852 		"SP state change",
1853 	};
1854 
1855 	if (type < SUN4V_ERR_TYPE_NUM)
1856 		return types[type];
1857 
1858 	return "unknown";
1859 }
1860 
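/* Decode err_attrs into human-readable strings; bit (1U << i) of
 * the low attribute bits corresponds to attr_names[i].
 */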
1861 static void sun4v_emit_err_attr_strings(u32 attrs)
1862 {
1863 	static const char *attr_names[] = {
1864 		"processor",
1865 		"memory",
1866 		"PIO",
1867 		"int-registers",
1868 		"fpu-registers",
1869 		"shutdown-request",
1870 		"ASR",
1871 		"ASI",
1872 		"priv-reg",
1873 	};
1874 	static const char *sp_states[] = {
1875 		"sp-faulted",
1876 		"sp-available",
1877 		"sp-not-present",
1878 		"sp-state-reserved",
1879 	};
1880 	static const char *modes[] = {
1881 		"mode-reserved0",
1882 		"user",
1883 		"priv",
1884 		"mode-reserved1",
1885 	};
1886 	u32 sp_state, mode;
1887 	int i;
1888 
1889 	for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
1890 		if (attrs & (1U << i)) {
1891 			const char *s = attr_names[i];
1892 
1893 			pr_cont("%s ", s);
1894 		}
1895 	}
1896 
1897 	sp_state = ((attrs & SUN4V_ERR_ATTRS_SPSTATE_MSK) >>
1898 		    SUN4V_ERR_ATTRS_SPSTATE_SHFT);
1899 	pr_cont("%s ", sp_states[sp_state]);
1900 
1901 	mode = ((attrs & SUN4V_ERR_ATTRS_MODE_MSK) >>
1902 		SUN4V_ERR_ATTRS_MODE_SHFT);
1903 	pr_cont("%s ", modes[mode]);
1904 
1905 	if (attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL)
1906 		pr_cont("res-queue-full ");
1907 }
1908 
1909 /* When the report contains a real-address of "-1" it means that the
1910  * hardware did not provide the address.  So we compute the effective
1911  * address of the load or store instruction at regs->tpc and report
1912  * that.  Usually when this happens it's a PIO and in such a case we
1913  * are using physical addresses with bypass ASIs anyway, so what we
1914  * report here is exactly what we want.
1915  */
1916 static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
1917 {
1918 	unsigned int insn;
1919 	u64 addr;
1920 
1921 	if (!(regs->tstate & TSTATE_PRIV))
1922 		return;
1923 
1924 	insn = *(unsigned int *) regs->tpc;
1925 
1926 	addr = compute_effective_address(regs, insn, 0);
1927 
1928 	printk("%s: insn effective address [0x%016llx]\n",
1929 	       pfx, addr);
1930 }
1931 
1932 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
1933 			    int cpu, const char *pfx, atomic_t *ocnt)
1934 {
1935 	u64 *raw_ptr = (u64 *) ent;
1936 	u32 attrs;
1937 	int cnt;
1938 
1939 	printk("%s: Reporting on cpu %d\n", pfx, cpu);
1940 	printk("%s: TPC [0x%016lx] <%pS>\n",
1941 	       pfx, regs->tpc, (void *) regs->tpc);
1942 
1943 	printk("%s: RAW [%016llx:%016llx:%016llx:%016llx\n",
1944 	       pfx, raw_ptr[0], raw_ptr[1], raw_ptr[2], raw_ptr[3]);
1945 	printk("%s:      %016llx:%016llx:%016llx:%016llx]\n",
1946 	       pfx, raw_ptr[4], raw_ptr[5], raw_ptr[6], raw_ptr[7]);
1947 
1948 	printk("%s: handle [0x%016llx] stick [0x%016llx]\n",
1949 	       pfx, ent->err_handle, ent->err_stick);
1950 
1951 	printk("%s: type [%s]\n", pfx, sun4v_err_type_to_str(ent->err_type));
1952 
1953 	attrs = ent->err_attrs;
1954 	printk("%s: attrs [0x%08x] < ", pfx, attrs);
1955 	sun4v_emit_err_attr_strings(attrs);
1956 	pr_cont(">\n");
1957 
1958 	/* Various fields in the error report are only valid if
1959 	 * certain attribute bits are set.
1960 	 */
1961 	if (attrs & (SUN4V_ERR_ATTRS_MEMORY |
1962 		     SUN4V_ERR_ATTRS_PIO |
1963 		     SUN4V_ERR_ATTRS_ASI)) {
1964 		printk("%s: raddr [0x%016llx]\n", pfx, ent->err_raddr);
1965 
1966 		if (ent->err_raddr == ~(u64)0)
1967 			sun4v_report_real_raddr(pfx, regs);
1968 	}
1969 
1970 	if (attrs & (SUN4V_ERR_ATTRS_MEMORY | SUN4V_ERR_ATTRS_ASI))
1971 		printk("%s: size [0x%x]\n", pfx, ent->err_size);
1972 
1973 	if (attrs & (SUN4V_ERR_ATTRS_PROCESSOR |
1974 		     SUN4V_ERR_ATTRS_INT_REGISTERS |
1975 		     SUN4V_ERR_ATTRS_FPU_REGISTERS |
1976 		     SUN4V_ERR_ATTRS_PRIV_REG))
1977 		printk("%s: cpu[%u]\n", pfx, ent->err_cpu);
1978 
1979 	if (attrs & SUN4V_ERR_ATTRS_ASI)
1980 		printk("%s: asi [0x%02x]\n", pfx, ent->err_asi);
1981 
1982 	if ((attrs & (SUN4V_ERR_ATTRS_INT_REGISTERS |
1983 		      SUN4V_ERR_ATTRS_FPU_REGISTERS |
1984 		      SUN4V_ERR_ATTRS_PRIV_REG)) &&
1985 	    (ent->err_asr & SUN4V_ERR_ASR_VALID) != 0)
1986 		printk("%s: reg [0x%04x]\n",
1987 		       pfx, ent->err_asr & ~SUN4V_ERR_ASR_VALID);
1988 
1989 	show_regs(regs);
1990 
1991 	if ((cnt = atomic_read(ocnt)) != 0) {
1992 		atomic_set(ocnt, 0);
1993 		wmb();
1994 		printk("%s: Queue overflowed %d times.\n",
1995 		       pfx, cnt);
1996 	}
1997 }
1998 
1999 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
2000  * Log the event and clear the first word of the entry.
2001  */
2002 void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
2003 {
2004 	enum ctx_state prev_state = exception_enter();
2005 	struct sun4v_error_entry *ent, local_copy;
2006 	struct trap_per_cpu *tb;
2007 	unsigned long paddr;
2008 	int cpu;
2009 
2010 	cpu = get_cpu();
2011 
2012 	tb = &trap_block[cpu];
2013 	paddr = tb->resum_kernel_buf_pa + offset;
2014 	ent = __va(paddr);
2015 
2016 	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
2017 
2018 	/* We have a local copy now, so release the entry.  */
2019 	ent->err_handle = 0;
2020 	wmb();
2021 
2022 	put_cpu();
2023 
2024 	if (local_copy.err_type == SUN4V_ERR_TYPE_SHUTDOWN_RQST) {
2025 		/* We should really take the seconds field of
2026 		 * the error report and use it for the shutdown
2027 		 * invocation, but for now do the same thing we
2028 		 * do for a DS shutdown request.
2029 		 */
2030 		pr_info("Shutdown request, %u seconds...\n",
2031 			local_copy.err_secs);
2032 		orderly_poweroff(true);
2033 		goto out;
2034 	}
2035 
2036 	sun4v_log_error(regs, &local_copy, cpu,
2037 			KERN_ERR "RESUMABLE ERROR",
2038 			&sun4v_resum_oflow_cnt);
2039 out:
2040 	exception_exit(prev_state);
2041 }
2042 
2043 /* If we try to printk() we'll probably make matters worse, by trying
2044  * to retake locks this cpu already holds or causing more errors. So
2045  * just bump a counter, and we'll report these counter bumps above.
2046  */
2047 void sun4v_resum_overflow(struct pt_regs *regs)
2048 {
2049 	atomic_inc(&sun4v_resum_oflow_cnt);
2050 }
2051 
2052 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
2053  * Log the event, clear the first word of the entry, and die.
2054  */
2055 void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
2056 {
2057 	struct sun4v_error_entry *ent, local_copy;
2058 	struct trap_per_cpu *tb;
2059 	unsigned long paddr;
2060 	int cpu;
2061 
2062 	cpu = get_cpu();
2063 
2064 	tb = &trap_block[cpu];
2065 	paddr = tb->nonresum_kernel_buf_pa + offset;
2066 	ent = __va(paddr);
2067 
2068 	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
2069 
2070 	/* We have a local copy now, so release the entry.  */
2071 	ent->err_handle = 0;
2072 	wmb();
2073 
2074 	put_cpu();
2075 
2076 #ifdef CONFIG_PCI
2077 	/* Check for the special PCI poke sequence. */
2078 	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
2079 		pci_poke_faulted = 1;
2080 		regs->tpc += 4;
2081 		regs->tnpc = regs->tpc + 4;
2082 		return;
2083 	}
2084 #endif
2085 
2086 	sun4v_log_error(regs, &local_copy, cpu,
2087 			KERN_EMERG "NON-RESUMABLE ERROR",
2088 			&sun4v_nonresum_oflow_cnt);
2089 
2090 	panic("Non-resumable error.");
2091 }
2092 
2093 /* If we try to printk() we'll probably make matters worse, by trying
2094  * to retake locks this cpu already holds or causing more errors. So
2095  * just bump a counter, and we'll report these counter bumps above.
2096  */
2097 void sun4v_nonresum_overflow(struct pt_regs *regs)
2098 {
2099 	/* XXX Actually even this may not make much sense.  Perhaps
2100 	 * XXX we should just pull the plug and panic directly from here?
2101 	 */
2102 	atomic_inc(&sun4v_nonresum_oflow_cnt);
2103 }
2104 
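/* Filled in by the low-level sun4v TLB trap entry assembly before
 * it calls the report functions below.
 */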
2105 unsigned long sun4v_err_itlb_vaddr;
2106 unsigned long sun4v_err_itlb_ctx;
2107 unsigned long sun4v_err_itlb_pte;
2108 unsigned long sun4v_err_itlb_error;
2109 
2110 void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
2111 {
2112 	if (tl > 1)
2113 		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2114 
2115 	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
2116 	       regs->tpc, tl);
2117 	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
2118 	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
2119 	printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
2120 	       (void *) regs->u_regs[UREG_I7]);
2121 	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
2122 	       "pte[%lx] error[%lx]\n",
2123 	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
2124 	       sun4v_err_itlb_pte, sun4v_err_itlb_error);
2125 
2126 	prom_halt();
2127 }
2128 
2129 unsigned long sun4v_err_dtlb_vaddr;
2130 unsigned long sun4v_err_dtlb_ctx;
2131 unsigned long sun4v_err_dtlb_pte;
2132 unsigned long sun4v_err_dtlb_error;
2133 
2134 void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
2135 {
2136 	if (tl > 1)
2137 		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2138 
2139 	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
2140 	       regs->tpc, tl);
2141 	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
2142 	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
2143 	printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
2144 	       (void *) regs->u_regs[UREG_I7]);
2145 	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
2146 	       "pte[%lx] error[%lx]\n",
2147 	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
2148 	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
2149 
2150 	prom_halt();
2151 }
2152 
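/* Called when a sun4v TLB-operation hypervisor call fails; nothing
 * can be done beyond logging the error and operation codes.
 */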
2153 void hypervisor_tlbop_error(unsigned long err, unsigned long op)
2154 {
2155 	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
2156 	       err, op);
2157 }
2158 
2159 void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
2160 {
2161 	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
2162 	       err, op);
2163 }
2164 
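/* Common tail for the FP exception traps.  A fault from kernel mode
 * simply skips the offending instruction; a fault from user mode is
 * translated into a SIGFPE whose si_code is derived from the saved
 * %fsr.
 */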
2165 static void do_fpe_common(struct pt_regs *regs)
2166 {
2167 	if (regs->tstate & TSTATE_PRIV) {
2168 		regs->tpc = regs->tnpc;
2169 		regs->tnpc += 4;
2170 	} else {
2171 		unsigned long fsr = current_thread_info()->xfsr[0];
2172 		siginfo_t info;
2173 
2174 		if (test_thread_flag(TIF_32BIT)) {
2175 			regs->tpc &= 0xffffffff;
2176 			regs->tnpc &= 0xffffffff;
2177 		}
2178 		info.si_signo = SIGFPE;
2179 		info.si_errno = 0;
2180 		info.si_addr = (void __user *)regs->tpc;
2181 		info.si_trapno = 0;
2182 		info.si_code = __SI_FAULT;
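		/* %fsr bits 16:14 hold the FP trap type (ftt); ftt == 1
		 * is IEEE_754_exception, in which case the cexc field
		 * (bits 4:0: nvc, ofc, ufc, dzc, nxc) identifies the
		 * cause.
		 */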
2183 		if ((fsr & 0x1c000) == (1 << 14)) {
2184 			if (fsr & 0x10)
2185 				info.si_code = FPE_FLTINV;
2186 			else if (fsr & 0x08)
2187 				info.si_code = FPE_FLTOVF;
2188 			else if (fsr & 0x04)
2189 				info.si_code = FPE_FLTUND;
2190 			else if (fsr & 0x02)
2191 				info.si_code = FPE_FLTDIV;
2192 			else if (fsr & 0x01)
2193 				info.si_code = FPE_FLTRES;
2194 		}
2195 		force_sig_info(SIGFPE, &info, current);
2196 	}
2197 }
2198 
2199 void do_fpieee(struct pt_regs *regs)
2200 {
2201 	enum ctx_state prev_state = exception_enter();
2202 
2203 	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
2204 		       0, 0x24, SIGFPE) == NOTIFY_STOP)
2205 		goto out;
2206 
2207 	do_fpe_common(regs);
2208 out:
2209 	exception_exit(prev_state);
2210 }
2211 
2212 extern int do_mathemu(struct pt_regs *, struct fpustate *, bool);
2213 
2214 void do_fpother(struct pt_regs *regs)
2215 {
2216 	enum ctx_state prev_state = exception_enter();
2217 	struct fpustate *f = FPUSTATE;
2218 	int ret = 0;
2219 
2220 	if (notify_die(DIE_TRAP, "fpu exception other", regs,
2221 		       0, 0x25, SIGFPE) == NOTIFY_STOP)
2222 		goto out;
2223 
2224 	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
2225 	case (2 << 14): /* unfinished_FPop */
2226 	case (3 << 14): /* unimplemented_FPop */
2227 		ret = do_mathemu(regs, f, false);
2228 		break;
2229 	}
2230 	if (ret)
2231 		goto out;
2232 	do_fpe_common(regs);
2233 out:
2234 	exception_exit(prev_state);
2235 }
2236 
2237 void do_tof(struct pt_regs *regs)
2238 {
2239 	enum ctx_state prev_state = exception_enter();
2240 	siginfo_t info;
2241 
2242 	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
2243 		       0, 0x26, SIGEMT) == NOTIFY_STOP)
2244 		goto out;
2245 
2246 	if (regs->tstate & TSTATE_PRIV)
2247 		die_if_kernel("Penguin overflow trap from kernel mode", regs);
2248 	if (test_thread_flag(TIF_32BIT)) {
2249 		regs->tpc &= 0xffffffff;
2250 		regs->tnpc &= 0xffffffff;
2251 	}
2252 	info.si_signo = SIGEMT;
2253 	info.si_errno = 0;
2254 	info.si_code = EMT_TAGOVF;
2255 	info.si_addr = (void __user *)regs->tpc;
2256 	info.si_trapno = 0;
2257 	force_sig_info(SIGEMT, &info, current);
2258 out:
2259 	exception_exit(prev_state);
2260 }
2261 
2262 void do_div0(struct pt_regs *regs)
2263 {
2264 	enum ctx_state prev_state = exception_enter();
2265 	siginfo_t info;
2266 
2267 	if (notify_die(DIE_TRAP, "integer division by zero", regs,
2268 		       0, 0x28, SIGFPE) == NOTIFY_STOP)
2269 		goto out;
2270 
2271 	if (regs->tstate & TSTATE_PRIV)
2272 		die_if_kernel("TL0: Kernel divide by zero.", regs);
2273 	if (test_thread_flag(TIF_32BIT)) {
2274 		regs->tpc &= 0xffffffff;
2275 		regs->tnpc &= 0xffffffff;
2276 	}
2277 	info.si_signo = SIGFPE;
2278 	info.si_errno = 0;
2279 	info.si_code = FPE_INTDIV;
2280 	info.si_addr = (void __user *)regs->tpc;
2281 	info.si_trapno = 0;
2282 	force_sig_info(SIGFPE, &info, current);
2283 out:
2284 	exception_exit(prev_state);
2285 }
2286 
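/* Dump the instruction words surrounding a kernel fault PC, with
 * the faulting word bracketed as <...>.
 */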
2287 static void instruction_dump(unsigned int *pc)
2288 {
2289 	int i;
2290 
2291 	if ((((unsigned long) pc) & 3))
2292 		return;
2293 
2294 	printk("Instruction DUMP:");
2295 	for (i = -3; i < 6; i++)
2296 		printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
2297 	printk("\n");
2298 }
2299 
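/* Likewise for a user PC: copy in the nine words around regs->tpc
 * with copy_from_user(); buf[3] is the faulting instruction and is
 * the word bracketed below.
 */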
2300 static void user_instruction_dump(unsigned int __user *pc)
2301 {
2302 	int i;
2303 	unsigned int buf[9];
2304 
2305 	if ((((unsigned long) pc) & 3))
2306 		return;
2307 
2308 	if (copy_from_user(buf, pc - 3, sizeof(buf)))
2309 		return;
2310 
2311 	printk("Instruction DUMP:");
2312 	for (i = 0; i < 9; i++)
2313 		printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
2314 	printk("\n");
2315 }
2316 
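/* Dump a kernel call trace by walking frame pointers.  On sparc64
 * the saved frame pointer is biased by STACK_BIAS, and a trap frame
 * embeds a struct pt_regs after its stack frame, so each step either
 * follows sf->fp or restarts from regs->u_regs[UREG_I6].
 */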
2317 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2318 {
2319 	unsigned long fp, ksp;
2320 	struct thread_info *tp;
2321 	int count = 0;
2322 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2323 	int graph = 0;
2324 #endif
2325 
2326 	ksp = (unsigned long) _ksp;
2327 	if (!tsk)
2328 		tsk = current;
2329 	tp = task_thread_info(tsk);
2330 	if (ksp == 0UL) {
2331 		if (tsk == current)
2332 			asm("mov %%fp, %0" : "=r" (ksp));
2333 		else
2334 			ksp = tp->ksp;
2335 	}
2336 	if (tp == current_thread_info())
2337 		flushw_all();
2338 
2339 	fp = ksp + STACK_BIAS;
2340 
2341 	printk("Call Trace:\n");
2342 	do {
2343 		struct sparc_stackf *sf;
2344 		struct pt_regs *regs;
2345 		unsigned long pc;
2346 
2347 		if (!kstack_valid(tp, fp))
2348 			break;
2349 		sf = (struct sparc_stackf *) fp;
2350 		regs = (struct pt_regs *) (sf + 1);
2351 
2352 		if (kstack_is_trap_frame(tp, regs)) {
2353 			if (!(regs->tstate & TSTATE_PRIV))
2354 				break;
2355 			pc = regs->tpc;
2356 			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
2357 		} else {
2358 			pc = sf->callers_pc;
2359 			fp = (unsigned long)sf->fp + STACK_BIAS;
2360 		}
2361 
2362 		printk(" [%016lx] %pS\n", pc, (void *) pc);
2363 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2364 		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
2365 			int index = tsk->curr_ret_stack;
2366 			if (tsk->ret_stack && index >= graph) {
2367 				pc = tsk->ret_stack[index - graph].ret;
2368 				printk(" [%016lx] %pS\n", pc, (void *) pc);
2369 				graph++;
2370 			}
2371 		}
2372 #endif
2373 	} while (++count < 16);
2374 }
2375 
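/* Step up one register window: ins[6] holds the caller's frame
 * pointer, which is zero once we run off the top of the stack.
 */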
2376 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2377 {
2378 	unsigned long fp = rw->ins[6];
2379 
2380 	if (!fp)
2381 		return NULL;
2382 
2383 	return (struct reg_window *) (fp + STACK_BIAS);
2384 }
2385 
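/* Oops path: print a banner and the register state, walk the saved
 * register windows for a raw backtrace when we died in kernel mode,
 * dump the faulting instructions, then kill the current task.
 */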
2386 void die_if_kernel(char *str, struct pt_regs *regs)
2387 {
2388 	static int die_counter;
2389 	int count = 0;
2390 
2391 	/* Amuse the user. */
2392 	printk(
2393 "              \\|/ ____ \\|/\n"
2394 "              \"@'/ .. \\`@\"\n"
2395 "              /_| \\__/ |_\\\n"
2396 "                 \\__U_/\n");
2397 
2398 	printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
2399 	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
2400 	__asm__ __volatile__("flushw");
2401 	show_regs(regs);
2402 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
2403 	if (regs->tstate & TSTATE_PRIV) {
2404 		struct thread_info *tp = current_thread_info();
2405 		struct reg_window *rw = (struct reg_window *)
2406 			(regs->u_regs[UREG_FP] + STACK_BIAS);
2407 
2408 		/* Stop the back trace when we hit userland or we
2409 		 * find some badly aligned kernel stack.
2410 		 */
2411 		while (rw &&
2412 		       count++ < 30 &&
2413 		       kstack_valid(tp, (unsigned long) rw)) {
2414 			printk("Caller[%016lx]: %pS\n", rw->ins[7],
2415 			       (void *) rw->ins[7]);
2416 
2417 			rw = kernel_stack_up(rw);
2418 		}
2419 		instruction_dump ((unsigned int *) regs->tpc);
2420 	} else {
2421 		if (test_thread_flag(TIF_32BIT)) {
2422 			regs->tpc &= 0xffffffff;
2423 			regs->tnpc &= 0xffffffff;
2424 		}
2425 		user_instruction_dump ((unsigned int __user *) regs->tpc);
2426 	}
2427 	if (regs->tstate & TSTATE_PRIV)
2428 		do_exit(SIGKILL);
2429 	do_exit(SIGSEGV);
2430 }
2431 EXPORT_SYMBOL(die_if_kernel);
2432 
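/* VIS instructions live in the IMPDEP1 opcode space: op (bits
 * 31:30) == 2 and op3 (bits 24:19) == 0x36.
 */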
2433 #define VIS_OPCODE_MASK	((0x3 << 30) | (0x3f << 19))
2434 #define VIS_OPCODE_VAL	((0x2 << 30) | (0x36 << 19))
2435 
2436 extern int handle_popc(u32 insn, struct pt_regs *regs);
2437 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2438 
2439 void do_illegal_instruction(struct pt_regs *regs)
2440 {
2441 	enum ctx_state prev_state = exception_enter();
2442 	unsigned long pc = regs->tpc;
2443 	unsigned long tstate = regs->tstate;
2444 	u32 insn;
2445 	siginfo_t info;
2446 
2447 	if (notify_die(DIE_TRAP, "illegal instruction", regs,
2448 		       0, 0x10, SIGILL) == NOTIFY_STOP)
2449 		goto out;
2450 
2451 	if (tstate & TSTATE_PRIV)
2452 		die_if_kernel("Kernel illegal instruction", regs);
2453 	if (test_thread_flag(TIF_32BIT))
2454 		pc = (u32)pc;
2455 	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
2456 		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
2457 			if (handle_popc(insn, regs))
2458 				goto out;
2459 		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2460 			if (handle_ldf_stq(insn, regs))
2461 				goto out;
2462 		} else if (tlb_type == hypervisor) {
2463 			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
2464 				if (!vis_emul(regs, insn))
2465 					goto out;
2466 			} else {
2467 				struct fpustate *f = FPUSTATE;
2468 
2469 				/* On UltraSPARC T2 and later, FPU insns which
2470 				 * are not implemented in HW signal an illegal
2471 				 * instruction trap and do not set the FP Trap
2472 				 * Type (ftt) in the %fsr to unimplemented_FPop.
2473 				 */
2474 				if (do_mathemu(regs, f, true))
2475 					goto out;
2476 			}
2477 		}
2478 	}
2479 	info.si_signo = SIGILL;
2480 	info.si_errno = 0;
2481 	info.si_code = ILL_ILLOPC;
2482 	info.si_addr = (void __user *)pc;
2483 	info.si_trapno = 0;
2484 	force_sig_info(SIGILL, &info, current);
2485 out:
2486 	exception_exit(prev_state);
2487 }
2488 
2489 extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
2490 
2491 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2492 {
2493 	enum ctx_state prev_state = exception_enter();
2494 	siginfo_t info;
2495 
2496 	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2497 		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2498 		goto out;
2499 
2500 	if (regs->tstate & TSTATE_PRIV) {
2501 		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2502 		goto out;
2503 	}
2504 	info.si_signo = SIGBUS;
2505 	info.si_errno = 0;
2506 	info.si_code = BUS_ADRALN;
2507 	info.si_addr = (void __user *)sfar;
2508 	info.si_trapno = 0;
2509 	force_sig_info(SIGBUS, &info, current);
2510 out:
2511 	exception_exit(prev_state);
2512 }
2513 
2514 void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2515 {
2516 	siginfo_t info;
2517 
2518 	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2519 		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2520 		return;
2521 
2522 	if (regs->tstate & TSTATE_PRIV) {
2523 		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2524 		return;
2525 	}
2526 	info.si_signo = SIGBUS;
2527 	info.si_errno = 0;
2528 	info.si_code = BUS_ADRALN;
2529 	info.si_addr = (void __user *) addr;
2530 	info.si_trapno = 0;
2531 	force_sig_info(SIGBUS, &info, current);
2532 }
2533 
2534 void do_privop(struct pt_regs *regs)
2535 {
2536 	enum ctx_state prev_state = exception_enter();
2537 	siginfo_t info;
2538 
2539 	if (notify_die(DIE_TRAP, "privileged operation", regs,
2540 		       0, 0x11, SIGILL) == NOTIFY_STOP)
2541 		goto out;
2542 
2543 	if (test_thread_flag(TIF_32BIT)) {
2544 		regs->tpc &= 0xffffffff;
2545 		regs->tnpc &= 0xffffffff;
2546 	}
2547 	info.si_signo = SIGILL;
2548 	info.si_errno = 0;
2549 	info.si_code = ILL_PRVOPC;
2550 	info.si_addr = (void __user *)regs->tpc;
2551 	info.si_trapno = 0;
2552 	force_sig_info(SIGILL, &info, current);
2553 out:
2554 	exception_exit(prev_state);
2555 }
2556 
2557 void do_privact(struct pt_regs *regs)
2558 {
2559 	do_privop(regs);
2560 }
2561 
2562 /* Trap level 1 stuff or other traps we should never see... */
2563 void do_cee(struct pt_regs *regs)
2564 {
2565 	exception_enter();
2566 	die_if_kernel("TL0: Cache Error Exception", regs);
2567 }
2568 
2569 void do_cee_tl1(struct pt_regs *regs)
2570 {
2571 	exception_enter();
2572 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2573 	die_if_kernel("TL1: Cache Error Exception", regs);
2574 }
2575 
2576 void do_dae_tl1(struct pt_regs *regs)
2577 {
2578 	exception_enter();
2579 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2580 	die_if_kernel("TL1: Data Access Exception", regs);
2581 }
2582 
2583 void do_iae_tl1(struct pt_regs *regs)
2584 {
2585 	exception_enter();
2586 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2587 	die_if_kernel("TL1: Instruction Access Exception", regs);
2588 }
2589 
2590 void do_div0_tl1(struct pt_regs *regs)
2591 {
2592 	exception_enter();
2593 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2594 	die_if_kernel("TL1: DIV0 Exception", regs);
2595 }
2596 
2597 void do_fpdis_tl1(struct pt_regs *regs)
2598 {
2599 	exception_enter();
2600 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2601 	die_if_kernel("TL1: FPU Disabled", regs);
2602 }
2603 
2604 void do_fpieee_tl1(struct pt_regs *regs)
2605 {
2606 	exception_enter();
2607 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2608 	die_if_kernel("TL1: FPU IEEE Exception", regs);
2609 }
2610 
2611 void do_fpother_tl1(struct pt_regs *regs)
2612 {
2613 	exception_enter();
2614 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2615 	die_if_kernel("TL1: FPU Other Exception", regs);
2616 }
2617 
2618 void do_ill_tl1(struct pt_regs *regs)
2619 {
2620 	exception_enter();
2621 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2622 	die_if_kernel("TL1: Illegal Instruction Exception", regs);
2623 }
2624 
2625 void do_irq_tl1(struct pt_regs *regs)
2626 {
2627 	exception_enter();
2628 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2629 	die_if_kernel("TL1: IRQ Exception", regs);
2630 }
2631 
2632 void do_lddfmna_tl1(struct pt_regs *regs)
2633 {
2634 	exception_enter();
2635 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2636 	die_if_kernel("TL1: LDDF Exception", regs);
2637 }
2638 
2639 void do_stdfmna_tl1(struct pt_regs *regs)
2640 {
2641 	exception_enter();
2642 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2643 	die_if_kernel("TL1: STDF Exception", regs);
2644 }
2645 
2646 void do_paw(struct pt_regs *regs)
2647 {
2648 	exception_enter();
2649 	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
2650 }
2651 
2652 void do_paw_tl1(struct pt_regs *regs)
2653 {
2654 	exception_enter();
2655 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2656 	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
2657 }
2658 
2659 void do_vaw(struct pt_regs *regs)
2660 {
2661 	exception_enter();
2662 	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
2663 }
2664 
2665 void do_vaw_tl1(struct pt_regs *regs)
2666 {
2667 	exception_enter();
2668 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2669 	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2670 }
2671 
2672 void do_tof_tl1(struct pt_regs *regs)
2673 {
2674 	exception_enter();
2675 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2676 	die_if_kernel("TL1: Tag Overflow Exception", regs);
2677 }
2678 
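/* Software trap that returns a SPARC V8 style %psr synthesized
 * from the 64-bit %tstate (used by 32-bit compat userland).
 */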
2679 void do_getpsr(struct pt_regs *regs)
2680 {
2681 	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2682 	regs->tpc   = regs->tnpc;
2683 	regs->tnpc += 4;
2684 	if (test_thread_flag(TIF_32BIT)) {
2685 		regs->tpc &= 0xffffffff;
2686 		regs->tnpc &= 0xffffffff;
2687 	}
2688 }
2689 
2690 struct trap_per_cpu trap_block[NR_CPUS];
2691 EXPORT_SYMBOL(trap_block);
2692 
2693 /* This can get invoked before sched_init() so play it super safe
2694  * and use hard_smp_processor_id().
2695  */
2696 void notrace init_cur_cpu_trap(struct thread_info *t)
2697 {
2698 	int cpu = hard_smp_processor_id();
2699 	struct trap_per_cpu *p = &trap_block[cpu];
2700 
2701 	p->thread = t;
2702 	p->pgd_paddr = 0;
2703 }
2704 
2705 extern void thread_info_offsets_are_bolixed_dave(void);
2706 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
2707 extern void tsb_config_offsets_are_bolixed_dave(void);
2708 
2709 /* Only invoked on boot processor. */
2710 void __init trap_init(void)
2711 {
2712 	/* Compile time sanity check: the TI_*, TRAP_PER_CPU_* and
	 * TSB_CONFIG_* values are hand-maintained assembly offsets,
	 * so they must match the C structure layouts the trap entry
	 * code depends on.
	 */
2713 	BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
2714 		     TI_FLAGS != offsetof(struct thread_info, flags) ||
2715 		     TI_CPU != offsetof(struct thread_info, cpu) ||
2716 		     TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
2717 		     TI_KSP != offsetof(struct thread_info, ksp) ||
2718 		     TI_FAULT_ADDR != offsetof(struct thread_info,
2719 					       fault_address) ||
2720 		     TI_KREGS != offsetof(struct thread_info, kregs) ||
2721 		     TI_UTRAPS != offsetof(struct thread_info, utraps) ||
2722 		     TI_EXEC_DOMAIN != offsetof(struct thread_info,
2723 						exec_domain) ||
2724 		     TI_REG_WINDOW != offsetof(struct thread_info,
2725 					       reg_window) ||
2726 		     TI_RWIN_SPTRS != offsetof(struct thread_info,
2727 					       rwbuf_stkptrs) ||
2728 		     TI_GSR != offsetof(struct thread_info, gsr) ||
2729 		     TI_XFSR != offsetof(struct thread_info, xfsr) ||
2730 		     TI_PRE_COUNT != offsetof(struct thread_info,
2731 					      preempt_count) ||
2732 		     TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
2733 		     TI_CURRENT_DS != offsetof(struct thread_info,
2734 						current_ds) ||
2735 		     TI_RESTART_BLOCK != offsetof(struct thread_info,
2736 						  restart_block) ||
2737 		     TI_KUNA_REGS != offsetof(struct thread_info,
2738 					      kern_una_regs) ||
2739 		     TI_KUNA_INSN != offsetof(struct thread_info,
2740 					      kern_una_insn) ||
2741 		     TI_FPREGS != offsetof(struct thread_info, fpregs) ||
2742 		     (TI_FPREGS & (64 - 1)));
2743 
2744 	BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
2745 						     thread) ||
2746 		     (TRAP_PER_CPU_PGD_PADDR !=
2747 		      offsetof(struct trap_per_cpu, pgd_paddr)) ||
2748 		     (TRAP_PER_CPU_CPU_MONDO_PA !=
2749 		      offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
2750 		     (TRAP_PER_CPU_DEV_MONDO_PA !=
2751 		      offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
2752 		     (TRAP_PER_CPU_RESUM_MONDO_PA !=
2753 		      offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
2754 		     (TRAP_PER_CPU_RESUM_KBUF_PA !=
2755 		      offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
2756 		     (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
2757 		      offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
2758 		     (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
2759 		      offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
2760 		     (TRAP_PER_CPU_FAULT_INFO !=
2761 		      offsetof(struct trap_per_cpu, fault_info)) ||
2762 		     (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
2763 		      offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
2764 		     (TRAP_PER_CPU_CPU_LIST_PA !=
2765 		      offsetof(struct trap_per_cpu, cpu_list_pa)) ||
2766 		     (TRAP_PER_CPU_TSB_HUGE !=
2767 		      offsetof(struct trap_per_cpu, tsb_huge)) ||
2768 		     (TRAP_PER_CPU_TSB_HUGE_TEMP !=
2769 		      offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
2770 		     (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
2771 		      offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
2772 		     (TRAP_PER_CPU_CPU_MONDO_QMASK !=
2773 		      offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
2774 		     (TRAP_PER_CPU_DEV_MONDO_QMASK !=
2775 		      offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
2776 		     (TRAP_PER_CPU_RESUM_QMASK !=
2777 		      offsetof(struct trap_per_cpu, resum_qmask)) ||
2778 		     (TRAP_PER_CPU_NONRESUM_QMASK !=
2779 		      offsetof(struct trap_per_cpu, nonresum_qmask)) ||
2780 		     (TRAP_PER_CPU_PER_CPU_BASE !=
2781 		      offsetof(struct trap_per_cpu, __per_cpu_base)));
2782 
2783 	BUILD_BUG_ON((TSB_CONFIG_TSB !=
2784 		      offsetof(struct tsb_config, tsb)) ||
2785 		     (TSB_CONFIG_RSS_LIMIT !=
2786 		      offsetof(struct tsb_config, tsb_rss_limit)) ||
2787 		     (TSB_CONFIG_NENTRIES !=
2788 		      offsetof(struct tsb_config, tsb_nentries)) ||
2789 		     (TSB_CONFIG_REG_VAL !=
2790 		      offsetof(struct tsb_config, tsb_reg_val)) ||
2791 		     (TSB_CONFIG_MAP_VADDR !=
2792 		      offsetof(struct tsb_config, tsb_map_vaddr)) ||
2793 		     (TSB_CONFIG_MAP_PTE !=
2794 		      offsetof(struct tsb_config, tsb_map_pte)));
2795 
2796 	/* Attach to the address space of init_task.  On SMP we
2797 	 * do this in smp.c:smp_callin for other cpus.
2798 	 */
2799 	atomic_inc(&init_mm.mm_count);
2800 	current->active_mm = &init_mm;
2801 }
2802