xref: /openbmc/linux/arch/ia64/kernel/traps.c (revision db181ce0)
/*
 * Architecture-specific trap handling.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/module.h>		/* for EXPORT_SYMBOL */
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/delay.h>		/* for ssleep() */
#include <linux/kdebug.h>

#include <asm/fpswa.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/setup.h>

fpswa_interface_t *fpswa_interface;
EXPORT_SYMBOL(fpswa_interface);
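
/*
 * FPSWA (Floating-Point Software Assist) is a firmware-provided EFI
 * driver that emulates the floating-point corner cases the CPU raises
 * an assist fault for (denormal operands and the like).  The boot
 * loader passes its physical address in ia64_boot_param->fpswa;
 * trap_init() below converts that to a kernel virtual address.
 */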

void __init
trap_init (void)
{
	if (ia64_boot_param->fpswa)
		/* FPSWA fixup: make the interface pointer a kernel virtual address: */
		fpswa_interface = __va(ia64_boot_param->fpswa);
}

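/*
 * die() serializes oops output with a recursion guard: lock_owner
 * records which CPU currently holds die.lock, so a fault taken while
 * that same CPU is already inside die() does not deadlock on the
 * spinlock, and lock_owner_depth caps how many nested oopses actually
 * get their registers printed.
 */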
int
die (const char *str, struct pt_regs *regs, long err)
{
	static struct {
		spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock =	__SPIN_LOCK_UNLOCKED(die.lock),
		.lock_owner = -1,
		.lock_owner_depth = 0
	};
	static int die_counter;
	int cpu = get_cpu();

	if (die.lock_owner != cpu) {
		console_verbose();
		spin_lock_irq(&die.lock);
		die.lock_owner = cpu;
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
	}
	put_cpu();

	if (++die.lock_owner_depth < 3) {
		printk("%s[%d]: %s %ld [%d]\n",
		       current->comm, task_pid_nr(current), str, err, ++die_counter);
		if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV)
		    != NOTIFY_STOP)
			show_regs(regs);
		else
			regs = NULL;
	} else
		printk(KERN_ERR "Recursive die() failure, output suppressed\n");

	bust_spinlocks(0);
	die.lock_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die.lock);

	if (!regs)
		return 1;

	if (panic_on_oops)
		panic("Fatal exception");

	do_exit(SIGSEGV);
	return 0;
}

int
die_if_kernel (char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		return die(str, regs, err);
	return 0;
}

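/*
 * The ia64 "break" instruction encodes a 21-bit immediate, and the
 * values decoded below follow the software conventions for break
 * immediates.  As a user-space sketch, a plain
 *
 *	asm volatile ("break 0");
 *
 * lands in case 0 below and (outside the kernel) is answered with
 * SIGILL/ILL_ILLOPC.
 */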
void
__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
{
	siginfo_t siginfo;
	int sig, code;

	/* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these fields initialized: */
	siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
	siginfo.si_imm = break_num;
	siginfo.si_flags = 0;		/* clear __ISR_VALID */
	siginfo.si_isr = 0;

	switch (break_num) {
	      case 0: /* unknown error (used by GCC for __builtin_abort()) */
		if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
		    == NOTIFY_STOP)
			return;
		if (die_if_kernel("bugcheck!", regs, break_num))
			return;
		sig = SIGILL; code = ILL_ILLOPC;
		break;

	      case 1: /* integer divide by zero */
		sig = SIGFPE; code = FPE_INTDIV;
		break;

	      case 2: /* integer overflow */
		sig = SIGFPE; code = FPE_INTOVF;
		break;

	      case 3: /* range check/bounds check */
		sig = SIGFPE; code = FPE_FLTSUB;
		break;

	      case 4: /* null pointer dereference */
		sig = SIGSEGV; code = SEGV_MAPERR;
		break;

	      case 5: /* misaligned data */
		sig = SIGSEGV; code = BUS_ADRALN;
		break;

	      case 6: /* decimal overflow */
		sig = SIGFPE; code = __FPE_DECOVF;
		break;

	      case 7: /* decimal divide by zero */
		sig = SIGFPE; code = __FPE_DECDIV;
		break;

	      case 8: /* packed decimal error */
		sig = SIGFPE; code = __FPE_DECERR;
		break;

	      case 9: /* invalid ASCII digit */
		sig = SIGFPE; code = __FPE_INVASC;
		break;

	      case 10: /* invalid decimal digit */
		sig = SIGFPE; code = __FPE_INVDEC;
		break;

	      case 11: /* paragraph stack overflow */
		sig = SIGSEGV; code = __SEGV_PSTKOVF;
		break;

	      case 0x3f000 ... 0x3ffff:	/* bundle-update in progress */
		sig = SIGILL; code = __ILL_BNDMOD;
		break;

	      default:
		if ((break_num < 0x40000 || break_num > 0x100000)
		    && die_if_kernel("Bad break", regs, break_num))
			return;

		if (break_num < 0x80000) {
			sig = SIGILL; code = __ILL_BREAK;
		} else {
			if (notify_die(DIE_BREAK, "bad break", regs, break_num, TRAP_BRKPT, SIGTRAP)
			    == NOTIFY_STOP)
				return;
			sig = SIGTRAP; code = TRAP_BRKPT;
		}
	}
	siginfo.si_signo = sig;
	siginfo.si_errno = 0;
	siginfo.si_code = code;
	force_sig_info(sig, &siginfo, current);
}

/*
 * disabled_fph_fault() is called when a user-level process attempts to access f32..f127
 * and it doesn't own the fp-high register partition.  When this happens, we save the
 * current fph partition in the task_struct of the fpu-owner (if necessary) and then load
 * the fp-high partition of the current task (if necessary).  Note that the kernel has
 * access to fph by the time we get here, as the IVT's "Disabled FP-Register" handler takes
 * care of clearing psr.dfh.
 */
static inline void
disabled_fph_fault (struct pt_regs *regs)
{
	struct ia64_psr *psr = ia64_psr(regs);

	/* first, grant user-level access to fph partition: */
	psr->dfh = 0;

	/*
	 * Make sure that no other task gets in on this processor
	 * while we're claiming the FPU
	 */
	preempt_disable();
#ifndef CONFIG_SMP
	{
		struct task_struct *fpu_owner
			= (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);

		if (ia64_is_local_fpu_owner(current)) {
			preempt_enable_no_resched();
			return;
		}

		if (fpu_owner)
			ia64_flush_fph(fpu_owner);
	}
#endif /* !CONFIG_SMP */
	ia64_set_local_fpu_owner(current);
	if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) {
		__ia64_load_fpu(current->thread.fph);
		psr->mfh = 0;
	} else {
		__ia64_init_fpu();
		/*
		 * Set mfh because the state in thread.fph does not match the state in
		 * the fph partition.
		 */
		psr->mfh = 1;
	}
	preempt_enable_no_resched();
}

static inline int
fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs,
	    struct pt_regs *regs)
{
	fp_state_t fp_state;
	fpswa_ret_t ret;

	if (!fpswa_interface)
		return -1;

	memset(&fp_state, 0, sizeof(fp_state_t));

	/*
	 * compute fp_state.  only FP registers f6 - f11 are used by the
	 * kernel, so set those bits in the mask and set the low volatile
	 * pointer to point to these registers.
	 */
	fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */

	fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
	/*
	 * unsigned long (*EFI_FPSWA) (
	 *	unsigned long    trap_type,
	 *	void             *Bundle,
	 *	unsigned long    *pipsr,
	 *	unsigned long    *pfsr,
	 *	unsigned long    *pisr,
	 *	unsigned long    *ppreds,
	 *	unsigned long    *pifs,
	 *	void             *fp_state);
	 */
	ret = (*fpswa_interface->fpswa)((unsigned long) fp_fault, bundle,
					(unsigned long *) ipsr, (unsigned long *) fpsr,
					(unsigned long *) isr, (unsigned long *) pr,
					(unsigned long *) ifs, &fp_state);

	return ret.status;
}

struct fpu_swa_msg {
	unsigned long count;
	unsigned long time;
};
static DEFINE_PER_CPU(struct fpu_swa_msg, cpulast);
DECLARE_PER_CPU(struct fpu_swa_msg, cpulast);
static struct fpu_swa_msg last __cacheline_aligned;
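
/*
 * Two rate limiters for the "floating-point assist fault" message in
 * handle_fpu_swa(): cpulast throttles each CPU individually, while the
 * shared, cacheline-aligned "last" throttles the machine as a whole so
 * that many CPUs faulting at once cannot flood the log.
 */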

/*
 * Handle floating-point assist faults and traps.
 */
static int
handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
{
	long exception, bundle[2];
	unsigned long fault_ip;
	struct siginfo siginfo;

	fault_ip = regs->cr_iip;
	if (!fp_fault && (ia64_psr(regs)->ri == 0))
		fault_ip -= 16;
	if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle)))
		return -1;

	if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT))  {
		unsigned long count, current_jiffies = jiffies;
		struct fpu_swa_msg *cp = &__get_cpu_var(cpulast);

		if (unlikely(current_jiffies > cp->time))
			cp->count = 0;
		if (unlikely(cp->count < 5)) {
			cp->count++;
			cp->time = current_jiffies + 5 * HZ;

			/* minimize races by grabbing a copy of count BEFORE checking last.time. */
			count = last.count;
			barrier();

			/*
			 * Lower 4 bits are used as a count.  Upper bits are a sequence
			 * number that is updated when the count is reset.  The cmpxchg
			 * will fail if seqno has changed.  This minimizes multiple CPUs
			 * resetting the count.
			 */
			if (current_jiffies > last.time)
				(void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));

			/* use fetchadd to atomically update the count */
			if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
				last.time = current_jiffies + 5 * HZ;
				printk(KERN_WARNING
				       "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
				       current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr);
			}
		}
	}
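
	/*
	 * Worked example of the global limiter above (a sketch): if
	 * last.count is 0x23 (seqno 0x20, count 3), a reset stores
	 * 16 + (0x23 & ~15) == 0x30, i.e. count 0 under a new seqno;
	 * a CPU that read a stale count loses the cmpxchg and skips the
	 * reset.  The fetchadd then claims a print slot atomically, and
	 * its return value is re-checked so that only the first few
	 * claimants in each 5-second window actually printk.
	 */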

	exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
			       &regs->cr_ifs, regs);
	if (fp_fault) {
		if (exception == 0) {
			/* emulation was successful */
			ia64_increment_ip(regs);
		} else if (exception == -1) {
			printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
			return -1;
		} else {
			/* is next instruction a trap? */
			if (exception & 2) {
				ia64_increment_ip(regs);
			}
			siginfo.si_signo = SIGFPE;
			siginfo.si_errno = 0;
			siginfo.si_code = __SI_FAULT;	/* default code */
			siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
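			/*
			 * ISR decode (a hedged reading of the architected
			 * layout): the FP-fault code duplicates the IEEE
			 * flags for the two possible FP instruction slots
			 * of a bundle, so 0x11 tests "invalid" (V) in
			 * either slot, 0x22 denormal (D), and 0x44
			 * zero-divide (Z).
			 */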
			if (isr & 0x11) {
				siginfo.si_code = FPE_FLTINV;
			} else if (isr & 0x22) {
				/* denormal operand gets the same si_code as underflow;
				 * see arch/i386/kernel/traps.c:math_error() */
				siginfo.si_code = FPE_FLTUND;
			} else if (isr & 0x44) {
				siginfo.si_code = FPE_FLTDIV;
			}
			siginfo.si_isr = isr;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_imm = 0;
			force_sig_info(SIGFPE, &siginfo, current);
		}
	} else {
		if (exception == -1) {
			printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
			return -1;
		} else if (exception != 0) {
			/* raise exception */
			siginfo.si_signo = SIGFPE;
			siginfo.si_errno = 0;
			siginfo.si_code = __SI_FAULT;	/* default code */
			siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
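			/*
			 * Same hedged reading for the trap case: the trap
			 * flags sit above the fault flags, so 0x880 tests
			 * overflow (O) in either slot, 0x1100 underflow
			 * (U), and 0x2200 inexact (I).
			 */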
			if (isr & 0x880) {
				siginfo.si_code = FPE_FLTOVF;
			} else if (isr & 0x1100) {
				siginfo.si_code = FPE_FLTUND;
			} else if (isr & 0x2200) {
				siginfo.si_code = FPE_FLTRES;
			}
			siginfo.si_isr = isr;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_imm = 0;
			force_sig_info(SIGFPE, &siginfo, current);
		}
	}
	return 0;
}

struct illegal_op_return {
	unsigned long fkt, arg1, arg2, arg3;
};

struct illegal_op_return
ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
		       long arg4, long arg5, long arg6, long arg7,
		       struct pt_regs regs)
{
	struct illegal_op_return rv;
	struct siginfo si;
	char buf[128];

#ifdef CONFIG_IA64_BRL_EMU
	{
		extern struct illegal_op_return ia64_emulate_brl (struct pt_regs *, unsigned long);

		rv = ia64_emulate_brl(&regs, ec);
		if (rv.fkt != (unsigned long) -1)
			return rv;
	}
#endif
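
	/*
	 * Getting here means brl emulation (for CPUs without the
	 * long-branch instruction) was either not configured in or
	 * declined the fault, so treat it as a real illegal operation.
	 */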

	sprintf(buf, "IA-64 Illegal operation fault");
	rv.fkt = 0;
	if (die_if_kernel(buf, &regs, 0))
		return rv;

	memset(&si, 0, sizeof(si));
	si.si_signo = SIGILL;
	si.si_code = ILL_ILLOPC;
	si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri);
	force_sig_info(SIGILL, &si, current);
	return rv;
}

void __kprobes
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
	    unsigned long iim, unsigned long itir, long arg5, long arg6,
	    long arg7, struct pt_regs regs)
{
	unsigned long code, error = isr, iip;
	struct siginfo siginfo;
	char buf[128];
	int result, sig;
	static const char *reason[] = {
		"IA-64 Illegal Operation fault",
		"IA-64 Privileged Operation fault",
		"IA-64 Privileged Register fault",
		"IA-64 Reserved Register/Field fault",
		"Disabled Instruction Set Transition fault",
		"Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
		"Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
		"Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
	};
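
	/*
	 * reason[] is indexed by the 4-bit code field extracted from the
	 * ISR for the General Exception vector ("(isr >> 4) & 0xf" in
	 * case 24 below), hence exactly 16 entries.
	 */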

	if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
		/*
		 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
		 * the lfetch.
		 */
		ia64_psr(&regs)->ed = 1;
		return;
	}

	iip = regs.cr_iip + ia64_psr(&regs)->ri;

	switch (vector) {
	      case 24: /* General Exception */
		code = (isr >> 4) & 0xf;
		sprintf(buf, "General Exception: %s%s", reason[code],
			(code == 3) ? ((isr & (1UL << 37))
				       ? " (RSE access)" : " (data access)") : "");
		if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
			printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
			       current->comm, task_pid_nr(current),
			       regs.cr_iip + ia64_psr(&regs)->ri, regs.pr);
# endif
			return;
		}
		break;

	      case 25: /* Disabled FP-Register */
		if (isr & 2) {
			disabled_fph_fault(&regs);
			return;
		}
		sprintf(buf, "Disabled FPL fault---not supposed to happen!");
		break;

	      case 26: /* NaT Consumption */
		if (user_mode(&regs)) {
			void __user *addr;

			if (((isr >> 4) & 0xf) == 2) {
				/* NaT page consumption */
				sig = SIGSEGV;
				code = SEGV_ACCERR;
				addr = (void __user *) ifa;
			} else {
				/* register NaT consumption */
				sig = SIGILL;
				code = ILL_ILLOPN;
				addr = (void __user *) (regs.cr_iip
							+ ia64_psr(&regs)->ri);
			}
			siginfo.si_signo = sig;
			siginfo.si_code = code;
			siginfo.si_errno = 0;
			siginfo.si_addr = addr;
			siginfo.si_imm = vector;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_isr = isr;
			force_sig_info(sig, &siginfo, current);
			return;
		} else if (ia64_done_with_exception(&regs))
			return;
		sprintf(buf, "NaT consumption");
		break;

	      case 31: /* Unsupported Data Reference */
		if (user_mode(&regs)) {
			siginfo.si_signo = SIGILL;
			siginfo.si_code = ILL_ILLOPN;
			siginfo.si_errno = 0;
			siginfo.si_addr = (void __user *) iip;
			siginfo.si_imm = vector;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_isr = isr;
			force_sig_info(SIGILL, &siginfo, current);
			return;
		}
		sprintf(buf, "Unsupported data reference");
		break;

	      case 29: /* Debug */
	      case 35: /* Taken Branch Trap */
	      case 36: /* Single Step Trap */
		if (fsys_mode(current, &regs)) {
			extern char __kernel_syscall_via_break[];
			/*
			 * Got a trap in fsys-mode: Taken Branch Trap
			 * and Single Step trap need special handling;
			 * Debug trap is ignored (we disable it here
			 * and re-enable it in the lower-privilege trap).
			 */
			if (unlikely(vector == 29)) {
				set_thread_flag(TIF_DB_DISABLED);
				ia64_psr(&regs)->db = 0;
				ia64_psr(&regs)->lp = 1;
				return;
			}
			/* re-do the system call via break 0x100000: */
			regs.cr_iip = (unsigned long) __kernel_syscall_via_break;
			ia64_psr(&regs)->ri = 0;
			ia64_psr(&regs)->cpl = 3;
			return;
		}
		switch (vector) {
		      case 29:
			siginfo.si_code = TRAP_HWBKPT;
#ifdef CONFIG_ITANIUM
			/*
			 * Erratum 10 (IFA may contain incorrect address) now has
			 * "NoFix" status.  There are no plans for fixing this.
			 */
			if (ia64_psr(&regs)->is == 0)
				ifa = regs.cr_iip;
#endif
			break;
		      case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
		      case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
		}
		if (notify_die(DIE_FAULT, "ia64_fault", &regs, vector, siginfo.si_code, SIGTRAP)
		    == NOTIFY_STOP)
			return;
		siginfo.si_signo = SIGTRAP;
		siginfo.si_errno = 0;
		siginfo.si_addr  = (void __user *) ifa;
		siginfo.si_imm   = 0;
		siginfo.si_flags = __ISR_VALID;
		siginfo.si_isr   = isr;
		force_sig_info(SIGTRAP, &siginfo, current);
		return;

	      case 32: /* fp fault */
	      case 33: /* fp trap */
		result = handle_fpu_swa((vector == 32) ? 1 : 0, &regs, isr);
		if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
			siginfo.si_signo = SIGFPE;
			siginfo.si_errno = 0;
			siginfo.si_code = FPE_FLTINV;
			siginfo.si_addr = (void __user *) iip;
			siginfo.si_flags = __ISR_VALID;
			siginfo.si_isr = isr;
			siginfo.si_imm = 0;
			force_sig_info(SIGFPE, &siginfo, current);
		}
		return;

	      case 34:
		if (isr & 0x2) {
			/* Lower-Privilege Transfer Trap */

			/* If we disabled debug traps during an fsyscall,
			 * re-enable them here.
			 */
			if (test_thread_flag(TIF_DB_DISABLED)) {
				clear_thread_flag(TIF_DB_DISABLED);
				ia64_psr(&regs)->db = 1;
			}

			/*
			 * Just clear PSR.lp and then return immediately:
			 * all the interesting work (e.g., signal delivery)
			 * is done in the kernel exit path.
			 */
			ia64_psr(&regs)->lp = 0;
			return;
		} else {
			/* Unimplemented Instr. Address Trap */
			if (user_mode(&regs)) {
				siginfo.si_signo = SIGILL;
				siginfo.si_code = ILL_BADIADDR;
				siginfo.si_errno = 0;
				siginfo.si_flags = 0;
				siginfo.si_isr = 0;
				siginfo.si_imm = 0;
				siginfo.si_addr = (void __user *) iip;
				force_sig_info(SIGILL, &siginfo, current);
				return;
			}
			sprintf(buf, "Unimplemented Instruction Address fault");
		}
		break;

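	      /*
	       * Vectors 45-47 belong to the IA-32 (x86) compatibility
	       * machinery; they are unexpected since this kernel has no
	       * IA-32 execution support.  Traps 45 and 46 raise SIGSEGV
	       * directly, while trap 47 falls through to the common
	       * die_if_kernel()/SIGILL path below.
	       */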
	      case 45:
		printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
		       iip, ifa, isr);
		force_sig(SIGSEGV, current);
		return;

	      case 46:
		printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
		printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
		       iip, ifa, isr, iim);
		force_sig(SIGSEGV, current);
		return;

	      case 47:
		sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
		break;

	      default:
		sprintf(buf, "Fault %lu", vector);
		break;
	}
	if (!die_if_kernel(buf, &regs, error))
		force_sig(SIGILL, current);
}
653