// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
#include <asm/kup.h>

/*
 * Check whether the instruction inst is a store using
 * an update addressing form which will update r1.
 */
static bool store_updates_sp(unsigned int inst)
{
	/* check for 1 in the rA field */
	if (((inst >> 16) & 0x1f) != 1)
		return false;
	/* check major opcode */
	switch (inst >> 26) {
	case OP_STWU:
	case OP_STBU:
	case OP_STHU:
	case OP_STFSU:
	case OP_STFDU:
		return true;
	case OP_STD:	/* std or stdu */
		return (inst & 3) == 1;
	case OP_31:
		/* check minor opcode */
		switch ((inst >> 1) & 0x3ff) {
		case OP_31_XOP_STDUX:
		case OP_31_XOP_STWUX:
		case OP_31_XOP_STBUX:
		case OP_31_XOP_STHUX:
		case OP_31_XOP_STFSUX:
		case OP_31_XOP_STFDUX:
			return true;
		}
	}
	return false;
}
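/*
 * For example, 0x9421fff0 is "stwu r1,-16(r1)" -- primary opcode 37
 * (OP_STWU) with rA == 1 -- so store_updates_sp() returns true for it.
 * By contrast, 0x90010008 is "stw r0,8(r1)": rA is 1 but the opcode is
 * not an update form, so it falls through the switch and returns false.
 */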
/*
 * do_page_fault error handling helpers
 */

static int
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception(SIGSEGV, regs, si_code, address);

	return 0;
}

static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
{
	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
}

static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	return __bad_area_nosemaphore(regs, address, si_code);
}

static noinline int bad_area(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_MAPERR);
}

static int bad_key_fault_exception(struct pt_regs *regs, unsigned long address,
				   int pkey)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception_pkey(regs, address, pkey);

	return 0;
}

static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_ACCERR);
}

static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     vm_fault_t fault)
{
	if (!user_mode(regs))
		return SIGBUS;

	current->thread.trap_nr = BUS_ADRERR;
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		unsigned int lsb = 0; /* shutup gcc */

		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			current->comm, current->pid, address);

		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;

		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return 0;
	}

#endif
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
	return 0;
}

static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
			  vm_fault_t fault)
{
	/*
	 * Kernel page fault interrupted by SIGKILL. We have no reason to
	 * continue processing.
	 */
	if (fatal_signal_pending(current) && !user_mode(regs))
		return SIGKILL;

	/* Out of memory */
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, or some other thing happened to us that
		 * made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return SIGSEGV;
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			return do_sigbus(regs, addr, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			return bad_area_nosemaphore(regs, addr);
		else
			BUG();
	}
	return 0;
}
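/*
 * For example, a user access to a MAP_SHARED file mapping beyond the
 * end of the file comes back from handle_mm_fault() as VM_FAULT_SIGBUS,
 * which mm_fault_error() routes through do_sigbus() above and delivers
 * as SIGBUS with si_code BUS_ADRERR.
 */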
/* Is this a bad kernel fault ? */
static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
			     unsigned long address, bool is_write)
{
	int is_exec = TRAP(regs) == 0x400;

	/* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
	if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
				      DSISR_PROTFAULT))) {
		pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
				    address >= TASK_SIZE ? "exec-protected" : "user",
				    address,
				    from_kuid(&init_user_ns, current_uid()));

		// Kernel exec fault is always bad
		return true;
	}

	if (!is_exec && address < TASK_SIZE && (error_code & DSISR_PROTFAULT) &&
	    !search_exception_tables(regs->nip)) {
		pr_crit_ratelimited("Kernel attempted to access user page (%lx) - exploit attempt? (uid: %d)\n",
				    address,
				    from_kuid(&init_user_ns, current_uid()));
	}

	// Kernel fault on kernel address is bad
	if (address >= TASK_SIZE)
		return true;

	// Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad
	if (!search_exception_tables(regs->nip))
		return true;

	// Read/write fault in a valid region (the exception table search passed
	// above), but blocked by KUAP is bad, it can never succeed.
	if (bad_kuap_fault(regs, address, is_write))
		return true;

	// What's left? Kernel fault on user in well defined regions (extable
	// matched), and allowed by KUAP in the faulting context.
	return false;
}

static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
				struct vm_area_struct *vma, unsigned int flags,
				bool *must_retry)
{
	/*
	 * N.B. The POWER/Open ABI allows programs to access up to
	 * 288 bytes below the stack pointer.
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1.  Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (address + 0x100000 < vma->vm_end) {
		unsigned int __user *nip = (unsigned int __user *)regs->nip;
		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			return true;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed,
		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
		 * (or the byte, halfword, float or double forms).
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + 2048 >= uregs->gpr[1])
			return false;

		if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
		    access_ok(nip, sizeof(*nip))) {
			unsigned int inst;

			if (!probe_user_read(&inst, nip, sizeof(inst)))
				return !store_updates_sp(inst);
			*must_retry = true;
		}
		return true;
	}
	return false;
}

static bool access_error(bool is_write, bool is_exec,
			 struct vm_area_struct *vma)
{
	/*
	 * Allow execution from readable areas if the MMU does not
	 * provide separate controls over reading and executing.
	 *
	 * Note: That code used to not be enabled for 4xx/BookE.
	 * It is now as I/D cache coherency for these is done at
	 * set_pte_at() time and I see no reason why the test
	 * below wouldn't be valid on those processors. This -may-
	 * break programs compiled with a really old ABI though.
	 */
	if (is_exec) {
		return !(vma->vm_flags & VM_EXEC) &&
			(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
	}

	if (is_write) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return true;
		return false;
	}

	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return true;
	/*
	 * We should ideally do the vma pkey access check here. But in the
	 * fault path, handle_mm_fault() also does the same check. To avoid
	 * these multiple checks, we skip it here and handle access error due
	 * to pkeys later.
	 */
	return false;
}
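/*
 * For example, a store into a PROT_READ-only mapping arrives with
 * is_write set while VM_WRITE is clear in vma->vm_flags, so
 * access_error() returns true and the fault is reported through
 * bad_access() as SIGSEGV with si_code SEGV_ACCERR.
 */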
#ifdef CONFIG_PPC_SMLPAR
static inline void cmo_account_page_fault(void)
{
	if (firmware_has_feature(FW_FEATURE_CMO)) {
		u32 page_ins;

		preempt_disable();
		page_ins = be32_to_cpu(get_lppaca()->page_ins);
		page_ins += 1 << PAGE_FACTOR;
		get_lppaca()->page_ins = cpu_to_be32(page_ins);
		preempt_enable();
	}
}
#else
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */

#ifdef CONFIG_PPC_BOOK3S
static void sanity_check_fault(bool is_write, bool is_user,
			       unsigned long error_code, unsigned long address)
{
	/*
	 * Userspace trying to access kernel address, we get PROTFAULT for that.
	 */
	if (is_user && address >= TASK_SIZE) {
		if ((long)address == -1)
			return;

		pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
				    current->comm, current->pid, address,
				    from_kuid(&init_user_ns, current_uid()));
		return;
	}

	/*
	 * For hash translation mode, we should never get a
	 * PROTFAULT. Any update to the pte to reduce access will result in us
	 * removing the hash page table entry, thus resulting in a DSISR_NOHPTE
	 * fault instead of DSISR_PROTFAULT.
	 *
	 * A pte update to relax the access will not result in a hash page table
	 * entry invalidate and hence can result in DSISR_PROTFAULT.
	 * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
	 * the special !is_write case in the conditional below.
	 *
	 * For platforms that don't support a coherent icache but do support a
	 * per-page noexec bit, we set things up so that the D/I cache sync
	 * happens via a fault. But that is handled by the low level hash fault
	 * code (hash_page_do_lazy_icache()) and we should not reach here in
	 * that case.
	 *
	 * For wrong accesses that can result in a PROTFAULT, the above
	 * vma->vm_flags check should handle those and hence we should fall
	 * through to the bad_area handling correctly.
	 *
	 * For embedded CPUs with per-page exec support that don't have a
	 * coherent icache, we do get a PROTFAULT and handle the D/I cache sync
	 * in set_pte_at while taking the noexec/prot fault. Hence this WARN_ON
	 * is conditional on the server MMU.
	 *
	 * For radix, we can get a prot fault in the autonuma case, because the
	 * radix page table will have such pages marked no-access for user.
	 */
	if (radix_enabled() || is_write)
		return;

	WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}
#else
static void sanity_check_fault(bool is_write, bool is_user,
			       unsigned long error_code, unsigned long address) { }
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Define the correct "is_write" bit in error_code based
 * on the processor family
 */
#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
#define page_fault_is_write(__err)	((__err) & ESR_DST)
#define page_fault_is_bad(__err)	(0)
#else
#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
#if defined(CONFIG_PPC_8xx)
#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
#elif defined(CONFIG_PPC64)
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_64S)
#else
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
#endif
#endif
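/*
 * For example, on 64-bit Book3S a store to an unmapped address raises a
 * DSI with DSISR_ISSTORE set, so page_fault_is_write() is true there,
 * while on 4xx/BookE the same store/load distinction is carried by
 * ESR_DST. page_fault_is_bad() catches DSISR bits that should never be
 * set for a normal page fault (e.g. DSISR_BAD_FAULT_64S on Book3S64).
 */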
/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault
 *  - 0 for any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
static int __do_page_fault(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int is_exec = TRAP(regs) == 0x400;
	int is_user = user_mode(regs);
	int is_write = page_fault_is_write(error_code);
	vm_fault_t fault, major = 0;
	bool must_retry = false;
	bool kprobe_fault = kprobe_page_fault(regs, 11);

	if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
		return 0;

	if (unlikely(page_fault_is_bad(error_code))) {
		if (is_user) {
			_exception(SIGBUS, regs, BUS_OBJERR, address);
			return 0;
		}
		return SIGBUS;
	}

	/* Additional sanity check(s) */
	sanity_check_fault(is_write, is_user, error_code, address);

	/*
	 * The kernel should never take an execute fault nor should it
	 * take a page fault to a kernel address or a page fault to a user
	 * address outside of dedicated places
	 */
	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write)))
		return SIGSEGV;

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled then we must not take the fault
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		if (is_user)
			printk_ratelimited(KERN_ERR "Page fault in user mode"
					   " with faulthandler_disabled()=%d"
					   " mm=%p\n",
					   faulthandler_disabled(), mm);
		return bad_area_nosemaphore(regs, address);
	}

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (error_code & DSISR_KEYFAULT)
		return bad_key_fault_exception(regs, address,
					       get_mm_addr_key(mm, address));

	/*
	 * We want to do this outside mmap_sem, because reading code around nip
	 * can result in fault, which will cause a deadlock when called with
	 * mmap_sem held
	 */
	if (is_user)
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;
	if (is_exec)
		flags |= FAULT_FLAG_INSTRUCTION;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
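	/*
	 * Note that the retry label below lives inside the trylock-failure
	 * branch: if handle_mm_fault() later returns VM_FAULT_RETRY (which
	 * drops mmap_sem), the goto jumps back here and re-acquires the
	 * semaphore with a full, sleeping down_read().
	 */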
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if (!is_user && !search_exception_tables(regs->nip))
			return bad_area_nosemaphore(regs, address);

retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma))
		return bad_area(regs, address);
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		return bad_area(regs, address);

	/* The stack is being expanded, check if it's valid */
	if (unlikely(bad_stack_expansion(regs, address, vma, flags,
					 &must_retry))) {
		if (!must_retry)
			return bad_area(regs, address);

		up_read(&mm->mmap_sem);
		if (fault_in_pages_readable((const char __user *)regs->nip,
					    sizeof(unsigned int)))
			return bad_area_nosemaphore(regs, address);
		goto retry;
	}

	/* Try to expand it */
	if (unlikely(expand_stack(vma, address)))
		return bad_area(regs, address);

good_area:
	if (unlikely(access_error(is_write, is_exec, vma)))
		return bad_access(regs, address);

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

#ifdef CONFIG_PPC_MEM_KEYS
	/*
	 * We skipped checking for access error due to key earlier.
	 * Check that now using the handle_mm_fault() error return.
	 */
	if (unlikely(fault & VM_FAULT_SIGSEGV) &&
	    !arch_vma_access_permitted(vma, is_write, is_exec, 0)) {

		int pkey = vma_pkey(vma);

		up_read(&mm->mmap_sem);
		return bad_key_fault_exception(regs, address, pkey);
	}
#endif /* CONFIG_PPC_MEM_KEYS */

	major |= fault & VM_FAULT_MAJOR;

	/*
	 * Handle the retry right now, the mmap_sem has been released in that
	 * case.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		/* We retry only once */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			if (!fatal_signal_pending(current))
				goto retry;
		}

		/*
		 * User mode? Just return to handle the fatal exception;
		 * otherwise return to bad_page_fault.
		 */
		return is_user ? 0 : SIGBUS;
	}

	up_read(&current->mm->mmap_sem);

	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

	/*
	 * Major/minor page fault accounting.
	 */
	if (major) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
		cmo_account_page_fault();
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}
	return 0;
}
NOKPROBE_SYMBOL(__do_page_fault);

int do_page_fault(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code)
{
	enum ctx_state prev_state = exception_enter();
	int rc = __do_page_fault(regs, address, error_code);
	exception_exit(prev_state);
	return rc;
}
NOKPROBE_SYMBOL(do_page_fault);
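/*
 * A non-zero return from do_page_fault() is picked up by the low-level
 * assembly caller, which restores the non-volatile registers before
 * handing the signal number to bad_page_fault() below.
 */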
/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *entry;
	int is_write = page_fault_is_write(regs->dsisr);

	/* Are we prepared to handle this fault?  */
	if ((entry = search_exception_tables(regs->nip)) != NULL) {
		regs->nip = extable_fixup(entry);
		return;
	}

	/* kernel has accessed a bad area */

	switch (TRAP(regs)) {
	case 0x300:
	case 0x380:
	case 0xe00:
		pr_alert("BUG: %s on %s at 0x%08lx\n",
			 regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" :
			 "Unable to handle kernel data access",
			 is_write ? "write" : "read", regs->dar);
		break;
	case 0x400:
	case 0x480:
		pr_alert("BUG: Unable to handle kernel instruction fetch%s",
			 regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
		break;
	case 0x600:
		pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
			 regs->dar);
		break;
	default:
		pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n",
			 regs->dar);
		break;
	}
	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
		regs->nip);

	if (task_stack_end_corrupted(current))
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	die("Kernel access of bad area", regs, sig);
}