/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include "../kernel/entry.h"

#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK	0x7ffff000
#define __SUBCODE_MASK		0x0200
#define __PF_RES_FIELD		0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK	-4096L
#define __SUBCODE_MASK		0x0600
#define __PF_RES_FIELD		0x8000000000000000ULL
#endif /* CONFIG_64BIT */

#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000
#define VM_FAULT_SIGNAL		0x080000
#define VM_FAULT_PFAULT		0x100000

static unsigned long store_indication __read_mostly;

#ifdef CONFIG_64BIT
static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);
#endif

static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}
	return ret;
}

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;
		console_unblank();
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;
		printk(" ");
		console_loglevel = loglevel_save;
	}
}
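/*
 * Note (added, summarizing the decoding used below): on a DAT exception
 * the hardware stores a translation exception identification (TEID) in
 * the lowcore, which the entry code passes along in regs->int_parm_long.
 * The two low-order bits select the address space that was in use when
 * the fault happened:
 *
 *	TEID & 3 == 0	primary space
 *	TEID & 3 == 1	access register mode
 *	TEID & 3 == 2	secondary space
 *	TEID & 3 == 3	home space
 *
 * user_space_fault() and dump_fault_info() both rely on this encoding.
 */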
/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space and 1 for user space.
 */
static inline int user_space_fault(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	/*
	 * The lowest two bits of the translation exception
	 * identification indicate which paging table was used.
	 */
	trans_exc_code = regs->int_parm_long & 3;
	if (trans_exc_code == 3) /* home space -> kernel */
		return 0;
	if (user_mode(regs))
		return 1;
	if (trans_exc_code == 2) /* secondary space -> set_fs */
		return current->thread.mm_segment.ar4;
	if (current->flags & PF_VCPU)
		return 1;
	return 0;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

#ifdef CONFIG_64BIT
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & PAGE_MASK);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table = table + ((address >> 53) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table = table + ((address >> 42) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table = table + ((address >> 31) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table = table + ((address >> 20) & 0x7ff);
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table = table + ((address >> 12) & 0xff);
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

#else /* CONFIG_64BIT */

static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & PAGE_MASK);

	pr_alert("AS:%08lx ", asce);
	table = table + ((address >> 20) & 0x7ff);
	if (bad_address(table))
		goto bad;
	pr_cont("S:%08lx ", *table);
	if (*table & _SEGMENT_ENTRY_INVALID)
		goto out;
	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	table = table + ((address >> 12) & 0xff);
	if (bad_address(table))
		goto bad;
	pr_cont("P:%08lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

#endif /* CONFIG_64BIT */
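/*
 * Note (added, derived from the shifts and masks used above): the
 * 64-bit walk in dump_pagetable() mirrors the hardware DAT lookup.
 * Counting bits from the least significant end, the virtual address is
 * decomposed as
 *
 *	region-first index	bits 53-63	(>> 53, 11 bits)
 *	region-second index	bits 42-52	(>> 42, 11 bits)
 *	region-third index	bits 31-41	(>> 31, 11 bits)
 *	segment index		bits 20-30	(>> 20, 11 bits)
 *	page index		bits 12-19	(>> 12,  8 bits)
 *	byte offset		bits  0-11
 *
 * The switch intentionally falls through, so an ASCE of a given type
 * starts the walk at the matching level and continues down to the
 * page table.
 */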
static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	if (!user_space_fault(regs)) {
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
	}
#ifdef CONFIG_PGSTE
	else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
		struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
		asce = gmap->asce;
		pr_cont("gmap ");
	}
#endif
	else {
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

static inline void report_user_fault(struct pt_regs *regs, long signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code 0x%X ",
	       regs->int_code);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
	printk(KERN_CONT "\n");
	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task.  This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	struct siginfo si;

	report_user_fault(regs, SIGSEGV);
	si.si_signo = SIGSEGV;
	si.si_errno = 0;
	si.si_code = si_code;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGSEGV, &si, current);
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;
	unsigned long address;

	/* Are we prepared to handle this kernel fault? */
	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
	if (fixup) {
		regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	address = regs->int_parm_long & __FAIL_ADDR_MASK;
	if (!user_space_fault(regs))
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die(regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct siginfo si;

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_ADRERR;
	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
	force_sig_info(SIGBUS, &si, tsk);
}
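/*
 * Note (added): the exception table fixup in do_no_context() above is
 * what lets the uaccess primitives survive faults on user addresses.
 * A minimal sketch of the pattern, assuming the usual s390 semantics
 * (if the instruction at label 0 faults, execution resumes at label 1
 * instead of oopsing):
 *
 *	asm volatile(
 *		"0:	lg	%0,0(%1)\n"
 *		"1:\n"
 *		EX_TABLE(0b,1b)
 *		: "=d" (val) : "a" (ptr));
 *
 * pfault_init() further down uses the same EX_TABLE mechanism to guard
 * its diagnose instruction.
 */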
static noinline void do_fault_error(struct pt_regs *regs, int fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			return;
		}
		/* fallthrough: kernel mode ends up in do_no_context() */
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access)
{
#ifdef CONFIG_PGSTE
	struct gmap *gmap;
#endif
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	int fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (notify_page_fault(regs))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
		goto out;

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

#ifdef CONFIG_PGSTE
	gmap = (current->flags & PF_VCPU) ?
		(struct gmap *) S390_lowcore.gmap : NULL;
	if (gmap) {
		current->thread.gmap_addr = address;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}
#endif

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
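	/*
	 * Note (added): with FAULT_FLAG_ALLOW_RETRY set, handle_mm_fault()
	 * may return VM_FAULT_RETRY, in which case it has already dropped
	 * mmap_sem. That is why the retry path below re-takes the semaphore
	 * before jumping back to retry; the FAULT_FLAG_RETRY_NOWAIT case is
	 * the exception, as noted there, where the lock is still held.
	 */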
	/* No reason to continue if interrupted by SIGKILL. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		fault = VM_FAULT_SIGNAL;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
#ifdef CONFIG_PGSTE
			if (gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_sem has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
#endif
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
				   FAULT_FLAG_RETRY_NOWAIT);
			flags |= FAULT_FLAG_TRIED;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
#ifdef CONFIG_PGSTE
	if (gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
#endif
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}

void __kprobes do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
	fault = do_exception(regs, VM_WRITE);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}

void __kprobes do_dat_exception(struct pt_regs *regs)
{
	int access, fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
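/*
 * Note (added, summarizing the CONFIG_PFAULT code below): under z/VM a
 * guest can enable "pseudo page faults" with DIAG 0x258 (function code 0
 * in pfault_init(), function code 1 in pfault_fini() to disable them
 * again). Once enabled, a host-side page fault is reported to the guest
 * as a pair of external interrupts instead of stalling the whole CPU:
 * an initial interrupt when the page is found to be missing, and a
 * completion interrupt once the host has resolved it. Both carry the
 * task's pid as a token (refgaddr is set to __LC_CURRENT_PID), and
 * pfault_interrupt() tells them apart via the 0x0080 bit of the
 * external interrupt subcode. Because the two interrupts can arrive in
 * either order, thread.pfault_wait acts as a tiny state machine:
 * 0 = no pfault pending, 1 = task is (about to be) sleeping on an
 * initial interrupt, -1 = completion interrupt arrived first.
 */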
#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

int pfault_init(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 0,
		.refdwlen = 5,
		.refversn = 2,
		.refgaddr = __LC_CURRENT_PID,
		.refselmk = 1ULL << 48,
		.refcmpmk = 1ULL << 48,
		.reserved = __PF_RES_FIELD };
	int rc;

	if (pfault_disable)
		return -1;
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
	return rc;
}

void pfault_fini(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 1,
		.refdwlen = 5,
		.refversn = 2,
	};

	if (pfault_disable)
		return;
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:\n"
		EX_TABLE(0b,0b)
		: : "a" (&refbk), "m" (refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault
	 * initial/completion signal bit. VM stores this
	 * in the 'cpu address' field associated with the
	 * external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = sizeof(void *) == 4 ? param32 : param64;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & 0x0080) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
			__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
			     void *hcpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DEAD:
		spin_lock_irq(&pfault_lock);
		list_for_each_entry_safe(thread, next, &pfault_list, list) {
			thread->pfault_wait = 0;
			list_del(&thread->list);
			tsk = container_of(thread, struct task_struct, thread);
			wake_up_process(tsk);
			put_task_struct(tsk);
		}
		spin_unlock_irq(&pfault_lock);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	hotcpu_notifier(pfault_cpu_notify, 0);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */