/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003 - 2007  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/kgdb.h>

#ifdef CONFIG_KPROBES
ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);

/* Hook to register for page fault notifications */
int register_page_fault_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}

int unregister_page_fault_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}

static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
                                    int trap, int sig)
{
        struct die_args args = {
                .regs = regs,
                .trapnr = trap,
        };
        return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
}
#else
static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
                                    int trap, int sig)
{
        return NOTIFY_DONE;
}
#endif

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                                        unsigned long writeaccess,
                                        unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long page;
        int si_code;
        siginfo_t info;

        trace_hardirqs_on();

        if (notify_page_fault(DIE_PAGE_FAULT, regs,
                              writeaccess, SIGSEGV) == NOTIFY_STOP)
                return;

        local_irq_enable();

#ifdef CONFIG_SH_KGDB
        if (kgdb_nofault && kgdb_bus_err_hook)
                kgdb_bus_err_hook();
#endif

        tsk = current;
        mm = tsk->mm;
        si_code = SEGV_MAPERR;

        if (unlikely(address >= TASK_SIZE)) {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;

                pgd = get_TTB() + offset;
                pgd_k = swapper_pg_dir + offset;

                /* This will never happen with the folded page table. */
                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
                        set_pgd(pgd, *pgd_k);
                        return;
                }

                pud = pud_offset(pgd, address);
                pud_k = pud_offset(pgd_k, address);
                if (pud_present(*pud) || !pud_present(*pud_k))
                        goto bad_area_nosemaphore;
                set_pud(pud, *pud_k);

                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);
                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;
                set_pmd(pmd, *pmd_k);

                return;
        }

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

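        /*
         * Look up the VMA covering the faulting address. An access just
         * below a VM_GROWSDOWN mapping is treated as a stack reference
         * and the stack is expanded to cover it.
         */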
        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        si_code = SEGV_ACCERR;
        if (writeaccess) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
survive:
        switch (handle_mm_fault(mm, vma, address, writeaccess)) {
        case VM_FAULT_MINOR:
                tsk->min_flt++;
                break;
        case VM_FAULT_MAJOR:
                tsk->maj_flt++;
                break;
        case VM_FAULT_SIGBUS:
                goto do_sigbus;
        case VM_FAULT_OOM:
                goto out_of_memory;
        default:
                BUG();
        }

        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        if (user_mode(regs)) {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = si_code;
                info.si_addr = (void *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(KERN_ALERT "pc = %08lx\n", regs->pc);
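        /*
         * Dump the page directory and page table entries for the faulting
         * address to help with debugging the oops.
         */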
        page = (unsigned long)get_TTB();
        if (page) {
                page = ((unsigned long *)page)[address >> PGDIR_SHIFT];
                printk(KERN_ALERT "*pde = %08lx\n", page);
                if (page & _PAGE_PRESENT) {
                        page &= PAGE_MASK;
                        address &= 0x003ff000;
                        page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
                        printk(KERN_ALERT "*pte = %08lx\n", page);
                }
        }
        die("Oops", regs, writeaccess);
        do_exit(SIGKILL);

        /*
         * We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", tsk->comm);
        if (user_mode(regs))
                do_exit(SIGKILL);
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *)address;
        force_sig_info(SIGBUS, &info, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
}

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX             (P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX             P4SEG
#endif

/*
 * Called with interrupts disabled. This is the fast path for TLB refill:
 * walk the page tables for the faulting address and, if a valid PTE is
 * found, mark it young (and dirty on a write access) and load it. A
 * non-zero return value tells the caller to fall back to the full
 * do_page_fault() slow path above.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
                                         unsigned long writeaccess,
                                         unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
        struct mm_struct *mm = current->mm;
        spinlock_t *ptl = NULL;
        int ret = 1;

#ifdef CONFIG_SH_KGDB
        if (kgdb_nofault && kgdb_bus_err_hook)
                kgdb_bus_err_hook();
#endif

        /*
         * We don't take page faults for P1, P2, and parts of P4, these
         * are always mapped, whether it be due to legacy behaviour in
         * 29-bit mode, or due to PMB configuration in 32-bit mode.
         */
        if (address >= P3SEG && address < P3_ADDR_MAX) {
                pgd = pgd_offset_k(address);
                mm = NULL;      /* kernel page table, no mm to lock */
        } else {
                if (unlikely(address >= TASK_SIZE || !mm))
                        return 1;

                pgd = pgd_offset(mm, address);
        }

        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
                return 1;
        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
                return 1;

        if (mm)
                pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        else
                pte = pte_offset_kernel(pmd, address);

        entry = *pte;
        if (unlikely(pte_none(entry) || pte_not_present(entry)))
                goto unlock;
        if (unlikely(writeaccess && !pte_write(entry)))
                goto unlock;

        if (writeaccess)
                entry = pte_mkdirty(entry);
        entry = pte_mkyoung(entry);

#ifdef CONFIG_CPU_SH4
        /*
         * ITLB is not affected by "ldtlb" instruction.
         * So, we need to flush the entry by ourselves.
         */
        local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

        set_pte(pte, entry);
        update_mmu_cache(NULL, address, entry);
        ret = 0;
unlock:
        if (mm)
                pte_unmap_unlock(pte, ptl);
        return ret;
}