/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003 - 2007  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/kgdb.h>

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                                        unsigned long writeaccess,
                                        unsigned long address)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long page;
        int si_code;
        siginfo_t info;

        trace_hardirqs_on();
        local_irq_enable();

#ifdef CONFIG_SH_KGDB
        if (kgdb_nofault && kgdb_bus_err_hook)
                kgdb_bus_err_hook();
#endif

        tsk = current;
        mm = tsk->mm;
        si_code = SEGV_MAPERR;

        if (unlikely(address >= TASK_SIZE)) {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;

                pgd = get_TTB() + offset;
                pgd_k = swapper_pg_dir + offset;

                /* This will never happen with the folded page table. */
                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
                        set_pgd(pgd, *pgd_k);
                        return;
                }

                pud = pud_offset(pgd, address);
                pud_k = pud_offset(pgd_k, address);
                if (pud_present(*pud) || !pud_present(*pud_k))
                        goto bad_area_nosemaphore;
                set_pud(pud, *pud_k);

                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);
                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;
                set_pmd(pmd, *pmd_k);

                return;
        }

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

        down_read(&mm->mmap_sem);

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        si_code = SEGV_ACCERR;
        if (writeaccess) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
        }

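        /*
         * Note: in this kernel generation handle_mm_fault() returns one of
         * the scalar VM_FAULT_{MINOR,MAJOR,SIGBUS,OOM} codes rather than a
         * bitmask, so the switch below does the minor/major fault accounting
         * itself and branches to the SIGBUS/OOM handling further down for
         * the failure cases.
         */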
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
survive:
        switch (handle_mm_fault(mm, vma, address, writeaccess)) {
        case VM_FAULT_MINOR:
                tsk->min_flt++;
                break;
        case VM_FAULT_MAJOR:
                tsk->maj_flt++;
                break;
        case VM_FAULT_SIGBUS:
                goto do_sigbus;
        case VM_FAULT_OOM:
                goto out_of_memory;
        default:
                BUG();
        }

        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        if (user_mode(regs)) {
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = si_code;
                info.si_addr = (void *) address;
                force_sig_info(SIGSEGV, &info, tsk);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
        if (fixup_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel paging request");
        printk(" at virtual address %08lx\n", address);
        printk(KERN_ALERT "pc = %08lx\n", regs->pc);
        page = (unsigned long)get_TTB();
        if (page) {
                page = ((unsigned long *)page)[address >> PGDIR_SHIFT];
                printk(KERN_ALERT "*pde = %08lx\n", page);
                if (page & _PAGE_PRESENT) {
                        page &= PAGE_MASK;
                        address &= 0x003ff000;
                        page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
                        printk(KERN_ALERT "*pte = %08lx\n", page);
                }
        }
        die("Oops", regs, writeaccess);
        do_exit(SIGKILL);

        /*
         * We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (is_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk("VM: killing process %s\n", tsk->comm);
        if (user_mode(regs))
                do_exit(SIGKILL);
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *)address;
        force_sig_info(SIGBUS, &info, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
}

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX     (P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX     P4SEG
#endif

/*
 * Called with interrupts disabled.
 */
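/*
 * __do_page_fault() is the fast-path TLB refill handler: it walks the
 * page tables for the faulting address, and if a valid, sufficiently
 * permissive PTE is already there it marks the entry young (and dirty
 * for writes), reloads it, and returns 0.  A non-zero return indicates
 * the fault could not be resolved here, and the low-level exception
 * code is expected to fall back to the full do_page_fault() slow path
 * above.  P3 addresses (and, with store queues, part of P4) are looked
 * up in the kernel page tables; user addresses take the PTE lock on
 * current->mm.
 */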
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
                                         unsigned long writeaccess,
                                         unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
        struct mm_struct *mm = current->mm;
        spinlock_t *ptl = NULL;
        int ret = 1;

#ifdef CONFIG_SH_KGDB
        if (kgdb_nofault && kgdb_bus_err_hook)
                kgdb_bus_err_hook();
#endif

        /*
         * We don't take page faults for P1, P2, and parts of P4, these
         * are always mapped, whether it be due to legacy behaviour in
         * 29-bit mode, or due to PMB configuration in 32-bit mode.
         */
        if (address >= P3SEG && address < P3_ADDR_MAX) {
                pgd = pgd_offset_k(address);
                mm = NULL;
        } else {
                if (unlikely(address >= TASK_SIZE || !mm))
                        return 1;

                pgd = pgd_offset(mm, address);
        }

        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
                return 1;
        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
                return 1;

        if (mm)
                pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        else
                pte = pte_offset_kernel(pmd, address);

        entry = *pte;
        if (unlikely(pte_none(entry) || pte_not_present(entry)))
                goto unlock;
        if (unlikely(writeaccess && !pte_write(entry)))
                goto unlock;

        if (writeaccess)
                entry = pte_mkdirty(entry);
        entry = pte_mkyoung(entry);

#ifdef CONFIG_CPU_SH4
        /*
         * ITLB is not affected by "ldtlb" instruction.
         * So, we need to flush the entry by ourselves.
         */
        local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

        set_pte(pte, entry);
        update_mmu_cache(NULL, address, entry);
        ret = 0;
unlock:
        if (mm)
                pte_unmap_unlock(pte, ptl);
        return ret;
}