/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/kgdb.h>

extern void die(const char *, struct pt_regs *, long);

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long page;
	int si_code;
	siginfo_t info;

	trace_hardirqs_on();
	local_irq_enable();

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;

	if (unlikely(address >= TASK_SIZE)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = get_TTB() + offset;
		pgd_k = swapper_pg_dir + offset;

		/* This will never happen with the folded page table. */
		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return;
		}

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (pud_present(*pud) || !pud_present(*pud_k))
			goto bad_area_nosemaphore;
		set_pud(pud, *pud_k);

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);

		return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
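	/*
	 * Note: handle_mm_fault() in this kernel generation returns a
	 * single VM_FAULT_* code rather than a bitmask of flags, so it
	 * can be switched on directly below: minor and major faults are
	 * accounted against the task, while SIGBUS and OOM outcomes are
	 * punted to the error paths at the end of this function.
	 */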
survive:
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
	page = (unsigned long)get_TTB();
	if (page) {
		page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
		printk(KERN_ALERT "*pde = %08lx\n", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
			printk(KERN_ALERT "*pte = %08lx\n", page);
		}
	}
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX		P4SEG
#endif

/*
 * Called with interrupts disabled.
 */
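/*
 * __do_page_fault() is the fast path, invoked directly from the TLB
 * miss exception: it only loads a PTE that is already valid in the
 * page tables into the TLB. A return value of 0 means the miss was
 * satisfied here; non-zero tells the low-level exception code to fall
 * back to the full do_page_fault() slow path above.
 */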
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
					 unsigned long writeaccess,
					 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	struct mm_struct *mm = current->mm;
	spinlock_t *ptl;
	int ret = 1;

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
		mm = NULL;
	} else {
		if (unlikely(address >= TASK_SIZE || !mm))
			return 1;

		pgd = pgd_offset(mm, address);
	}

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;

	if (mm)
		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	else
		pte = pte_offset_kernel(pmd, address);

	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		goto unlock;
	if (unlikely(writeaccess && !pte_write(entry)))
		goto unlock;

	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

#ifdef CONFIG_CPU_SH4
	/*
	 * ITLB is not affected by "ldtlb" instruction.
	 * So, we need to flush the entry by ourselves.
	 */
	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);
	ret = 0;
unlock:
	if (mm)
		pte_unmap_unlock(pte, ptl);
	return ret;
}