/*
 * This file contains common routines for dealing with the freeing of page
 * tables, along with common page table handling code.
 *
 * Derived from arch/powerpc/mm/tlb_64.c:
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

/* 0x400 is the instruction storage interrupt vector on powerpc */
static inline int is_exec_fault(void)
{
	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}

/* We only try to do i/d cache coherency on stuff that looks like
 * reasonably "normal" PTEs. We currently require a PTE to be present,
 * and we avoid _PAGE_SPECIAL and cache-inhibited PTEs. We also only do
 * that on userspace PTEs.
 */
static inline int pte_looks_normal(pte_t pte)
{
	if (pte_present(pte) && !pte_special(pte)) {
		if (pte_ci(pte))
			return 0;
		if (pte_user(pte))
			return 1;
	}
	return 0;
}

static struct page *maybe_pte_to_page(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return NULL;
	page = pfn_to_page(pfn);
	if (PageReserved(page))
		return NULL;
	return page;
}
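
/*
 * Note on PG_arch_1 (descriptive comment added for clarity): powerpc uses
 * this page flag as a "cache clean" marker. While it is clear, the icache
 * may hold stale data for the page with respect to the dcache;
 * flush_dcache_icache_page() makes the two coherent, after which the bit
 * is set. flush_dcache_page() clears the bit again when the kernel writes
 * to the page, re-arming this lazy flush scheme.
 */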

#ifdef CONFIG_PPC_BOOK3S

/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, then we always
 * flush the cache for valid PTEs in set_pte. Embedded CPUs without HW exec
 * support fall into the same category.
 */
static pte_t set_pte_filter(pte_t pte)
{
	if (radix_enabled())
		return pte;

	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct page *pg = maybe_pte_to_page(pte);
		if (!pg)
			return pte;
		if (!test_bit(PG_arch_1, &pg->flags)) {
			flush_dcache_icache_page(pg);
			set_bit(PG_arch_1, &pg->flags);
		}
	}
	return pte;
}

static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	return pte;
}

#else /* CONFIG_PPC_BOOK3S */

/* Embedded-type MMU with HW exec support. This is a bit more complicated,
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC, so
 * instead we "filter out" the exec permission for non-clean pages.
 */
static pte_t set_pte_filter(pte_t pte)
{
	struct page *pg;

	/* No exec permission in the first place, move on */
	if (!pte_exec(pte) || !pte_looks_normal(pte))
		return pte;

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		return pte;

	/* If the page is clean, we move on */
	if (test_bit(PG_arch_1, &pg->flags))
		return pte;

	/* If it's an exec fault, we flush the cache and make it clean */
	if (is_exec_fault()) {
		flush_dcache_icache_page(pg);
		set_bit(PG_arch_1, &pg->flags);
		return pte;
	}

	/* Else, we filter out _PAGE_EXEC */
	return pte_exprotect(pte);
}

static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	struct page *pg;

	/* So here, we only care about exec faults, as we use them
	 * to recover lost _PAGE_EXEC and perform I$/D$ coherency
	 * if necessary. Also if _PAGE_EXEC is already set, same deal,
	 * we just bail out.
	 */
	if (dirty || pte_exec(pte) || !is_exec_fault())
		return pte;

#ifdef CONFIG_DEBUG_VM
	/* So this is an exec fault, _PAGE_EXEC is not set. If it was
	 * an error we would have bailed out earlier in do_page_fault(),
	 * but let's make sure of it.
	 */
	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
		return pte;
#endif /* CONFIG_DEBUG_VM */

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		goto bail;

	/* If the page is already clean, we move on */
	if (test_bit(PG_arch_1, &pg->flags))
		goto bail;

	/* Clean the page and set PG_arch_1 */
	flush_dcache_icache_page(pg);
	set_bit(PG_arch_1, &pg->flags);

 bail:
	return pte_mkexec(pte);
}

#endif /* CONFIG_PPC_BOOK3S */

/*
 * set_pte stores a linux PTE into the linux page table.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte)
{
	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	/* Add the pte bit when trying to set a pte */
	pte = pte_mkpte(pte);

	/* Note: mm->context.id might not yet have been assigned as
	 * this context might not have been activated yet when this
	 * is called.
	 */
	pte = set_pte_filter(pte);

	/* Perform the setting of the PTE */
	__set_pte_at(mm, addr, ptep, pte, 0);
}
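
/*
 * Illustrative sketch, not part of the original file: generic mm code
 * typically reaches set_pte_at() with a freshly constructed PTE, along
 * these lines:
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *
 *	if (write)
 *		pte = pte_mkdirty(pte_mkwrite(pte));
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 *
 * so the filtering above runs on every new translation before it becomes
 * visible to the MMU.
 */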

/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, i.e., a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty)
{
	int changed;

	entry = set_access_flags_filter(entry, vma, dirty);
	changed = !pte_same(*(ptep), entry);
	if (changed) {
		assert_pte_locked(vma->vm_mm, address);
		__ptep_set_access_flags(vma, ptep, entry,
					address, mmu_virtual_psize);
	}
	return changed;
}

#ifdef CONFIG_HUGETLB_PAGE
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry. Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	int changed, psize;

	pte = set_access_flags_filter(pte, vma, dirty);
	changed = !pte_same(*(ptep), pte);
	if (changed) {
#ifdef CONFIG_PPC_BOOK3S_64
		struct hstate *h = hstate_vma(vma);

		psize = hstate_get_psize(h);
#ifdef CONFIG_DEBUG_VM
		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
#endif
#else
		/*
		 * Not used on non-book3s64 platforms. But 8xx
		 * can possibly use tsize derived from hstate.
		 */
		psize = 0;
#endif
		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
	}
	return changed;
#endif
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_DEBUG_VM
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	/*
	 * When khugepaged collapses normal pages to a hugepage, it first
	 * sets the pmd to none to force page faults and gup to take
	 * mmap_sem. After the pmd is set to none, we do a pte_clear, which
	 * ends up in this assertion; so if we find the pmd none, just
	 * return.
	 */
	if (pmd_none(*pmd))
		return;
	BUG_ON(!pmd_present(*pmd));
	assert_spin_locked(pte_lockptr(mm, pmd));
}
#endif /* CONFIG_DEBUG_VM */

unsigned long vmalloc_to_phys(void *va)
{
	unsigned long pfn = vmalloc_to_pfn(va);

	BUG_ON(!pfn);
	return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
}
EXPORT_SYMBOL_GPL(vmalloc_to_phys);
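
/*
 * Usage sketch for vmalloc_to_phys(), illustrative only: callers that need
 * the physical address behind a vmalloc'ed buffer (e.g. to hand it to
 * hardware) can do:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *	unsigned long pa = buf ? vmalloc_to_phys(buf) : 0;
 *
 * Since vmalloc memory is only virtually contiguous, the result is only
 * meaningful within the containing page.
 */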