/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;

/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte, entry;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*pgd))
		goto no_pgd;
	if (pgd_bad(*pgd))
		goto bad_pgd;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd))
		goto no_pmd;
	if (pmd_bad(*pmd))
		goto bad_pmd;

	pte = pte_offset_map(pmd, address);
	entry = *pte;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e. is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		unsigned long pfn = pte_pfn(entry);
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, pte, entry);
		flush_tlb_page(vma, address);
	}
	pte_unmap(pte);
	return ret;

bad_pgd:
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
no_pgd:
	return 0;

bad_pmd:
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
no_pmd:
	return 0;
}
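
/*
 * For illustration only (userspace view, not part of this file): two
 * MAP_SHARED mappings of the same file page within one process, e.g.
 *
 *	a = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 *	b = mmap(NULL, PAGE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * (where 'fd' is any open file descriptor) land at different virtual
 * addresses, so on a VIVT cache a write through 'a' can sit in a cache
 * line that a read through 'b' never hits.  adjust_pte() above removes
 * the alias by switching each such PTE to the memory type held in
 * shared_pte_mask: bufferable, or fully uncached if the write buffer
 * test further down fails.
 */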

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	      unsigned long addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		adjust_pte(vma, addr);
	else
		flush_cache_page(vma, addr, pfn);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	mapping = page_mapping(page);
	if (mapping) {
#ifndef CONFIG_SMP
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

		if (dirty)
			__flush_dcache_page(mapping, page);
#endif

		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
					 L_PTE_DIRTY|L_PTE_WRITE|
					 L_PTE_MT_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory\n";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page\n";
	}

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		printk("ok\n");
	}
}
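
/*
 * Sketch of the access pattern check_writebuffer() relies on, for
 * illustration only: p1 and p2 are two bufferable kernel mappings of
 * the same physical page, so with a coherent write buffer the sequence
 *
 *	*p1 = 1;
 *	*p2 = 0;
 *	val = *p1;	(must observe the later write: val == 0)
 *
 * reads back zero.  A non-zero result means the read through p1 did
 * not observe the later write through p2 - the buffer matched by
 * virtual rather than physical address - so check_writebuffer_bugs()
 * sets shared_pte_mask to L_PTE_MT_UNCACHED and shared mappings bypass
 * the write buffer entirely.
 */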