xref: /openbmc/linux/arch/arm/mm/fault-armv.c (revision f15cbe6f1a4b4d9df59142fc8e4abb973302cf44)
/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

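/*
 * Bits removed from a user PTE when another mapping of the same page is
 * found to alias it.  By default only the cacheable bit is cleared;
 * check_writebuffer_bugs() may add L_PTE_BUFFERABLE as well if the write
 * buffer turns out not to be coherent for physical aliases.
 */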
static unsigned long shared_pte_mask = L_PTE_CACHEABLE;

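/*
 * On a VIVT cache, two virtual mappings of the same physical page are
 * indexed by their (different) virtual addresses and can therefore hold
 * two separate, possibly inconsistent, copies of the same data: a write
 * through one mapping need not be visible through the other.
 */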
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte, entry;
	int ret;

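	/* Walk the page tables down to the PTE covering this address. */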
	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*pgd))
		goto no_pgd;
	if (pgd_bad(*pgd))
		goto bad_pgd;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd))
		goto no_pmd;
	if (pmd_bad(*pmd))
		goto bad_pmd;

	pte = pte_offset_map(pmd, address);
	entry = *pte;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e. is old), we can safely ignore any issues.
	 */
	if (ret && pte_val(entry) & shared_pte_mask) {
		flush_cache_page(vma, address, pte_pfn(entry));
		pte_val(entry) &= ~shared_pte_mask;
		set_pte_at(vma->vm_mm, address, pte, entry);
		flush_tlb_page(vma, address);
	}
	pte_unmap(pte);
	return ret;

bad_pgd:
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
no_pgd:
	return 0;

bad_pmd:
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
no_pmd:
	return 0;
}

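/*
 * make_coherent() is called for a page that is gaining a new mapping at
 * @addr in @vma.  It walks every other shared mapping of the same file
 * offset within this mm and, via adjust_pte(), turns off caching for any
 * PTE that aliases it.  If such aliases exist, the mapping at @addr is
 * made uncacheable as well; otherwise flushing the cache for this page
 * is all that is needed.
 */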
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
	}
	flush_dcache_mmap_unlock(mapping);
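	/*
	 * If other mappings were made uncacheable above, the mapping being
	 * established must match them; otherwise a cache flush of this
	 * page is sufficient.
	 */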
	if (aliases)
		adjust_pte(vma, addr);
	else
		flush_cache_page(vma, addr, pfn);
}

/*
 * Take care of architecture-specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	mapping = page_mapping(page);
	if (mapping) {
#ifndef CONFIG_SMP
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

		if (dirty)
			__flush_dcache_page(mapping, page);
#endif

		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
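
/*
 * Note: update_mmu_cache() is expected to be called from the generic mm
 * code (e.g. the fault handling paths in mm/memory.c) after a PTE has
 * been installed or modified, which is where the pte-lock guarantee
 * mentioned above comes from.
 */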

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

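	/*
	 * p1 and p2 are two virtual aliases of the same physical word.
	 * Write 1 through one alias, 0 through the other, then read back
	 * through the first: if the stale value 1 is still seen, the
	 * write buffer is treating the two virtual addresses as separate
	 * locations and the aliases are not coherent.
	 */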
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

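/*
 * Map one page at two different virtual addresses with bufferable page
 * attributes and run check_writebuffer() on the pair.  If the write
 * buffer proves incoherent for such physical aliases, or the test cannot
 * be run at all, extend shared_pte_mask so that aliased user mappings are
 * made unbufferable as well as uncacheable.
 */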
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
					 L_PTE_DIRTY|L_PTE_WRITE|
					 L_PTE_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask |= L_PTE_BUFFERABLE;
	} else {
		printk("ok\n");
	}
}