// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include "mm.h"

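/*
 * Memory type used for aliased shared mappings: bufferable (uncached,
 * write buffer on) by default, downgraded to fully uncached by
 * check_writebuffer_bugs() below if the write buffer turns out not to
 * be coherent between aliases of the same physical address.
 */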
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
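/*
 * If the PTE at @address is present and not already using the shared
 * memory type, flush the cache and outer cache for the page, switch the
 * PTE over to shared_pte_mask and invalidate its TLB entry.  Returns
 * non-zero when the PTE was present (i.e. a live alias).
 */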
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e., is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

#if USE_SPLIT_PTE_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the PTE
 * lock here.  Otherwise we are using the shared mm->page_table_lock,
 * which is already held, so we must not take it again.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use nested version here to indicate that we are already
	 * holding one similar spinlock.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

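	/*
	 * Walk the page tables down to the PTE covering @address; if any
	 * level is missing (or bad), there is nothing to adjust.
	 */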
	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none_or_clear_bad(p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	pte = pte_offset_map_nolock(vma->vm_mm, pmd, address, &ptl);
	if (!pte)
		return 0;

	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}

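/*
 * On a VIVT cache, shared mappings of the same page at different virtual
 * addresses can hold stale or conflicting cache lines.  Scan the other
 * shared mappings of this page in the current mm and switch any live
 * aliases to the shared (uncacheable) memory type; if any were found,
 * do the same for the mapping that is being faulted in.
 */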
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

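	/* Page index of the faulting address within the mapped object. */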
	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
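		/*
		 * Translate the object page index back into a user address
		 * inside this alias.  For example (illustrative numbers
		 * only, assuming 4KiB pages): a fault at 0x40003000 in a
		 * mapping that starts at 0x40000000 with vm_pgoff 0 gives
		 * pgoff 3; an alias mapped at 0x50000000 with vm_pgoff 2
		 * then gets offset 0x1000 and is adjusted at 0x50001000.
		 */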
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture-specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep, unsigned int nr)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct folio *folio;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (is_zero_pfn(pfn))
		return;

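	/*
	 * PG_dcache_clean tracks whether the kernel alias of this folio has
	 * been written back; if it is clear, flush the kernel mapping before
	 * user space can observe stale data through its own mapping.
	 */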
	folio = page_folio(pfn_to_page(pfn));
	mapping = folio_flush_mapping(folio);
	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(mapping, folio);
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

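	/*
	 * @p1 and @p2 are two virtual aliases of the same physical word.
	 * Write 1 through one alias and 0 through the other, then read back
	 * through the first: on coherent hardware the read observes the 0;
	 * if it still sees the 1, the write buffer has an aliasing problem
	 * and the caller must enable the work-around.
	 */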
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	pr_info("CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

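		/*
		 * Create two bufferable kernel mappings of the same page so
		 * that check_writebuffer() can probe for physical-address
		 * aliasing problems in the write buffer.
		 */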
		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		pr_cont("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		pr_cont("ok\n");
	}
}
273