/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel	<chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

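/*
 * Invalidate every entry of the auto-refill (ARF) ways of a TLB.  The
 * entry specifier passed to the invalidate instructions packs the way
 * number into the bits below PAGE_SHIFT and the set index into the
 * virtual-page-number bits, which is why the loops below compute
 * w + (i << PAGE_SHIFT).  The per-entry invalidations skip the isync;
 * a single isync at the end commits them all.
 */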
static inline void __flush_itlb_all(void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all(void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}


void flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous TLB entries.  If mm is someone else's user
 * mapping, we invalidate its context, so that a new ASID is assigned
 * when that mapping is next swapped in.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm) {
		unsigned long flags;

		local_irq_save(flags);
		__get_new_mmu_context(mm);
		__load_mmu_context(mm);
		local_irq_restore(flags);
	} else {
		mm->context = 0;
	}
}

#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif
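
/*
 * _TLB_ENTRIES is the larger of the two ARF sizes and bounds when a
 * per-page flush is still worthwhile: once a range spans more pages
 * than the TLB can hold, every entry would be touched anyway, so a
 * full-context flush is cheaper.  E.g., a configuration with 4 ways
 * of 4 entries has 16 ARF entries, so with 4 KiB pages ranges up to
 * 64 KiB are flushed page by page.
 */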

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context == NO_CONTEXT)
		return;

#if 0
	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
			(unsigned long)mm->context, start, end);
#endif
	local_irq_save(flags);

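	/*
	 * If the range covers no more pages than the TLB has entries,
	 * invalidate it page by page under the target mm's ASID;
	 * otherwise fall back to flushing the whole context.
	 */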
	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
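		/*
		 * Temporarily load the mm's ASID into the RASID register
		 * so that the per-page invalidations hit its mappings,
		 * then restore the previous value.
		 */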
		int oldpid = get_rasid_register();

		set_rasid_register(ASID_INSERT(mm->context));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context == NO_CONTEXT)
		return;

	local_irq_save(flags);

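	/* Run the invalidations under the target mm's ASID, then restore. */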
	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}

#ifdef CONFIG_DEBUG_TLB_SANITY

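/*
 * Walk the current task's page table and return the raw PTE that maps
 * vaddr, or 0 if no mapping is present at any level.
 */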
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	return pte_val(*pte);
}

enum {
	TLB_SUSPICIOUS	= 1,
	TLB_INSANE	= 2,
};

static void tlb_insane(void)
{
	BUG_ON(1);
}

static void tlb_suspicious(void)
{
	WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>= 4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or that
 * the PTE is marked non-present.  A non-present PTE together with a page
 * that has a non-zero refcount and a zero mapcount is normal for a batched
 * TLB flush operation.  A zero refcount means that the page was freed
 * prematurely.  A non-zero mapcount is unusual, but does not necessarily
 * mean an error, so it is only marked as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

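	/*
	 * A valid entry must map a kernel VPN under the kernel ASID and
	 * a user VPN under a user ASID; any other combination is insane.
	 */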
	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
				dtlb ? 'D' : 'I', w, e, vpn,
				kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

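	/*
	 * Only entries tagged with the currently loaded ASID can be
	 * checked against the page table: compare the translation (PPN)
	 * with the PTE for the same virtual page.
	 */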
	if (tlb_asid == mm_asid) {
		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
			read_itlb_translation(tlbidx);
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
				pr_err("page refcount: %d, mapcount: %d\n",
						page_count(p),
						page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapped(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}

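/*
 * Scan every ARF way/entry of both TLBs with interrupts disabled;
 * BUG on insane entries, WARN on merely suspicious ones.
 */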
void check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	if (bug & TLB_INSANE)
		tlb_insane();
	if (bug & TLB_SUSPICIOUS)
		tlb_suspicious();
	local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */