xref: /openbmc/linux/arch/xtensa/mm/tlb.c (revision f5ee2567)
13f65ce4dSChris Zankel /*
2f30c2269SUwe Zeisberger  * arch/xtensa/mm/tlb.c
33f65ce4dSChris Zankel  *
43f65ce4dSChris Zankel  * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
53f65ce4dSChris Zankel  *
63f65ce4dSChris Zankel  * This file is subject to the terms and conditions of the GNU General Public
73f65ce4dSChris Zankel  * License.  See the file "COPYING" in the main directory of this archive
83f65ce4dSChris Zankel  * for more details.
93f65ce4dSChris Zankel  *
103f65ce4dSChris Zankel  * Copyright (C) 2001 - 2003 Tensilica Inc.
113f65ce4dSChris Zankel  *
123f65ce4dSChris Zankel  * Joe Taylor
133f65ce4dSChris Zankel  * Chris Zankel	<chris@zankel.net>
143f65ce4dSChris Zankel  * Marc Gauthier
153f65ce4dSChris Zankel  */
163f65ce4dSChris Zankel 
173f65ce4dSChris Zankel #include <linux/mm.h>
183f65ce4dSChris Zankel #include <asm/processor.h>
193f65ce4dSChris Zankel #include <asm/mmu_context.h>
203f65ce4dSChris Zankel #include <asm/tlbflush.h>
213f65ce4dSChris Zankel #include <asm/cacheflush.h>
223f65ce4dSChris Zankel 
233f65ce4dSChris Zankel 
243f65ce4dSChris Zankel static inline void __flush_itlb_all (void)
253f65ce4dSChris Zankel {
26173d6681SChris Zankel 	int w, i;
273f65ce4dSChris Zankel 
28173d6681SChris Zankel 	for (w = 0; w < ITLB_ARF_WAYS; w++) {
29173d6681SChris Zankel 		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
30173d6681SChris Zankel 			int e = w + (i << PAGE_SHIFT);
31173d6681SChris Zankel 			invalidate_itlb_entry_no_isync(e);
323f65ce4dSChris Zankel 		}
333f65ce4dSChris Zankel 	}
343f65ce4dSChris Zankel 	asm volatile ("isync\n");
353f65ce4dSChris Zankel }
363f65ce4dSChris Zankel 
373f65ce4dSChris Zankel static inline void __flush_dtlb_all (void)
383f65ce4dSChris Zankel {
39173d6681SChris Zankel 	int w, i;
403f65ce4dSChris Zankel 
41173d6681SChris Zankel 	for (w = 0; w < DTLB_ARF_WAYS; w++) {
42173d6681SChris Zankel 		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
43173d6681SChris Zankel 			int e = w + (i << PAGE_SHIFT);
44173d6681SChris Zankel 			invalidate_dtlb_entry_no_isync(e);
453f65ce4dSChris Zankel 		}
463f65ce4dSChris Zankel 	}
473f65ce4dSChris Zankel 	asm volatile ("isync\n");
483f65ce4dSChris Zankel }
493f65ce4dSChris Zankel 
503f65ce4dSChris Zankel 
/* Flush both the instruction and the data TLB of the calling CPU. */
void local_flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}
563f65ce4dSChris Zankel 
573f65ce4dSChris Zankel /* If mm is current, we simply assign the current task a new ASID, thus,
583f65ce4dSChris Zankel  * invalidating all previous tlb entries. If mm is someone else's user mapping,
 * we invalidate the context, thus, when that user mapping is swapped in,
603f65ce4dSChris Zankel  * a new context will be assigned to it.
613f65ce4dSChris Zankel  */
623f65ce4dSChris Zankel 
/*
 * Invalidate all TLB entries belonging to @mm on the calling CPU by
 * retiring its ASID (see the comment above).
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (mm == current->active_mm) {
		unsigned long flags;
		/* IRQs off so that dropping the ASID and activating a fresh
		 * context cannot be interleaved with an interrupt. */
		local_irq_save(flags);
		mm->context.asid[cpu] = NO_CONTEXT;
		activate_context(mm, cpu);
		local_irq_restore(flags);
	} else {
		/* mm is not active here: just force a new context to be
		 * assigned the next time it is switched in. */
		mm->context.asid[cpu] = NO_CONTEXT;
		mm->context.cpu = -1;
	}
}
783f65ce4dSChris Zankel 
79f615136cSMax Filippov 
/*
 * Number of auto-refill entries in each TLB (ways x entries per way).
 * _TLB_ENTRIES is the larger of the two; it bounds how many pages a
 * ranged flush will walk before falling back to a full-mm flush.
 */
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif
87173d6681SChris Zankel 
88f615136cSMax Filippov void local_flush_tlb_range(struct vm_area_struct *vma,
893f65ce4dSChris Zankel 		unsigned long start, unsigned long end)
903f65ce4dSChris Zankel {
91f615136cSMax Filippov 	int cpu = smp_processor_id();
923f65ce4dSChris Zankel 	struct mm_struct *mm = vma->vm_mm;
933f65ce4dSChris Zankel 	unsigned long flags;
943f65ce4dSChris Zankel 
95f615136cSMax Filippov 	if (mm->context.asid[cpu] == NO_CONTEXT)
963f65ce4dSChris Zankel 		return;
973f65ce4dSChris Zankel 
98c130d3beSMax Filippov 	pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
99f615136cSMax Filippov 		 (unsigned long)mm->context.asid[cpu], start, end);
10087962c4dSMax Filippov 	local_irq_save(flags);
1013f65ce4dSChris Zankel 
102173d6681SChris Zankel 	if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
1033f65ce4dSChris Zankel 		int oldpid = get_rasid_register();
104f615136cSMax Filippov 
105f615136cSMax Filippov 		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
1063f65ce4dSChris Zankel 		start &= PAGE_MASK;
1073f65ce4dSChris Zankel 		if (vma->vm_flags & VM_EXEC)
1083f65ce4dSChris Zankel 			while(start < end) {
1093f65ce4dSChris Zankel 				invalidate_itlb_mapping(start);
1103f65ce4dSChris Zankel 				invalidate_dtlb_mapping(start);
1113f65ce4dSChris Zankel 				start += PAGE_SIZE;
1123f65ce4dSChris Zankel 			}
1133f65ce4dSChris Zankel 		else
1143f65ce4dSChris Zankel 			while(start < end) {
1153f65ce4dSChris Zankel 				invalidate_dtlb_mapping(start);
1163f65ce4dSChris Zankel 				start += PAGE_SIZE;
1173f65ce4dSChris Zankel 			}
1183f65ce4dSChris Zankel 
1193f65ce4dSChris Zankel 		set_rasid_register(oldpid);
1203f65ce4dSChris Zankel 	} else {
121f615136cSMax Filippov 		local_flush_tlb_mm(mm);
1223f65ce4dSChris Zankel 	}
1233f65ce4dSChris Zankel 	local_irq_restore(flags);
1243f65ce4dSChris Zankel }
1253f65ce4dSChris Zankel 
126f615136cSMax Filippov void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1273f65ce4dSChris Zankel {
128f615136cSMax Filippov 	int cpu = smp_processor_id();
1293f65ce4dSChris Zankel 	struct mm_struct* mm = vma->vm_mm;
1303f65ce4dSChris Zankel 	unsigned long flags;
1313f65ce4dSChris Zankel 	int oldpid;
1323f65ce4dSChris Zankel 
133f615136cSMax Filippov 	if (mm->context.asid[cpu] == NO_CONTEXT)
1343f65ce4dSChris Zankel 		return;
1353f65ce4dSChris Zankel 
13687962c4dSMax Filippov 	local_irq_save(flags);
1373f65ce4dSChris Zankel 
1383f65ce4dSChris Zankel 	oldpid = get_rasid_register();
139f615136cSMax Filippov 	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
1403f65ce4dSChris Zankel 
1413f65ce4dSChris Zankel 	if (vma->vm_flags & VM_EXEC)
1423f65ce4dSChris Zankel 		invalidate_itlb_mapping(page);
1433f65ce4dSChris Zankel 	invalidate_dtlb_mapping(page);
1443f65ce4dSChris Zankel 
1453f65ce4dSChris Zankel 	set_rasid_register(oldpid);
1463f65ce4dSChris Zankel 
1473f65ce4dSChris Zankel 	local_irq_restore(flags);
1483f65ce4dSChris Zankel }
149a99e07eeSMax Filippov 
15004c6b3e2SMax Filippov void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
15104c6b3e2SMax Filippov {
15204c6b3e2SMax Filippov 	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
15304c6b3e2SMax Filippov 	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
15404c6b3e2SMax Filippov 		start &= PAGE_MASK;
15504c6b3e2SMax Filippov 		while (start < end) {
15604c6b3e2SMax Filippov 			invalidate_itlb_mapping(start);
15704c6b3e2SMax Filippov 			invalidate_dtlb_mapping(start);
15804c6b3e2SMax Filippov 			start += PAGE_SIZE;
15904c6b3e2SMax Filippov 		}
16004c6b3e2SMax Filippov 	} else {
16104c6b3e2SMax Filippov 		local_flush_tlb_all();
16204c6b3e2SMax Filippov 	}
16304c6b3e2SMax Filippov }
16404c6b3e2SMax Filippov 
165a99e07eeSMax Filippov #ifdef CONFIG_DEBUG_TLB_SANITY
166a99e07eeSMax Filippov 
167a99e07eeSMax Filippov static unsigned get_pte_for_vaddr(unsigned vaddr)
168a99e07eeSMax Filippov {
169a99e07eeSMax Filippov 	struct task_struct *task = get_current();
170a99e07eeSMax Filippov 	struct mm_struct *mm = task->mm;
171a99e07eeSMax Filippov 	pgd_t *pgd;
172f5ee2567SMike Rapoport 	p4d_t *p4d;
173f0d1eab8SMike Rapoport 	pud_t *pud;
174a99e07eeSMax Filippov 	pmd_t *pmd;
175a99e07eeSMax Filippov 	pte_t *pte;
176a99e07eeSMax Filippov 
177a99e07eeSMax Filippov 	if (!mm)
178a99e07eeSMax Filippov 		mm = task->active_mm;
179a99e07eeSMax Filippov 	pgd = pgd_offset(mm, vaddr);
180a99e07eeSMax Filippov 	if (pgd_none_or_clear_bad(pgd))
181a99e07eeSMax Filippov 		return 0;
182f5ee2567SMike Rapoport 	p4d = p4d_offset(pgd, vaddr);
183f5ee2567SMike Rapoport 	if (p4d_none_or_clear_bad(p4d))
184f5ee2567SMike Rapoport 		return 0;
185f5ee2567SMike Rapoport 	pud = pud_offset(p4d, vaddr);
186f0d1eab8SMike Rapoport 	if (pud_none_or_clear_bad(pud))
187f0d1eab8SMike Rapoport 		return 0;
188f0d1eab8SMike Rapoport 	pmd = pmd_offset(pud, vaddr);
189a99e07eeSMax Filippov 	if (pmd_none_or_clear_bad(pmd))
190a99e07eeSMax Filippov 		return 0;
191a99e07eeSMax Filippov 	pte = pte_offset_map(pmd, vaddr);
192a99e07eeSMax Filippov 	if (!pte)
193a99e07eeSMax Filippov 		return 0;
194a99e07eeSMax Filippov 	return pte_val(*pte);
195a99e07eeSMax Filippov }
196a99e07eeSMax Filippov 
/* Severity flags accumulated by check_tlb_entry(). */
enum {
	TLB_SUSPICIOUS	= 1,
	TLB_INSANE	= 2,
};
201a99e07eeSMax Filippov 
/* A definitely-broken TLB entry was found: crash loudly. */
static void tlb_insane(void)
{
	BUG_ON(1);
}
206a99e07eeSMax Filippov 
/* A possibly-broken TLB entry was found: warn but keep running. */
static void tlb_suspicious(void)
{
	WARN_ON(1);
}
211a99e07eeSMax Filippov 
212a99e07eeSMax Filippov /*
213a99e07eeSMax Filippov  * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
214a99e07eeSMax Filippov  * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
215a99e07eeSMax Filippov  *
216a99e07eeSMax Filippov  * Check that valid TLB entries either have the same PA as the PTE, or PTE is
217a99e07eeSMax Filippov  * marked as non-present. Non-present PTE and the page with non-zero refcount
218a99e07eeSMax Filippov  * and zero mapcount is normal for batched TLB flush operation. Zero refcount
219a99e07eeSMax Filippov  * means that the page was freed prematurely. Non-zero mapcount is unusual,
 * but does not necessarily mean an error, thus it is marked as suspicious.
221a99e07eeSMax Filippov  */
/*
 * Check one auto-refill TLB entry (way @w, entry @e, DTLB if @dtlb)
 * against the page tables, per the rules in the comment above.
 * Returns 0, or a mask of TLB_INSANE / TLB_SUSPICIOUS.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	/* Index encoding expected by the read_{d,i}tlb_* primitives. */
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	/* r0: the entry's VPN in the high bits, ASID in the low bits. */
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;	/* ASID 1 == kernel, per comment above */
	int rc = 0;

	/* Kernel-ASID entries must map kernel VAs (>= TASK_SIZE); user
	 * entries (ASID > 1) must map user VAs.  ASID 0 is skipped. */
	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
				dtlb ? 'D' : 'I', w, e, vpn,
				kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	/* PA comparison is only meaningful for the current context. */
	if (tlb_asid == mm_asid) {
		/* r1: the entry's translation (physical frame). */
		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
			read_itlb_translation(tlbidx);
		/* Mismatching PA is only tolerable for a non-present PTE
		 * (e.g. batched TLB flush, per the comment above). */
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
				pr_err("page refcount: %d, mapcount: %d\n",
						page_count(p),
						page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;	/* freed prematurely */
				else if (page_mapcount(p))
					rc |= TLB_SUSPICIOUS;	/* unusual, not fatal */
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}
263a99e07eeSMax Filippov 
264a99e07eeSMax Filippov void check_tlb_sanity(void)
265a99e07eeSMax Filippov {
266a99e07eeSMax Filippov 	unsigned long flags;
267a99e07eeSMax Filippov 	unsigned w, e;
268a99e07eeSMax Filippov 	int bug = 0;
269a99e07eeSMax Filippov 
270a99e07eeSMax Filippov 	local_irq_save(flags);
271a99e07eeSMax Filippov 	for (w = 0; w < DTLB_ARF_WAYS; ++w)
272a99e07eeSMax Filippov 		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
273a99e07eeSMax Filippov 			bug |= check_tlb_entry(w, e, true);
274a99e07eeSMax Filippov 	for (w = 0; w < ITLB_ARF_WAYS; ++w)
275a99e07eeSMax Filippov 		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
276a99e07eeSMax Filippov 			bug |= check_tlb_entry(w, e, false);
277a99e07eeSMax Filippov 	if (bug & TLB_INSANE)
278a99e07eeSMax Filippov 		tlb_insane();
279a99e07eeSMax Filippov 	if (bug & TLB_SUSPICIOUS)
280a99e07eeSMax Filippov 		tlb_suspicious();
281a99e07eeSMax Filippov 	local_irq_restore(flags);
282a99e07eeSMax Filippov }
283a99e07eeSMax Filippov 
284a99e07eeSMax Filippov #endif /* CONFIG_DEBUG_TLB_SANITY */
285