/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 * Joe Taylor
 * Marc Gauthier
 *
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pgtable.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

/*
 * Note:
 * The kernel provides one architecture bit PG_arch_1 in the page flags that
 * can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate if the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We, therefore,
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
 * page.
 */

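/*
 * In code, the I$-D$ convention amounts to the pattern below (a minimal
 * sketch of what update_mmu_cache_range() does further down, not a
 * separate API):
 *
 *	if (!test_bit(PG_arch_1, &folio->flags)) {
 *		__flush_dcache_page(vaddr);
 *		__invalidate_icache_page(vaddr);
 *		set_bit(PG_arch_1, &folio->flags);
 *	}
 *
 * In the aliasing case the same bit instead marks a dirty page: the
 * clear/copy helpers set it, and update_mmu_cache_range() clears it
 * again once all aliases have been flushed.
 */
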
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
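
/*
 * Invalidate the D-cache lines that map this page at the kernel's
 * colour when that colour differs from the user address vaddr, so that
 * stale kernel-alias lines cannot later be written back over data
 * stored through a vaddr-coloured alias.
 */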
static inline void kmap_invalidate_coherent(struct page *page,
					    unsigned long vaddr)
{
	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		unsigned long kvaddr;

		if (!PageHighMem(page)) {
			kvaddr = (unsigned long)page_to_virt(page);

			__invalidate_dcache_page(kvaddr);
		} else {
			kvaddr = TLBTEMP_BASE_1 +
				(page_to_phys(page) & DCACHE_ALIAS_MASK);

			preempt_disable();
			__invalidate_dcache_page_alias(kvaddr,
						       page_to_phys(page));
			preempt_enable();
		}
	}
}

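/*
 * Map the page at a TLBTEMP window address whose D-cache colour matches
 * the user address vaddr; the physical address is returned through
 * *paddr for the *_alias() primitives.
 */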
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
				    unsigned long vaddr, unsigned long *paddr)
{
	*paddr = page_to_phys(page);
	return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
}

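/*
 * Clear a user page through an alias with the same colour as the user
 * mapping, so the zeroed lines land exactly where user space will look
 * for them. PG_arch_1 is set because the page is now D-cache dirty.
 */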
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long paddr;
	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

	preempt_disable();
	kmap_invalidate_coherent(page, vaddr);
	set_bit(PG_arch_1, &page->flags);
	clear_page_alias(kvaddr, paddr);
	preempt_enable();
}
EXPORT_SYMBOL(clear_user_highpage);

void copy_user_highpage(struct page *dst, struct page *src,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long dst_paddr, src_paddr;
	void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
					  &dst_paddr);
	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
					  &src_paddr);

	preempt_disable();
	kmap_invalidate_coherent(dst, vaddr);
	set_bit(PG_arch_1, &dst->flags);
	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_highpage);

/*
 * Any time the kernel writes to a user page cache page, or is about to
 * read from one, this routine is called.
 */

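/*
 * A typical call sequence looks like this (a minimal sketch of what
 * generic page cache code does, not code from this file):
 *
 *	memcpy(folio_address(folio), src, len);
 *	flush_dcache_folio(folio);
 *
 * If the folio has a mapping but no user mappings yet, only PG_arch_1
 * is set below and the real flush is deferred to
 * update_mmu_cache_range().
 */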
void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping = folio_flush_mapping(folio);

	/*
	 * If we have a mapping but the page is not mapped to user-space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu_cache_range().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &folio->flags))
			set_bit(PG_arch_1, &folio->flags);
		return;

	} else {
		unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
		unsigned long temp = folio_pos(folio);
		unsigned int i, nr = folio_nr_pages(folio);
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		preempt_disable();
		for (i = 0; i < nr; i++) {
			virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
			__flush_invalidate_dcache_page_alias(virt, phys);

			virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

			if (alias)
				__flush_invalidate_dcache_page_alias(virt, phys);

			if (mapping)
				__invalidate_icache_page_alias(virt, phys);
			phys += PAGE_SIZE;
			temp += PAGE_SIZE;
		}
		preempt_enable();
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}
EXPORT_SYMBOL(flush_dcache_folio);

/*
 * For now, flush the whole cache. FIXME??
 */

void local_flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}
EXPORT_SYMBOL(local_flush_cache_range);

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	preempt_disable();
	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_cache_page);

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

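/*
 * Called after PTEs for the folio have been installed: invalidate the
 * old TLB entries, then perform whatever cache maintenance the two
 * schemes described at the top of this file still owe for the folio.
 */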
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep, unsigned int nr)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct folio *folio;
	unsigned int i;

	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));

	/* Invalidate old entries in TLBs */
	for (i = 0; i < nr; i++)
		flush_tlb_page(vma, addr + i * PAGE_SIZE);
	nr = folio_nr_pages(folio);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)
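
	/*
	 * Aliasing case: PG_arch_1 set means the folio is D-cache dirty
	 * (see the clear/copy helpers above); flush both the
	 * kernel-coloured and the user-coloured aliases and drop any
	 * stale I$ lines before clearing the bit.
	 */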
	if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) {
		unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
		unsigned long tmp;

		preempt_disable();
		for (i = 0; i < nr; i++) {
			tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
			__flush_invalidate_dcache_page_alias(tmp, phys);
			tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
			__flush_invalidate_dcache_page_alias(tmp, phys);
			__invalidate_icache_page_alias(tmp, phys);
			phys += PAGE_SIZE;
		}
		preempt_enable();

		clear_bit(PG_arch_1, &folio->flags);
	}
#else
	if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		for (i = 0; i < nr; i++) {
			void *paddr = kmap_local_folio(folio, i * PAGE_SIZE);
			__flush_dcache_page((unsigned long)paddr);
			__invalidate_icache_page((unsigned long)paddr);
			kunmap_local(paddr);
		}
		set_bit(PG_arch_1, &folio->flags);
	}
#endif
}

/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

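/*
 * Copy data into a user page on behalf of ptrace/access_process_vm():
 * flush the user-coloured alias around the copy when it differs from
 * the kernel colour, and keep I$ coherent for executable mappings.
 */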
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		preempt_disable();
		__flush_invalidate_dcache_page_alias(t, phys);
		preempt_enable();
	}

	/* Copy data */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		preempt_disable();
		__flush_invalidate_dcache_range((unsigned long) dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);
		preempt_enable();

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long) dst, len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}

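/*
 * Counterpart of copy_to_user_page() for reads: flush the user's dirty
 * cache lines through the matching alias before copying out. No I$
 * maintenance is needed since the page is only read.
 */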
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long vaddr, void *dst, const void *src,
		unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient)
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
		preempt_disable();
		__flush_invalidate_dcache_page_alias(t, phys);
		preempt_enable();
	}

	memcpy(dst, src, len);
}

#endif