xref: /openbmc/linux/arch/xtensa/include/asm/cacheflush.h (revision 695c312ec5a68e4373d063ee649c7b925ffb5da7)
1367b8112SChris Zankel /*
2367b8112SChris Zankel  * This file is subject to the terms and conditions of the GNU General Public
3367b8112SChris Zankel  * License.  See the file "COPYING" in the main directory of this archive
4367b8112SChris Zankel  * for more details.
5367b8112SChris Zankel  *
6f615136cSMax Filippov  * (C) 2001 - 2013 Tensilica Inc.
7367b8112SChris Zankel  */
8367b8112SChris Zankel 
9367b8112SChris Zankel #ifndef _XTENSA_CACHEFLUSH_H
10367b8112SChris Zankel #define _XTENSA_CACHEFLUSH_H
11367b8112SChris Zankel 
12367b8112SChris Zankel #include <linux/mm.h>
13367b8112SChris Zankel #include <asm/processor.h>
14367b8112SChris Zankel #include <asm/page.h>
15367b8112SChris Zankel 
16367b8112SChris Zankel /*
17367b8112SChris Zankel  * Low-level routines for cache flushing.
18367b8112SChris Zankel  *
19367b8112SChris Zankel  * invalidate data or instruction cache:
20367b8112SChris Zankel  *
21367b8112SChris Zankel  * __invalidate_icache_all()
22367b8112SChris Zankel  * __invalidate_icache_page(adr)
23367b8112SChris Zankel  * __invalidate_dcache_page(adr)
24367b8112SChris Zankel  * __invalidate_icache_range(from,size)
25367b8112SChris Zankel  * __invalidate_dcache_range(from,size)
26367b8112SChris Zankel  *
27367b8112SChris Zankel  * flush data cache:
28367b8112SChris Zankel  *
29367b8112SChris Zankel  * __flush_dcache_page(adr)
30367b8112SChris Zankel  *
31367b8112SChris Zankel  * flush and invalidate data cache:
32367b8112SChris Zankel  *
33367b8112SChris Zankel  * __flush_invalidate_dcache_all()
34367b8112SChris Zankel  * __flush_invalidate_dcache_page(adr)
35367b8112SChris Zankel  * __flush_invalidate_dcache_range(from,size)
36367b8112SChris Zankel  *
37367b8112SChris Zankel  * specials for cache aliasing:
38367b8112SChris Zankel  *
39367b8112SChris Zankel  * __flush_invalidate_dcache_page_alias(vaddr,paddr)
40a91902dbSMax Filippov  * __invalidate_dcache_page_alias(vaddr,paddr)
41367b8112SChris Zankel  * __invalidate_icache_page_alias(vaddr,paddr)
42367b8112SChris Zankel  */
43367b8112SChris Zankel 
44367b8112SChris Zankel extern void __invalidate_dcache_all(void);
45367b8112SChris Zankel extern void __invalidate_icache_all(void);
46367b8112SChris Zankel extern void __invalidate_dcache_page(unsigned long);
47367b8112SChris Zankel extern void __invalidate_icache_page(unsigned long);
48367b8112SChris Zankel extern void __invalidate_icache_range(unsigned long, unsigned long);
49367b8112SChris Zankel extern void __invalidate_dcache_range(unsigned long, unsigned long);
50367b8112SChris Zankel 
51367b8112SChris Zankel #if XCHAL_DCACHE_IS_WRITEBACK
52367b8112SChris Zankel extern void __flush_invalidate_dcache_all(void);
53367b8112SChris Zankel extern void __flush_dcache_page(unsigned long);
54367b8112SChris Zankel extern void __flush_dcache_range(unsigned long, unsigned long);
55367b8112SChris Zankel extern void __flush_invalidate_dcache_page(unsigned long);
56367b8112SChris Zankel extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
57367b8112SChris Zankel #else
/*
 * No-op: when XCHAL_DCACHE_IS_WRITEBACK is 0 the dcache never holds
 * dirty lines, so a pure writeback ("flush") has nothing to do.
 */
static inline void __flush_dcache_page(unsigned long va)
{
}
/*
 * No-op for the same reason as __flush_dcache_page(): a non-writeback
 * dcache has no dirty data to write back.
 */
static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
{
}
64c7ca9fe1SMax Filippov # define __flush_invalidate_dcache_all()	__invalidate_dcache_all()
65367b8112SChris Zankel # define __flush_invalidate_dcache_page(p)	__invalidate_dcache_page(p)
66367b8112SChris Zankel # define __flush_invalidate_dcache_range(p,s)	__invalidate_dcache_range(p,s)
67367b8112SChris Zankel #endif
68367b8112SChris Zankel 
69e5083a63SJohannes Weiner #if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
70367b8112SChris Zankel extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
71a91902dbSMax Filippov extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
72e5083a63SJohannes Weiner #else
/*
 * Empty stubs: without an MMU, or when DCACHE_WAY_SIZE <= PAGE_SIZE,
 * a physical page can only ever occupy one cache color, so there are
 * no dcache aliases to flush or invalidate.
 */
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
							unsigned long phys) { }
static inline void __invalidate_dcache_page_alias(unsigned long virt,
						  unsigned long phys) { }
77367b8112SChris Zankel #endif
78e5083a63SJohannes Weiner #if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
79367b8112SChris Zankel extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
80367b8112SChris Zankel #else
/*
 * Empty stub: without an MMU, or when ICACHE_WAY_SIZE <= PAGE_SIZE,
 * no icache aliasing is possible, so there is nothing to invalidate.
 */
static inline void __invalidate_icache_page_alias(unsigned long virt,
						unsigned long phys) { }
83367b8112SChris Zankel #endif
84367b8112SChris Zankel 
85367b8112SChris Zankel /*
86367b8112SChris Zankel  * We have physically tagged caches - nothing to do here -
87367b8112SChris Zankel  * unless we have cache aliasing.
88367b8112SChris Zankel  *
89367b8112SChris Zankel  * Pages can get remapped. Because this might change the 'color' of that page,
90367b8112SChris Zankel  * we have to flush the cache before the PTE is changed.
915fb94e9cSMauro Carvalho Chehab  * (see also Documentation/core-api/cachetlb.rst)
92367b8112SChris Zankel  */
93367b8112SChris Zankel 
94b6cee17bSMax Filippov #if defined(CONFIG_MMU) && \
95b6cee17bSMax Filippov 	((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))
96367b8112SChris Zankel 
97f615136cSMax Filippov #ifdef CONFIG_SMP
98f615136cSMax Filippov void flush_cache_all(void);
99f615136cSMax Filippov void flush_cache_range(struct vm_area_struct*, ulong, ulong);
100f615136cSMax Filippov void flush_icache_range(unsigned long start, unsigned long end);
101f615136cSMax Filippov void flush_cache_page(struct vm_area_struct*,
102f615136cSMax Filippov 			     unsigned long, unsigned long);
103f615136cSMax Filippov #else
104f615136cSMax Filippov #define flush_cache_all local_flush_cache_all
105f615136cSMax Filippov #define flush_cache_range local_flush_cache_range
106f615136cSMax Filippov #define flush_icache_range local_flush_icache_range
107f615136cSMax Filippov #define flush_cache_page  local_flush_cache_page
108f615136cSMax Filippov #endif
109f615136cSMax Filippov 
/*
 * Flush-and-invalidate the whole dcache, then invalidate the whole
 * icache on the local CPU (order matters: written-back data must reach
 * memory before stale instruction lines are dropped).
 */
#define local_flush_cache_all()						\
	do {								\
		__flush_invalidate_dcache_all();			\
		__invalidate_icache_all();				\
	} while (0)
115367b8112SChris Zankel 
116367b8112SChris Zankel #define flush_cache_mm(mm)		flush_cache_all()
117367b8112SChris Zankel #define flush_cache_dup_mm(mm)		flush_cache_mm(mm)
118367b8112SChris Zankel 
119367b8112SChris Zankel #define flush_cache_vmap(start,end)		flush_cache_all()
120*c4a05cf0SAlexandre Ghiti #define flush_cache_vmap_early(start,end)	do { } while (0)
121367b8112SChris Zankel #define flush_cache_vunmap(start,end)		flush_cache_all()
122367b8112SChris Zankel 
1234fbb7e7fSMatthew Wilcox (Oracle) void flush_dcache_folio(struct folio *folio);
1244fbb7e7fSMatthew Wilcox (Oracle) #define flush_dcache_folio flush_dcache_folio
1254fbb7e7fSMatthew Wilcox (Oracle) 
1262d4dc890SIlya Loginov #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
/*
 * Per-page entry point: delegate to the folio-based implementation,
 * operating on the folio that contains @page.
 */
static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
131f615136cSMax Filippov 
132f615136cSMax Filippov void local_flush_cache_range(struct vm_area_struct *vma,
133f615136cSMax Filippov 		unsigned long start, unsigned long end);
134f615136cSMax Filippov void local_flush_cache_page(struct vm_area_struct *vma,
135f615136cSMax Filippov 		unsigned long address, unsigned long pfn);
136367b8112SChris Zankel 
137367b8112SChris Zankel #else
138367b8112SChris Zankel 
139367b8112SChris Zankel #define flush_cache_all()				do { } while (0)
140367b8112SChris Zankel #define flush_cache_mm(mm)				do { } while (0)
141367b8112SChris Zankel #define flush_cache_dup_mm(mm)				do { } while (0)
142367b8112SChris Zankel 
143367b8112SChris Zankel #define flush_cache_vmap(start,end)			do { } while (0)
144*c4a05cf0SAlexandre Ghiti #define flush_cache_vmap_early(start,end)		do { } while (0)
145367b8112SChris Zankel #define flush_cache_vunmap(start,end)			do { } while (0)
146367b8112SChris Zankel 
14791e08063SChris Zankel #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
148367b8112SChris Zankel #define flush_dcache_page(page)				do { } while (0)
149367b8112SChris Zankel 
150f615136cSMax Filippov #define flush_icache_range local_flush_icache_range
151367b8112SChris Zankel #define flush_cache_page(vma, addr, pfn)		do { } while (0)
152367b8112SChris Zankel #define flush_cache_range(vma, start, end)		do { } while (0)
153367b8112SChris Zankel 
154367b8112SChris Zankel #endif
155367b8112SChris Zankel 
15670cd3444SChristoph Hellwig #define flush_icache_user_range flush_icache_range
15770cd3444SChristoph Hellwig 
/*
 * Ensure consistency between data and instruction cache for [start, end):
 * write back the dcache over the range, then invalidate the icache over
 * the same range so newly written instructions are fetched from memory.
 * Note the __-level helpers take (address, size), hence (end) - (start).
 */
#define local_flush_icache_range(start, end)				\
	do {								\
		__flush_dcache_range(start, (end) - (start));		\
		__invalidate_icache_range(start,(end) - (start));	\
	} while (0)
164367b8112SChris Zankel 
165367b8112SChris Zankel #define flush_dcache_mmap_lock(mapping)			do { } while (0)
166367b8112SChris Zankel #define flush_dcache_mmap_unlock(mapping)		do { } while (0)
167367b8112SChris Zankel 
168b6cee17bSMax Filippov #if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
169367b8112SChris Zankel 
170367b8112SChris Zankel extern void copy_to_user_page(struct vm_area_struct*, struct page*,
171367b8112SChris Zankel 		unsigned long, void*, const void*, unsigned long);
172367b8112SChris Zankel extern void copy_from_user_page(struct vm_area_struct*, struct page*,
173367b8112SChris Zankel 		unsigned long, void*, const void*, unsigned long);
174367b8112SChris Zankel 
175367b8112SChris Zankel #else
176367b8112SChris Zankel 
/*
 * Copy data into a user page (no-aliasing configuration): after the
 * memcpy, write back the dcache and invalidate the icache over the
 * destination so the new bytes are visible to instruction fetch.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		memcpy(dst, src, len);					\
		__flush_dcache_range((unsigned long) dst, len);		\
		__invalidate_icache_range((unsigned long) dst, len);	\
	} while (0)

/* Plain copy: reading a user page needs no cache maintenance here. */
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
186367b8112SChris Zankel 
187367b8112SChris Zankel #endif
188367b8112SChris Zankel 
189367b8112SChris Zankel #endif /* _XTENSA_CACHEFLUSH_H */
190