xref: /openbmc/linux/arch/csky/abiv1/inc/abi/cacheflush.h (revision c4a05cf0)
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/string.h>
#include <asm/cache.h>

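/*
 * This ABI provides real flush_dcache_page()/flush_dcache_folio()
 * implementations, so the generic no-op versions are not used.
 */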
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
void flush_dcache_folio(struct folio *);
#define flush_dcache_folio flush_dcache_folio

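/*
 * Per-mm and per-page flushes cannot be done safely by virtual range here
 * (see the comment above flush_cache_range() below), so fall back to
 * writing back and invalidating the whole cache.
 */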
#define flush_cache_mm(mm)			dcache_wbinv_all()
#define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
#define flush_cache_dup_mm(mm)			cache_wbinv_all()

#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)

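/*
 * These helpers are typically called around I/O on vmap'd memory, which may
 * alias the linear mapping. No attempt is made to operate on the given
 * range: both simply write back and invalidate the whole D-cache.
 */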
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}

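/*
 * flush_anon_page() is called when the kernel accesses an anonymous user
 * page (e.g. via get_user_pages()). Such pages are not covered by the
 * page-cache flushing above, so write back and invalidate the whole cache.
 */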
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		cache_wbinv_all();
}

/*
 * If current_mm != vma->vm_mm, cache_wbinv_range(start, end) would operate
 * on the wrong address space, so fall back to cache_wbinv_all() here.
 * This should be improved in the future.
 */
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
#define flush_cache_vmap(start, end)		cache_wbinv_all()
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		cache_wbinv_all()

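/*
 * flush_icache_range() must make instructions newly written to
 * [start, end) visible for execution; here this is done with a
 * write-back/invalidate over the range.
 */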
#define flush_icache_range(start, end)		cache_wbinv_range(start, end)
#define flush_icache_mm_range(mm, start, end)	cache_wbinv_range(start, end)
#define flush_icache_deferred(mm)		do {} while (0)

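/*
 * copy_to_user_page() modifies a user page (e.g. ptrace installing a
 * breakpoint), so the caches must be flushed afterwards; reading via
 * copy_from_user_page() needs no flush.
 */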
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
	cache_wbinv_all(); \
} while (0)

#endif /* __ABI_CSKY_CACHEFLUSH_H */