/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ABI_CSKY_CACHEFLUSH_H
#define __ABI_CSKY_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/string.h>
#include <asm/cache.h>

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

#define flush_cache_mm(mm)			dcache_wbinv_all()
#define flush_cache_page(vma, page, pfn)	cache_wbinv_all()
#define flush_cache_dup_mm(mm)			cache_wbinv_all()

#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)

#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	dcache_wbinv_all();
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
				   struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		cache_wbinv_all();
}

/*
 * cache_wbinv_range(start, end) is broken when current->mm != vma->vm_mm:
 * a virtual-range flush only reaches the currently loaded address space.
 * Use cache_wbinv_all() for now; this should be improved in the future.
 */
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end);
#define flush_cache_vmap(start, end)		cache_wbinv_all()
#define flush_cache_vunmap(start, end)		cache_wbinv_all()

#define flush_icache_page(vma, page)		do {} while (0)
#define flush_icache_range(start, end)		cache_wbinv_range(start, end)
#define flush_icache_mm_range(mm, start, end)	cache_wbinv_range(start, end)
#define flush_icache_deferred(mm)		do {} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
	cache_wbinv_all(); \
} while (0)

#endif /* __ABI_CSKY_CACHEFLUSH_H */
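
/*
 * Usage sketch: the helpers above are called by generic MM code rather
 * than by drivers.  The write side of access_process_vm() is a typical
 * caller of copy_to_user_page(); the fragment below is loosely modelled
 * on __access_remote_vm() in mm/memory.c (the locals maddr, offset, buf
 * and bytes are illustrative assumptions, not names from this header):
 *
 *	void *maddr = kmap(page);
 *
 *	// memcpy() the new bytes into the kernel mapping of the page,
 *	// then write back + invalidate so any aliasing user mapping in
 *	// the target task sees the update (cache_wbinv_all() on abiv1).
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	set_page_dirty_lock(page);
 *	kunmap(page);
 *
 * Since nearly every hook above degrades to a whole-cache writeback and
 * invalidate, such remote writes are correct but expensive on abiv1.
 */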