xref: /openbmc/linux/arch/sh/include/asm/cacheflush.h (revision fd589a8f)
#ifndef __ASM_SH_CACHEFLUSH_H
#define __ASM_SH_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>

/*
 * Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for the dcache
 *  - flush_icache_range(start, end) flushes (invalidates) a range for the icache
 *  - flush_icache_page(vma, pg) flushes (invalidates) a page for the icache
 *  - flush_cache_sigtramp(vaddr) flushes the signal trampoline
 */
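/*
 * Illustrative sketch, not part of this interface: the usual pattern for
 * making newly written instructions visible to the instruction cache, e.g.
 * after copying a trampoline.  'dst', 'insn_buf' and 'len' are hypothetical.
 *
 *	memcpy(dst, insn_buf, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */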
/* Per-CPU cache flush hooks; args packs the arguments for the operation
   (typically a struct flusher_data, declared below). */
extern void (*local_flush_cache_all)(void *args);
extern void (*local_flush_cache_mm)(void *args);
extern void (*local_flush_cache_dup_mm)(void *args);
extern void (*local_flush_cache_page)(void *args);
extern void (*local_flush_cache_range)(void *args);
extern void (*local_flush_dcache_page)(void *args);
extern void (*local_flush_icache_range)(void *args);
extern void (*local_flush_icache_page)(void *args);
extern void (*local_flush_cache_sigtramp)(void *args);

static inline void cache_noop(void *args) { }

/* Region helpers: write back dirty lines, write back and invalidate, or
   invalidate without write-back, respectively. */
extern void (*__flush_wback_region)(void *start, int size);
extern void (*__flush_purge_region)(void *start, int size);
extern void (*__flush_invalidate_region)(void *start, int size);

extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_dup_mm(struct mm_struct *mm);
extern void flush_cache_page(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn);
extern void flush_cache_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end);
extern void flush_dcache_page(struct page *page);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma,
				 struct page *page);
extern void flush_cache_sigtramp(unsigned long address);

/* Argument block handed to the local_flush_*() hooks via their args pointer. */
struct flusher_data {
	struct vm_area_struct *vma;
	unsigned long addr1, addr2;
};

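/*
 * Illustrative sketch (assumed, mirroring how the generic entry points are
 * commonly wrapped for SMP): the caller packs its arguments into a
 * flusher_data on the stack and hands it to the matching local_flush_*()
 * hook through the void *args parameter.
 *
 *	struct flusher_data data = {
 *		.vma	= vma,
 *		.addr1	= addr,		// user virtual address of the page
 *		.addr2	= pfn,		// page frame number
 *	};
 *	local_flush_cache_page(&data);
 */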
#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *page, unsigned long);

static inline void flush_anon_page(struct vm_area_struct *vma,
				   struct page *page, unsigned long vmaddr)
{
	/* Only anonymous pages on CPUs with dcache aliases need flushing here. */
	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_dcache_page(page);
}

extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

void kmap_coherent_init(void);
void *kmap_coherent(struct page *page, unsigned long addr);
void kunmap_coherent(void *kvaddr);

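/*
 * Illustrative sketch (assumed usage, not part of this header):
 * kmap_coherent() maps a user page at a kernel virtual address chosen to
 * avoid dcache aliasing with 'addr', so the kernel can touch the page
 * without leaving stale lines behind; the mapping is released with
 * kunmap_coherent().  'src' and 'len' are hypothetical.
 *
 *	void *vto = kmap_coherent(page, vaddr);
 *	memcpy(vto, src, len);
 *	kunmap_coherent(vto);
 */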
#define PG_dcache_dirty	PG_arch_1

void cpu_cache_init(void);

#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */