#ifndef _SPARC64_CACHEFLUSH_H
#define _SPARC64_CACHEFLUSH_H

#include <asm/page.h>

#ifndef __ASSEMBLY__

#include <linux/mm.h>

/* Cache flush operations. */
#define flushw_all()	__asm__ __volatile__("flushw")

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all
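
/*
 * Usage sketch (illustrative, not part of the original header): code
 * that walks a user thread's stack must spill the cached register
 * windows to memory first, or it will read stale frames.
 * read_user_fp() is a hypothetical helper.
 *
 *	static int read_user_fp(unsigned long usp, unsigned long *fp)
 *	{
 *		flush_user_windows();	// spill this task's user windows
 *		return get_user(*fp, (unsigned long __user *) usp);
 *	}
 */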

/* These are the same regardless of whether this is an SMP kernel or not. */
#define flush_cache_mm(__mm) \
	do { if ((__mm) == current->mm) flushw_user(); } while(0)
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
#define flush_cache_range(vma, start, end) \
	flush_cache_mm((vma)->vm_mm)
#define flush_cache_page(vma, page, pfn) \
	flush_cache_mm((vma)->vm_mm)
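
/*
 * Note for callers (illustrative): since these all reduce to a
 * user-window flush, and only when __mm is current->mm, they are cheap
 * no-ops for foreign address spaces.  unmap_example() is a
 * hypothetical caller.
 *
 *	static void unmap_example(struct vm_area_struct *vma)
 *	{
 *		// No-op unless vma->vm_mm is the current task's mm.
 *		flush_cache_range(vma, vma->vm_start, vma->vm_end);
 *	}
 */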

/*
 * On spitfire, the icache doesn't snoop local stores and we don't
 * use block commit stores (which invalidate icache lines) during
 * module load, so we need this.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_icache_page(unsigned long);
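
/*
 * Usage sketch (illustrative): after storing new instructions, e.g.
 * when loading a module or patching code, the range must be flushed
 * before the new code is executed.  patch_insn() is hypothetical.
 *
 *	static void patch_insn(u32 *insn, u32 new_insn)
 *	{
 *		*insn = new_insn;
 *		flush_icache_range((unsigned long) insn,
 *				   (unsigned long) insn + sizeof(*insn));
 *	}
 */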

extern void __flush_dcache_page(void *addr, int flush_icache);
extern void flush_dcache_page_impl(struct page *page);
#ifdef CONFIG_SMP
extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
#else
#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
#endif
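
/*
 * Sketch (illustrative): the UP fallbacks above collapse to the local
 * flush, so a caller can use one code path regardless of CONFIG_SMP.
 * sync_page() is a hypothetical caller.
 *
 *	static void sync_page(struct mm_struct *mm, struct page *page)
 *	{
 *		// Flushes on every cpu under SMP, locally on UP.
 *		flush_dcache_page_all(mm, page);
 *	}
 */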

extern void __flush_dcache_range(unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
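
/*
 * Usage sketch (illustrative): code that writes through the kernel
 * mapping of a page which userspace may also have mapped should flush
 * afterwards to resolve D-cache aliases.  fill_page() is hypothetical.
 *
 *	static void fill_page(struct page *page, const void *buf, size_t len)
 *	{
 *		memcpy(page_address(page), buf, len);
 *		flush_dcache_page(page);
 *	}
 */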

#define flush_icache_page(vma, pg)	do { } while(0)
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
/* When writing into the user's page (e.g. a ptrace poke), the kernel
 * mapping actually stored through is dst, so that is the kaddr handed
 * to flush_ptrace_access(), with write=1 so the I-cache can be flushed
 * in case instructions were modified.  When reading, the mapping
 * loaded from is src and write=0.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
	do {								\
		flush_cache_page(vma, vaddr, page_to_pfn(page));	\
		memcpy(dst, src, len);					\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);	\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) 		\
	do {								\
		flush_cache_page(vma, vaddr, page_to_pfn(page));	\
		memcpy(dst, src, len);					\
		flush_ptrace_access(vma, page, vaddr, src, len, 0);	\
	} while (0)
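
/*
 * Usage sketch (illustrative): this is the ptrace/access_process_vm()
 * write path in miniature.  poke_text() is hypothetical; the real
 * caller lives in the generic mm code.
 *
 *	static void poke_text(struct vm_area_struct *vma, struct page *page,
 *			      unsigned long uaddr, void *buf, int len)
 *	{
 *		void *kaddr = kmap(page);
 *
 *		copy_to_user_page(vma, page, uaddr,
 *				  kaddr + (uaddr & ~PAGE_MASK), buf, len);
 *		kunmap(page);
 *	}
 */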

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#ifdef CONFIG_DEBUG_PAGEALLOC
/* internal debugging function */
void kernel_map_pages(struct page *page, int numpages, int enable);
#endif
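
/*
 * Sketch (illustrative): with CONFIG_DEBUG_PAGEALLOC the page allocator
 * unmaps pages as they are freed, so a use-after-free faults at once.
 * debug_free() is a hypothetical mirror of what the allocator does.
 *
 *	static void debug_free(struct page *page, int order)
 *	{
 *		kernel_map_pages(page, 1 << order, 0);	// 0 == unmap
 *	}
 */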

#endif /* !__ASSEMBLY__ */

#endif /* _SPARC64_CACHEFLUSH_H */