#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/uaccess.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Internal implementation */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void); /* flushes data-cache only (all processors) */
void flush_instruction_cache(void); /* flushes i-cache only (all processors) */
#else
#define flush_data_cache() flush_data_cache_local(NULL)
#define flush_instruction_cache() flush_instruction_cache_local(NULL)
#endif
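
/* A sketch of what the SMP variants are expected to do: broadcast the
 * local flush to every processor.  The real definitions live elsewhere
 * in the architecture code (arch/parisc/kernel/cache.c); the use of
 * on_each_cpu() here is an illustrative assumption, not the verbatim
 * implementation:
 *
 *	void flush_data_cache(void)
 *	{
 *		on_each_cpu(flush_data_cache_local, NULL, 1);
 *	}
 */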

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_page(unsigned long);
void flush_user_icache_page(unsigned long);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))
/* The vmap range operations flush and invalidate.  Architecturally, we
 * don't need the invalidate, because the CPU should refuse to speculate
 * once an area has been flushed, so invalidate is left empty. */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	flush_kernel_dcache_range_asm(start, start + size);
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
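
/* A minimal usage sketch for the vmap helpers: a driver doing DMA through
 * a vmap() alias flushes the alias before the device reads the buffer and
 * invalidates it before the CPU reads data the device wrote.  dma_buf,
 * dma_len and the device calls are hypothetical names for illustration:
 *
 *	flush_kernel_vmap_range(dma_buf, dma_len);	// CPU stores visible
 *	start_device_dma(dma_buf, dma_len);
 *	wait_for_device_dma();
 *	invalidate_kernel_vmap_range(dma_buf, dma_len);	// drop stale lines
 *	// now safe to read what the device wrote via dma_buf
 */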

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma,page)	do {		\
	flush_kernel_dcache_page(page);			\
	flush_kernel_icache_page(page_address(page));	\
} while (0)

#define flush_icache_range(s,e)		do {		\
	flush_kernel_dcache_range_asm((s), (e));	\
	flush_kernel_icache_range_asm((s), (e));	\
} while (0)
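
/* A minimal usage sketch: code that stores instructions through the kernel
 * mapping (module loading, breakpoint patching and the like) must push the
 * new instructions out of the d-cache and invalidate the i-cache before
 * executing them.  patch_site and new_insn are hypothetical names:
 *
 *	*patch_site = new_insn;
 *	flush_icache_range((unsigned long)patch_site,
 *			   (unsigned long)patch_site + sizeof(new_insn));
 */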

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)
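
/* These are what the core VM uses when one task writes another task's
 * memory, most visibly in the ptrace/access_process_vm path.  A rough
 * sketch of the calling pattern, with the locking around it omitted:
 *
 *	kaddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, kaddr + offset, buf, len);
 *	kunmap(page);
 */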

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		flush_user_dcache_page(vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}
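
/* A minimal usage sketch: a driver that fills a page through its kernel
 * mapping (e.g. a PIO block driver completing a read into the page cache)
 * flushes the kernel alias so the data becomes visible through any user
 * mapping of that page.  io_buffer is a hypothetical name:
 *
 *	memcpy(page_address(page), io_buffer, PAGE_SIZE);
 *	flush_kernel_dcache_page(page);
 */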

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#ifdef CONFIG_PA8X00
/* Only pa8800 and pa8900 need this */

#include <asm/kmap_types.h>

#define ARCH_HAS_KMAP

void kunmap_parisc(void *addr);

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page)			kunmap_parisc(page_address(page))

static inline void *__kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}

static inline void __kunmap_atomic(void *addr)
{
	kunmap_parisc(addr);
	pagefault_enable();
}

#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
#define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
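
/* On parisc there is no highmem, so kmap() simply returns the page's
 * existing kernel address; the point of ARCH_HAS_KMAP here is that
 * kunmap()/kunmap_parisc() give pa8800/pa8900 a hook to flush the kernel
 * alias when the mapping is dropped.  A generic caller looks like:
 *
 *	void *vto = kmap_atomic(page);
 *	memcpy(vto, src, PAGE_SIZE);
 *	kunmap_atomic(vto);
 */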
#endif

#endif /* _PARISC_CACHEFLUSH_H */