1 /*
2  *  This program is free software; you can redistribute it and/or
3  *  modify it under the terms of the GNU General Public License
4  *  as published by the Free Software Foundation; either version
5  *  2 of the License, or (at your option) any later version.
6  */
7 #ifndef _ASM_POWERPC_CACHEFLUSH_H
8 #define _ASM_POWERPC_CACHEFLUSH_H
9 
10 #ifdef __KERNEL__
11 
12 #include <linux/mm.h>
13 #include <asm/cputable.h>
14 #include <asm/cpu_has_feature.h>
15 
16 /*
17  * No cache flushing is required when address mappings are changed,
18  * because the caches on PowerPCs are physically addressed.
19  */
20 #define flush_cache_all()			do { } while (0)
21 #define flush_cache_mm(mm)			do { } while (0)
22 #define flush_cache_dup_mm(mm)			do { } while (0)
23 #define flush_cache_range(vma, start, end)	do { } while (0)
24 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
25 #define flush_icache_page(vma, page)		do { } while (0)
26 #define flush_cache_vunmap(start, end)		do { } while (0)
27 
28 #ifdef CONFIG_PPC_BOOK3S_64
29 /*
30  * Book3s has no ptesync after setting a pte, so without this ptesync it's
31  * possible for a kernel virtual mapping access to return a spurious fault
32  * if it's accessed right after the pte is set. The page fault handler does
33  * not expect this type of fault. flush_cache_vmap is not exactly the right
34  * place to put this, but it seems to work well enough.
35  */
36 #define flush_cache_vmap(start, end)		do { asm volatile("ptesync" ::: "memory"); } while (0)
37 #else
38 #define flush_cache_vmap(start, end)		do { } while (0)
39 #endif
40 
41 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
42 extern void flush_dcache_page(struct page *page);
43 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
44 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
45 
46 extern void flush_icache_range(unsigned long, unsigned long);
47 extern void flush_icache_user_range(struct vm_area_struct *vma,
48 				    struct page *page, unsigned long addr,
49 				    int len);
50 extern void __flush_dcache_icache(void *page_va);
51 extern void flush_dcache_icache_page(struct page *page);
52 #if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
53 extern void __flush_dcache_icache_phys(unsigned long physaddr);
54 #else
/*
 * Stub for configurations other than classic (non-BookE) PPC32: callers
 * are expected never to reach this path, so hitting it is a kernel bug.
 */
static inline void __flush_dcache_icache_phys(unsigned long physaddr)
{
	BUG();
}
59 #endif
60 
61 #ifdef CONFIG_PPC32
62 /*
63  * Write any modified data cache blocks out to memory and invalidate them.
64  * Does not invalidate the corresponding instruction cache blocks.
65  */
66 static inline void flush_dcache_range(unsigned long start, unsigned long stop)
67 {
68 	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
69 	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
70 	unsigned long i;
71 
72 	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
73 		dcbf(addr);
74 	mb();	/* sync */
75 }
76 
77 /*
78  * Write any modified data cache blocks out to memory.
79  * Does not invalidate the corresponding cache lines (especially for
80  * any corresponding instruction cache).
81  */
82 static inline void clean_dcache_range(unsigned long start, unsigned long stop)
83 {
84 	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
85 	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
86 	unsigned long i;
87 
88 	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
89 		dcbst(addr);
90 	mb();	/* sync */
91 }
92 
93 /*
94  * Like above, but invalidate the D-cache.  This is used by the 8xx
95  * to invalidate the cache so the PPC core doesn't get stale data
96  * from the CPM (no cache snooping here :-).
97  */
98 static inline void invalidate_dcache_range(unsigned long start,
99 					   unsigned long stop)
100 {
101 	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
102 	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
103 	unsigned long i;
104 
105 	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
106 		dcbi(addr);
107 	mb();	/* sync */
108 }
109 
110 #endif /* CONFIG_PPC32 */
111 #ifdef CONFIG_PPC64
112 extern void flush_dcache_range(unsigned long start, unsigned long stop);
113 extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
114 #endif
115 
116 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
117 	do { \
118 		memcpy(dst, src, len); \
119 		flush_icache_user_range(vma, page, vaddr, len); \
120 	} while (0)
121 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
122 	memcpy(dst, src, len)
123 
124 #endif /* __KERNEL__ */
125 
126 #endif /* _ASM_POWERPC_CACHEFLUSH_H */
127