/*
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>

/*
 * No cache flushing is required when address mappings are changed,
 * because the caches on PowerPCs are physically addressed.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
#define flush_cache_vmap(start, end)		do { asm volatile("ptesync" ::: "memory"); } while (0)
#else
#define flush_cache_vmap(start, end)		do { } while (0)
#endif

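/*
 * flush_dcache_page() is called when the kernel has modified (or is about
 * to read) a page cache page that may also be mapped into user space; the
 * architecture performs whatever D-cache/I-cache maintenance is needed to
 * keep the two views coherent.
 */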
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

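/*
 * flush_icache_range() makes a range of kernel addresses coherent between
 * the D-cache and I-cache, typically after new instructions have been
 * stored (module loading, breakpoints, etc.).
 */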
extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);
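
/*
 * Low-level per-page helpers: __flush_dcache_icache() takes the kernel
 * virtual address of the page, flush_dcache_icache_page() a struct page.
 */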
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_icache_page(struct page *page);
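/*
 * On classic 32-bit CPUs a highmem page may have no kernel mapping, so a
 * physical-address variant is provided; elsewhere every page is reachable
 * through a virtual address and this path should never be taken, hence the
 * BUG() stub.
 */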
#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
extern void __flush_dcache_icache_phys(unsigned long physaddr);
#else
static inline void __flush_dcache_icache_phys(unsigned long physaddr)
{
	BUG();
}
#endif

#ifdef CONFIG_PPC32
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbf(addr);
	mb();	/* sync */
}

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbst(addr);
	mb();	/* sync */
}

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbi(addr);
	mb();	/* sync */
}
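
/*
 * Rough usage sketch for a buffer shared with a non-snooping device
 * (buf and len below are illustrative, not part of this API):
 *
 *	clean_dcache_range(buf, buf + len);       - push CPU stores to memory
 *	  ... device reads and/or writes the buffer ...
 *	invalidate_dcache_range(buf, buf + len);  - drop stale cached copies
 */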

#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
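/*
 * The 64-bit range flushes are implemented out of line;
 * flush_inval_dcache_range() writes modified blocks back to memory and
 * invalidates them.
 */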
extern void flush_dcache_range(unsigned long start, unsigned long stop);
extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
#endif

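/*
 * copy_to_user_page() is used when the kernel writes into a user page on
 * the user's behalf (e.g. ptrace via access_process_vm()); the I-cache
 * flush keeps any newly written instructions coherent.
 */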
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_CACHEFLUSH_H */