/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/cputable.h>

/*
 * No cache flushing is required when address mappings are changed,
 * because the caches on PowerPCs are physically addressed.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
#define flush_cache_vmap(start, end)		do { asm volatile("ptesync" ::: "memory"); } while (0)
#else
#define flush_cache_vmap(start, end)		do { } while (0)
#endif
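
/*
 * Illustrative sketch only (not part of the interface): the ordering
 * problem the ptesync above guards against.  vmap(), VM_MAP and
 * PAGE_KERNEL are generic kernel APIs, used here purely for illustration.
 *
 *	void *va = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
 *	// flush_cache_vmap() runs on the way out of vmap(); without the
 *	// ptesync the newly written PTEs may not yet be visible to the
 *	// MMU, and this first access could take a spurious fault that the
 *	// page fault handler does not expect.
 *	memset(va, 0, npages * PAGE_SIZE);
 */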

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_icache_page(struct page *page);
#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
extern void __flush_dcache_icache_phys(unsigned long physaddr);
#else
static inline void __flush_dcache_icache_phys(unsigned long physaddr)
{
	BUG();
}
#endif
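
/*
 * Usage sketch, for illustration only: after storing new instructions into
 * kernel memory (e.g. when patching code), the modified range has to be
 * flushed so the instruction cache picks up the new contents.  "patch_site"
 * and "insn" below are made-up names.
 *
 *	*patch_site = insn;
 *	flush_icache_range((unsigned long)patch_site,
 *			   (unsigned long)patch_site + sizeof(insn));
 */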

#ifdef CONFIG_PPC32
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbf(addr);
	mb();	/* sync */
}
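
/*
 * Usage sketch, for illustration only: push a buffer out to memory before a
 * non-snooping device reads it, e.g. ahead of starting a DMA transfer.
 * "buf" and "len" are made-up names.
 *
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *	// the buffer can now be handed to the device
 */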

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbst(addr);
	mb();	/* sync */
}

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1));
	unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1);
	unsigned long i;

	for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES)
		dcbi(addr);
	mb();	/* sync */
}
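
/*
 * Usage sketch, for illustration only: on 8xx, discard stale cache lines
 * covering a receive buffer the CPM has just written by DMA, before the
 * CPU reads it.  "rx_buf", "rx_len" and process_frame() are made-up names.
 *
 *	invalidate_dcache_range((unsigned long)rx_buf,
 *				(unsigned long)rx_buf + rx_len);
 *	process_frame(rx_buf, rx_len);	// now reads fresh data from memory
 */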

#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
extern void flush_dcache_range(unsigned long start, unsigned long stop);
extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
#endif

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
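
/*
 * Usage sketch, for illustration only, modelled on the generic
 * access_remote_vm() path: ptrace writing a breakpoint into another task's
 * text page goes through copy_to_user_page() so the icache is flushed
 * after the store.  The variable names below are made up.
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	set_page_dirty_lock(page);
 *	kunmap(page);
 */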

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_CACHEFLUSH_H */