/*
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>
#include <asm/cputable.h>

/*
 * No cache flushing is required when address mappings are changed,
 * because the caches on PowerPCs are physically addressed.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

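/*
 * flush_dcache_page() is called by generic VM code after the kernel has
 * written into a page cache page that may also be mapped into user space
 * (see Documentation/cachetlb.txt).  Since the caches here are physically
 * addressed, its job on powerpc is I/D cache coherency rather than data
 * cache aliasing; the mmap lock/unlock hooks can stay no-ops because no
 * per-mapping state needs protecting.
 */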
extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

extern void __flush_icache_range(unsigned long, unsigned long);
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		__flush_icache_range(start, stop);
}
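
/*
 * Illustrative use of flush_icache_range() (not taken from a real caller):
 * code that stores instructions at run time must make them visible to the
 * instruction cache before executing them, e.g.
 *
 *	*insn_ptr = new_insn;		(insn_ptr, new_insn are hypothetical)
 *	flush_icache_range((unsigned long)insn_ptr,
 *			   (unsigned long)insn_ptr + sizeof(*insn_ptr));
 *
 * On CPUs that advertise CPU_FTR_COHERENT_ICACHE this collapses to a no-op.
 */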

extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);
extern void __flush_dcache_icache(void *page_va);
extern void flush_dcache_icache_page(struct page *page);
#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
extern void __flush_dcache_icache_phys(unsigned long physaddr);
#endif /* CONFIG_PPC32 && !CONFIG_BOOKE */
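
/*
 * __flush_dcache_icache() and flush_dcache_icache_page() write a page's
 * data cache lines back to memory and invalidate the matching icache
 * lines, which is what is needed once new instructions have been stored
 * into that page.  The _phys variant only exists on 32-bit non-BookE
 * parts so that a page with no kernel virtual mapping (e.g. a highmem
 * page) can be flushed by its physical address.
 */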

/*
 * flush_dcache_range() writes modified data cache blocks back to memory
 * and invalidates them; clean_dcache_range() writes them back without
 * invalidating, and invalidate_dcache_range() discards them without a
 * writeback (typically used around incoming DMA).
 */
extern void flush_dcache_range(unsigned long start, unsigned long stop);
#ifdef CONFIG_PPC32
extern void clean_dcache_range(unsigned long start, unsigned long stop);
extern void invalidate_dcache_range(unsigned long start, unsigned long stop);
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
#endif

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
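
/*
 * copy_to_user_page() is what the generic access_process_vm()/ptrace
 * paths use when writing into another task's pages through a kernel
 * mapping (e.g. planting a breakpoint), so it has to flush the icache
 * for the written range; copy_from_user_page() only reads and therefore
 * needs no flush.
 */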
#ifdef CONFIG_DEBUG_PAGEALLOC
/*
 * Internal debugging function: DEBUG_PAGEALLOC maps and unmaps kernel
 * pages as they are allocated and freed so that stray accesses fault.
 */
void kernel_map_pages(struct page *page, int numpages, int enable);
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_CACHEFLUSH_H */