/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache flush operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#include <linux/mm_types.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flushes a range of instructions
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
 *  - flush_icache_page(vma, pg) flushes (invalidates) a page for icache
 *
 *  Need to double-check which of these are really needed for ptrace to work.
 */
#define LINESIZE	32
#define LINEBITS	5
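
/*
 * Illustrative sketch only, not part of this header's real interface:
 * how a caller might round an arbitrary span out to whole cache lines
 * before a ranged flush, assuming LINESIZE is the power of two implied
 * by LINEBITS above.  The helper names are hypothetical.
 */
static inline unsigned long cacheline_align_down_example(unsigned long addr)
{
	/* equivalent to (addr >> LINEBITS) << LINEBITS */
	return addr & ~(unsigned long)(LINESIZE - 1);
}

static inline unsigned long cacheline_align_up_example(unsigned long addr)
{
	return (addr + LINESIZE - 1) & ~(unsigned long)(LINESIZE - 1);
}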

#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_page(vma, pg)		do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

/*
 * Flush Dcache range through current map.
 */
extern void flush_dcache_range(unsigned long start, unsigned long end);

/*
 * Flush Icache range through current map.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);

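/*
 * Illustrative sketch only, not part of this header's real interface:
 * after modifying instructions in memory (e.g. code patching), the usual
 * pattern is to push the new data out of the dcache and then invalidate
 * the stale icache lines for the same span.  The helper name is
 * hypothetical.
 */
static inline void sync_icache_dcache_example(unsigned long start,
					      unsigned long end)
{
	flush_dcache_range(start, end);
	flush_icache_range(start, end);
}
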
/*
 * The memory-management related flushes exist to ensure that, on
 * non-physically-indexed cache schemes, stale lines belonging to a given
 * ASID don't linger in the cache and confuse things.  The prototype
 * Hexagon Virtual Machine uses a single ASID for all user-mode maps,
 * which should mean these flushes aren't necessary.  A brute-force,
 * flush-everything implementation, named xxxxx_hexagon(), is present in
 * arch/hexagon/mm/cache.c, but let's not wire it up until we know it is
 * needed.
 */
extern void flush_cache_all_hexagon(void);

/*
 * This may or may not ever need to be non-empty, depending on the
 * virtual machine MMU.  For a native kernel, it's definitely a no-op.
 *
 * This is also the place where deferred cache coherency stuff would
 * classically happen, but instead we do it like ia64 and clean the
 * cache when the PTE is set.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *ptep)
{
	/*  generic_ptrace_pokedata doesn't wind up here, does it?  */
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, void *src, int len);

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
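
/*
 * Illustrative sketch only (roughly paraphrasing the generic
 * ptrace/access_process_vm pattern, not code that lives in this header):
 * writes into another process's pages go through copy_to_user_page() so
 * that any icache/dcache maintenance can happen, e.g.
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	kunmap(page);
 *
 * Reads in the other direction only need the plain memcpy() wrapper above.
 */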

extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
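
/*
 * Illustrative sketch only, inferred from the primitives' names rather
 * than from anything defined in this header: a typical pairing for a
 * buffer shared with a non-coherent device would be
 *
 *	hexagon_clean_dcache_range(start, end);	(before the device reads memory)
 *	hexagon_inv_dcache_range(start, end);	(before the CPU reads device-written memory)
 */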

#endif