/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_CACHEFLUSH_H
#define _ALPHA_CACHEFLUSH_H

#include <linux/mm.h>

/* Note that the following two definitions are _highly_ dependent
   on the contexts in which they are used in the kernel.  I personally
   think it is criminal how loosely defined these macros are.  */

/* We need to flush the kernel's icache after loading modules.  The
   only other use of this macro is in load_aout_interp, which is not
   used on Alpha.

   Note that this definition should *not* be used for userspace
   icache flushing.  While functional, it is _way_ overkill.  The
   icache is tagged with ASNs and it suffices to allocate a new ASN
   for the process.  */
#ifndef CONFIG_SMP
#define flush_icache_range(start, end)		imb()
#else
#define flush_icache_range(start, end)		smp_imb()
extern void smp_imb(void);
#endif
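
/*
 * Illustrative sketch (not part of this header): a caller that patches
 * kernel text (the module loader is the main one) would write the new
 * instructions and then make them visible to the icache roughly like
 * this, where dst/insns/len are placeholder names:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 *
 * On UP this is a single imb(); on SMP, smp_imb() propagates the flush
 * to the other CPUs as well.
 */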

/* We need to flush the userspace icache after setting breakpoints in
   ptrace.

   Instead of indiscriminately using imb, take advantage of the fact
   that icache entries are tagged with the ASN and load a new mm context.  */
/* ??? Ought to use this in arch/alpha/kernel/signal.c too.  */

#ifndef CONFIG_SMP
#include <linux/sched.h>

extern void __load_new_mm_context(struct mm_struct *);
static inline void
flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	if (vma->vm_flags & VM_EXEC) {
		struct mm_struct *mm = vma->vm_mm;
		if (current->active_mm == mm)
			__load_new_mm_context(mm);
		else
			mm->context[smp_processor_id()] = 0;
	}
}
#define flush_icache_user_page flush_icache_user_page
#else /* CONFIG_SMP */
extern void flush_icache_user_page(struct vm_area_struct *vma,
		struct page *page, unsigned long addr, int len);
#define flush_icache_user_page flush_icache_user_page
#endif /* CONFIG_SMP */
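
/*
 * Illustrative sketch (assumed call path, not part of this header):
 * ptrace instruction writes reach this hook through copy_to_user_page(),
 * which the generic code expands to roughly
 *
 *	memcpy(dst, src, len);
 *	flush_icache_user_page(vma, page, vaddr, len);
 *
 * so every instruction write into another process ends with the ASN
 * reload above (UP) or the cross-CPU flush (SMP).
 */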

/*
 * Both implementations of flush_icache_user_page flush the entire
 * address space, so a single call covers any number of pages.
 */
static inline void flush_icache_pages(struct vm_area_struct *vma,
		struct page *page, unsigned int nr)
{
	flush_icache_user_page(vma, page, 0, 0);
}
#define flush_icache_pages flush_icache_pages

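/*
 * The "#define foo foo" lines above tell <asm-generic/cacheflush.h> that
 * this architecture supplies its own flush_icache_user_page and
 * flush_icache_pages; the generic header then only fills in no-op
 * defaults for the hooks left undefined here.
 */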
#include <asm-generic/cacheflush.h>

#endif /* _ALPHA_CACHEFLUSH_H */