/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>

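/*
 * Defined in arch/arm/mm/ioremap.c: re-copies the vmalloc-area page table
 * entries from init_mm into @mm when the per-mm vmalloc_seq counter has
 * fallen behind init_mm's (i.e. after new ioremap/vmalloc mappings).
 */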
void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

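/*
 * Implemented in arch/arm/mm/context.c: allocates a fresh ASID for @mm if
 * its current one belongs to an old generation (flushing TLBs on rollover)
 * and then performs the actual page table switch via cpu_switch_mm().
 */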
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
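
/*
 * A new mm starts with no ASID allocated: a context.id of 0 is never handed
 * out as a real ASID, so the first check_and_switch_context() call on this
 * mm will assign a fresh one.
 */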
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	return 0;
}

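/*
 * ARM erratum 798181: on affected Cortex-A15 parts, broadcast TLB
 * maintenance operations may not reach other cores reliably, so the set of
 * CPUs that must be shot down explicitly (by IPI) has to be computed.
 */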
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask);
#else  /* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
					   cpumask_t *mask)
{
}
#endif /* CONFIG_ARM_ERRATA_798181 */

#else	/* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

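/*
 * Without ASIDs every mm switch replaces the page tables outright, and
 * cpu_switch_mm() must also flush the (VIVT) caches on these CPUs.
 */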
static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		mm->context.switch_pending = 1;
	else
		cpu_switch_mm(mm->pgd, mm);
}

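/*
 * Defining finish_arch_post_lock_switch tells the generic scheduler code
 * that this architecture provides the hook; it is called once the context
 * switch locks are dropped and interrupts are enabled again, which is a
 * safe point to perform the mm switch deferred above.
 */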
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm && mm->context.switch_pending) {
		/*
		 * Preemption must be disabled during cpu_switch_mm() as we
		 * have some stateful cache flush implementations. Check
		 * switch_pending again in case we were preempted and the
		 * switch to this mm was already done.
		 */
		preempt_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			cpu_switch_mm(mm->pgd, mm);
		}
		preempt_enable_no_resched();
	}
}

#endif	/* CONFIG_MMU */

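/* No ASID and no deferred state: nothing to set up for a new mm. */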
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}

#endif	/* CONFIG_CPU_HAS_ASID */

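/*
 * There is no per-mm context to tear down, and activating an mm (e.g. at
 * exec time) is simply a switch with no previously running task.
 */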
#define destroy_context(mm)		do { } while (0)
#define activate_mm(prev,next)		switch_mm(prev, next, NULL)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	/*
	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
	 * so check for possible thread migration and invalidate the I-cache
	 * if we're new to this CPU.
	 */
	if (cache_ops_need_broadcast() &&
	    !cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();

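	/*
	 * Mark this CPU in next's mm_cpumask before switching so broadcast
	 * TLB/cache operations will reach it; switch if this CPU is new to
	 * the mm (thread migration) or the mm itself has changed.
	 */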
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}

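/* Nothing to do when a task's mm is detached; the switch away is lazy. */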
#define deactivate_mm(tsk,mm)	do { } while (0)

#endif
153