#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
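/*
 * mm/internal.h is not visible to arch code, so the prototype above is
 * duplicated here for the IOMMU preregistration helpers that need it.
 */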
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
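
/*
 * A sketch of the intended call flow, based on how the SPAPR TCE VFIO
 * driver consumes this API (details may vary by caller):
 *
 *	mm_iommu_init(mm);			// once per mm
 *	mm_iommu_get(mm, ua, entries, &mem);	// pin and preregister pages
 *	mm_iommu_ua_to_hpa(mem, ua, &hpa);	// translate while registered
 *	mm_iommu_mapped_inc(mem); ..._dec(mem);	// hold across HW mappings
 *	mm_iommu_put(mm, mem);			// unpin when done
 */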
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
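
/*
 * Book3S 64 runs with either the radix or the hash MMU: radix switches
 * the process-scoped page tables via radix__switch_mmu_context(), while
 * the hash MMU reloads the SLB for the incoming address space.
 */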
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif

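/*
 * Appears to address a POWER9 issue where translations prefetched while a
 * core was running a KVM guest could linger into host context; stubbed out
 * when KVM HV or radix support is not configured.
 */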
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif

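/*
 * icswx coprocessor context hooks: context.acop tracks which coprocessor
 * types an mm has been granted, and the switch path below only calls
 * switch_cop() when prev or next actually uses one.
 */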
extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched/core.c
 */
static inline void switch_mm_irqs_off(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	bool new_on_cpu = false;

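	/*
	 * mm_cpumask() is what the TLB invalidation paths consult to decide
	 * which CPUs must observe flushes for this mm, so record the new
	 * CPU before the hardware switch below.
	 */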
	/* Mark this context as having been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
		new_on_cpu = true;
	}

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif
	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

#ifdef CONFIG_PPC_ICSWX
	/* Switch coprocessor context only if prev or next uses a coprocessor */
	if (prev->context.acop || next->context.acop)
		switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */

	/* We must stop all altivec streams before changing the HW
	 * context
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	if (new_on_cpu)
		radix_kvm_prefetch_workaround(next);

	/*
	 * The actual HW switching method differs between the various
	 * sub-architectures. Out of line for now.
	 */
	switch_mmu_context(prev, next, tsk);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
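/* Tell the generic code that this arch supplies switch_mm_irqs_off(). */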
#define switch_mm_irqs_off switch_mm_irqs_off

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

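/*
 * If an unmap removes the range holding the VDSO, forget the cached base
 * address so later users (e.g. the signal trampoline lookup) don't land
 * on a stale mapping.
 */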
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */