#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

/*
 * Book3S 64-bit uses an int context id; every other platform uses an
 * unsigned long and needs an out-of-line mmu_context_init().
 */
#ifdef CONFIG_PPC_BOOK3S_64
extern int __init_new_context(void);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif

/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched.c
 *
 * Records @next as in use on this CPU, updates the per-platform pointer
 * to the current PGD, and hands off to the sub-architecture specific HW
 * switching method. Caller is expected to hold off interrupts (see
 * activate_mm below).
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* Mark this context as having been used on the new CPU */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif
	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/* We must stop all altivec streams before changing the HW
	 * context ("dssall" = data stream stop all)
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/* The actual HW switching method differs between the various
	 * sub architectures.
	 */
#ifdef CONFIG_PPC_STD_MMU_64
	if (cpu_has_feature(CPU_FTR_SLB))
		switch_slb(tsk, next);
	else
		switch_stab(tsk, next);
#else
	/* Out of line for now */
	switch_mmu_context(prev, next);
#endif

}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	/* switch_mm must not be interrupted mid hardware context switch */
	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */