#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern void mmu_context_init(void);
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched.c
 *
 * Switches the MMU context from @prev to @next on the current CPU,
 * recording that @next has run here via its cpumask.  Expected to be
 * called with interrupts disabled (activate_mm() below disables them
 * explicitly before calling in).
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* Mark this context has been used on the new CPU */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/* We must stop all altivec streams before changing the HW
	 * context
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/* The actual HW switching method differs between the various
	 * sub architectures: 64-bit hash MMUs use the SLB (or the older
	 * segment table on CPUs without CPU_FTR_SLB); everything else
	 * goes through the out-of-line switch_mmu_context().
	 */
#ifdef CONFIG_PPC_STD_MMU_64
	if (cpu_has_feature(CPU_FTR_SLB))
		switch_slb(tsk, next);
	else
		switch_stab(tsk, next);
#else
	/* Out of line for now */
	switch_mmu_context(prev, next);
#endif
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 *
 * Runs the switch with interrupts disabled since switch_mm() is
 * normally entered from the scheduler with interrupts off.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */