1 #ifndef __PARISC_MMU_CONTEXT_H 2 #define __PARISC_MMU_CONTEXT_H 3 4 #include <linux/mm.h> 5 #include <linux/sched.h> 6 #include <linux/atomic.h> 7 #include <asm/pgalloc.h> 8 #include <asm/pgtable.h> 9 #include <asm-generic/mm_hooks.h> 10 11 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 12 { 13 } 14 15 /* on PA-RISC, we actually have enough contexts to justify an allocator 16 * for them. prumpf */ 17 18 extern unsigned long alloc_sid(void); 19 extern void free_sid(unsigned long); 20 21 static inline int 22 init_new_context(struct task_struct *tsk, struct mm_struct *mm) 23 { 24 BUG_ON(atomic_read(&mm->mm_users) != 1); 25 26 mm->context = alloc_sid(); 27 return 0; 28 } 29 30 static inline void 31 destroy_context(struct mm_struct *mm) 32 { 33 free_sid(mm->context); 34 mm->context = 0; 35 } 36 37 static inline unsigned long __space_to_prot(mm_context_t context) 38 { 39 #if SPACEID_SHIFT == 0 40 return context << 1; 41 #else 42 return context >> (SPACEID_SHIFT - 1); 43 #endif 44 } 45 46 static inline void load_context(mm_context_t context) 47 { 48 mtsp(context, 3); 49 mtctl(__space_to_prot(context), 8); 50 } 51 52 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) 53 { 54 55 if (prev != next) { 56 mtctl(__pa(next->pgd), 25); 57 load_context(next->context); 58 } 59 } 60 61 #define deactivate_mm(tsk,mm) do { } while (0) 62 63 static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) 64 { 65 /* 66 * Activate_mm is our one chance to allocate a space id 67 * for a new mm created in the exec path. There's also 68 * some lazy tlb stuff, which is currently dead code, but 69 * we only allocate a space id if one hasn't been allocated 70 * already, so we should be OK. 71 */ 72 73 BUG_ON(next == &init_mm); /* Should never happen */ 74 75 if (next->context == 0) 76 next->context = alloc_sid(); 77 78 switch_mm(prev,next,current); 79 } 80 #endif 81