/*
 * S390 version
 *
 * Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>

static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	cpumask_clear(&mm->context.cpu_attach_mask);
	atomic_set(&mm->context.attach_count, 0);
	mm->context.flush_mm = 0;
	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
	mm->context.has_pgste = 0;
	mm->context.asce_limit = STACK_TOP_MAX;
	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
	return 0;
}

#define destroy_context(mm)		do { } while (0)

/* Point the user ASCE at the new mm and restore the address space mode. */
static inline void update_user_asce(struct mm_struct *mm, int load_primary)
{
	pgd_t *pgd = mm->pgd;

	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
	if (load_primary)
		__ctl_load(S390_lowcore.user_asce, 1, 1);
	set_fs(current->thread.mm_segment);
}

/*
 * Detach the user address space by loading the kernel ASCE into the
 * primary (cr1, if requested) and secondary (cr7) ASCE registers.
 */
static inline void clear_user_asce(struct mm_struct *mm, int load_primary)
{
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	if (load_primary)
		__ctl_load(S390_lowcore.user_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
}

/*
 * Make sure cr1 contains the kernel ASCE and set TIF_ASCE so the
 * user ASCE gets reloaded later.
 */
static inline void update_primary_asce(struct task_struct *tsk)
{
	unsigned long asce;

	__ctl_store(asce, 1, 1);
	if (asce != S390_lowcore.kernel_asce)
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	set_tsk_thread_flag(tsk, TIF_ASCE);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	update_primary_asce(tsk);
	if (prev == next)
		return;
	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
	/* The upper 16 bits of attach_count indicate a TLB flush in progress. */
	if (atomic_inc_return(&next->context.attach_count) >> 16) {
		/* Delay update_user_asce until all TLB flushes are done. */
		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
		/* Clear old ASCE by loading the kernel ASCE. */
		clear_user_asce(next, 0);
	} else {
		cpumask_set_cpu(cpu, mm_cpumask(next));
		update_user_asce(next, 0);
		if (next->context.flush_mm)
			/* Flush pending TLBs */
			__tlb_flush_mm(next);
	}
	atomic_dec(&prev->context.attach_count);
	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
	if (MACHINE_HAS_TLB_LC)
		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}

#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	if (!test_tsk_thread_flag(tsk, TIF_TLB_WAIT))
		return;
	preempt_disable();
	clear_tsk_thread_flag(tsk, TIF_TLB_WAIT);
	/* Wait until concurrent TLB flushes have finished. */
	while (atomic_read(&mm->context.attach_count) >> 16)
		cpu_relax();

	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	update_user_asce(mm, 0);
	if (mm->context.flush_mm)
		__tlb_flush_mm(mm);
	preempt_enable();
}

#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)

static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, current);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
#ifdef CONFIG_64BIT
	if (oldmm->context.asce_limit < mm->context.asce_limit)
		crst_table_downgrade(mm, oldmm->context.asce_limit);
#endif
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

#endif /* __S390_MMU_CONTEXT_H */