/*
 *  S390 version
 *
 *  Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>

/* Set up the ASCE bits and the top-level region table of a new mm. */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	cpumask_clear(&mm->context.cpu_attach_mask);
	atomic_set(&mm->context.attach_count, 0);
	mm->context.flush_mm = 0;
	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
	mm->context.has_pgste = 0;
	mm->context.use_skey = 0;
	mm->context.asce_limit = STACK_TOP_MAX;
	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
	return 0;
}

#define destroy_context(mm)		do { } while (0)

/* Install the user ASCE; CR1 is reloaded on return to user space (CIF_ASCE). */
static inline void set_user_asce(struct mm_struct *mm)
{
	S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
	if (current->thread.mm_segment.ar4)
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	set_cpu_flag(CIF_ASCE);
}

/* Detach the user address space by loading the kernel ASCE into CR1 and CR7. */
static inline void clear_user_asce(void)
{
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.user_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
}

/* Make sure CR1 contains the kernel ASCE; the user ASCE is restored later. */
static inline void load_kernel_asce(void)
{
	unsigned long asce;

	__ctl_store(asce, 1, 1);
	if (asce != S390_lowcore.kernel_asce)
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	set_cpu_flag(CIF_ASCE);
}

/*
 * Switch to the address space of the next mm; the new user ASCE is
 * only loaded after the scheduler locks have been dropped.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (prev == next)
		return;
	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
	/* Clear old ASCE by loading the kernel ASCE. */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	atomic_inc(&next->context.attach_count);
	atomic_dec(&prev->context.attach_count);
	if (MACHINE_HAS_TLB_LC)
		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
}

/*
 * Called after the scheduler has dropped its locks: wait for concurrent
 * TLB flushes of the mm to finish, attach this CPU to the mm and perform
 * a deferred flush if one is pending.
 */
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	load_kernel_asce();
	if (mm) {
		preempt_disable();
		while (atomic_read(&mm->context.attach_count) >> 16)
			cpu_relax();

		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		if (mm->context.flush_mm)
			__tlb_flush_mm(mm);
		preempt_enable();
	}
	set_fs(current->thread.mm_segment);
}

#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)

/* Activate a new mm (exec): switch to it and load its user ASCE right away. */
static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, current);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	set_user_asce(next);
}

/* Fork: if the parent mm uses a smaller address space limit, downgrade the child to match. */
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
#ifdef CONFIG_64BIT
	if (oldmm->context.asce_limit < mm->context.asce_limit)
		crst_table_downgrade(mm, oldmm->context.asce_limit);
#endif
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

#endif /* __S390_MMU_CONTEXT_H */