/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Refactored get_new_mmu_context( ) to only handle live-mm.
 *   retiring-mm handled in other hooks
 *
 * Vineetg: March 25th, 2008: Bug #92690
 *  -Major rewrite of Core ASID allocation routine get_new_mmu_context
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_MMU_CONTEXT_H
#define _ASM_ARC_MMU_CONTEXT_H

#include <asm/arcregs.h>
#include <asm/tlb.h>

#include <asm-generic/mm_hooks.h>

/*		ARC700 ASID Management
 *
 * The ARC MMU provides an 8-bit ASID (0..255) to tag TLB entries, allowing
 * entries with the same vaddr (from different tasks) to co-exist. This
 * provides for a "Fast Context Switch", i.e. no TLB flush on ctxt-switch.
 *
 * Linux assigns each task a unique ASID. A simple round-robin allocation
 * of H/w ASIDs is done using the software tracker @asid_cache.
 * When it reaches the max of 255, the allocation cycle starts afresh by
 * flushing the entire TLB and wrapping the ASID back to zero.
 *
 * For book-keeping, Linux uses a couple of data structures:
 *  -mm_struct has an @asid field to keep note of the task's ASID (needed
 *   at the time of, say, switch_mm( ))
 *  -An array of mm structs, @asid_mm_map[], for the asid->mm reverse
 *   mapping: given an ASID, find the associated mm struct.
 *
 * The round-robin allocation algorithm allows for ASID stealing.
 * If the asid tracker is at "x-1", a new request will allocate "x", even if
 * "x" was already assigned to another (switched-out) task. Obviously the
 * prev owner is marked with an invalid ASID so that it requests a new ASID
 * when it gets scheduled next. However its TLB entries (with ASID "x")
 * could still exist, and they must be cleared before the same ASID is used
 * by the new owner. Flushing them would be a plausible but costly solution.
 * Instead we enforce an allocation policy quirk which ensures that a stolen
 * ASID won't have any TLB entries associated, alleviating the need to flush.
 * The quirk essentially is to not allow an ASID allocated in a prev cycle
 * to be used past a roll-over into the next cycle.
 * When this happens (i.e. task ASID > asid tracker), the task needs to
 * refresh its ASID, aligning it to the current value of the tracker. If the
 * task doesn't get scheduled past a roll-over, and hence its ASID is not
 * realigned with the tracker, that ASID is still safely reusable because it
 * is guaranteed that no TLB entries with it exist (the roll-over flushed
 * the entire TLB).
 */
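/*
 * A worked example of the scheme above. Purely illustrative: the ASID
 * numbers, task names and event ordering are hypothetical, chosen only to
 * show the mechanics.
 *
 *   Start: asid_cache = 254; task A owns ASID 254, task B owns ASID 255
 *          (B's 255 is left over from the previous allocation cycle).
 *
 *   1. Task C needs an ASID: ++asid_cache -> 255; no roll-over, but slot
 *      255 is owned by B, so B is marked NO_ASID (its ASID is stolen).
 *      Per the quirk, B cannot have live TLB entries tagged 255 here:
 *      the last roll-over flushed them and B hasn't run with 255 since.
 *   2. Task D needs an ASID: ++asid_cache -> 256 > MAX_ASID, so the cycle
 *      restarts: asid_cache = FIRST_ASID (0), flush_tlb_all(), D gets 0.
 *   3. When A (still holding 254, allocated before the roll-over) is next
 *      scheduled, switch_mm( ) sees 254 > asid_cache and gets it a fresh
 *      ASID, so a pre-roll-over ASID is never used past the flush.
 */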
#define FIRST_ASID  0
#define MAX_ASID    255			/* 8-bit PID field in PID Aux reg */
#define NO_ASID     (MAX_ASID + 1)	/* ASID not allocated to mmu ctxt */
#define NUM_ASID    ((MAX_ASID - FIRST_ASID) + 1)

/* ASID to mm struct mapping (the spare last slot backs NO_ASID) */
extern struct mm_struct *asid_mm_map[NUM_ASID + 1];

extern int asid_cache;

/*
 * Assign a new ASID to the task. If the task already has an ASID, it is
 * relinquished.
 */
static inline void get_new_mmu_context(struct mm_struct *mm)
{
	struct mm_struct *prev_owner;
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Relinquish the currently owned ASID (if any).
	 * Doing it unconditionally saves a cmp-n-branch; for an mm without
	 * an ASID this hits the spare NO_ASID slot, whose value was/remains
	 * NULL.
	 */
	asid_mm_map[mm->context.asid] = NULL;

	/* move to new ASID */
	if (++asid_cache > MAX_ASID) {	/* ASID roll-over */
		asid_cache = FIRST_ASID;
		flush_tlb_all();
	}

	/*
	 * Is the next ASID already owned by someone else (i.e. are we
	 * stealing it)? If so, let the orig owner know, so that when it
	 * runs it asks for a brand new ASID. This only happens for a
	 * long-lived task with an ASID from a prev allocation cycle
	 * (before the ASID roll-over).
	 *
	 * This might look wrong - if we are re-using some other task's
	 * ASID, won't we use its stale TLB entries too? Actually
	 * switch_mm( ) takes care of that case: it ensures that a task
	 * with an ASID from a prev alloc cycle refreshes its ASID when
	 * scheduled - see switch_mm( ) below. The stealing scenario
	 * described here only happens if that task didn't get a chance to
	 * refresh its ASID - implying that stale entries won't exist.
	 */
	prev_owner = asid_mm_map[asid_cache];
	if (prev_owner)
		prev_owner->context.asid = NO_ASID;

	/* Assign the new ASID to the task */
	asid_mm_map[asid_cache] = mm;
	mm->context.asid = asid_cache;

#ifdef CONFIG_ARC_TLB_DBG
	pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
		" pid:%u, assigned asid:%lu\n",
		(unsigned int)mm, (unsigned int)prev_owner,
		(unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
		(mm->context.tsk)->pid, mm->context.asid);
#endif

	write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);

	local_irq_restore(flags);
}

/*
 * Initialize the context related info for a new mm_struct instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.asid = NO_ASID;
#ifdef CONFIG_ARC_TLB_DBG
	mm->context.tsk = tsk;
#endif
	return 0;
}
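/*
 * For orientation, a rough sketch of when the hooks in this file are
 * invoked by the generic mm code (summarised here; the generic Linux mm
 * layer is authoritative for the exact call sites):
 *
 *   fork/clone                  -> init_new_context( )  (mm starts as NO_ASID)
 *   context switch              -> switch_mm( )
 *   exec                        -> activate_mm( )
 *   mm_release( ) at task exit  -> deactivate_mm( )     (a no-op here, see below)
 *   mmput( ) ... __mmdrop( )    -> destroy_context( )
 */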
/*
 * Prepare the MMU for the task: set up the PID reg with the allocated ASID.
 * If the task doesn't have an ASID (never allocated, or stolen), get a new one.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
#ifndef CONFIG_SMP
	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif

	/*
	 * Get a new ASID if the task doesn't have a valid one. Possible when
	 *  -the task never had an ASID (fresh after fork)
	 *  -its ASID was stolen past an ASID roll-over
	 *  -there's a third obscure scenario (the task runs for the first
	 *   time after an ASID roll-over), where despite having a valid
	 *   ASID, we force a new one to be allocated - see comments at top.
	 *
	 * All three cases are caught by the single comparison below:
	 * NO_ASID is 256 and a pre-roll-over ASID is greater than the
	 * wrapped tracker, while asid_cache is always a valid ASID (0-255).
	 */
	if (next->context.asid > asid_cache) {
		get_new_mmu_context(next);
	} else {
		/*
		 * XXX: would never trigger given the check above
		 * BUG_ON(next->context.asid > MAX_ASID);
		 */
		write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
	}
}

static inline void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);

	asid_mm_map[mm->context.asid] = NULL;
	mm->context.asid = NO_ASID;

	local_irq_restore(flags);
}

/*
 * It seemed that deactivate_mm( ) would be a reasonable place to do the
 * book-keeping for a retiring mm. However destroy_context( ) still needs to
 * do it, because between mm_release( ) => deactivate_mm( ) and
 * mmput( ) => .. => __mmdrop( ) => destroy_context( )
 * there is a good chance that the task gets sched-out/in, making its ASID
 * valid again (this teased me for a whole day).
 */
#define deactivate_mm(tsk, mm)   do { } while (0)

static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
#ifndef CONFIG_SMP
	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif

	/* Unconditionally get a new ASID */
	get_new_mmu_context(next);
}

#define enter_lazy_tlb(mm, tsk)

#endif	/* _ASM_ARC_MMU_CONTEXT_H */