/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Refactored get_new_mmu_context( ) to only handle live-mm.
 *   retiring-mm handled in other hooks
 *
 * Vineetg: March 25th, 2008: Bug #92690
 *  -Major rewrite of Core ASID allocation routine get_new_mmu_context
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_MMU_CONTEXT_H
#define _ASM_ARC_MMU_CONTEXT_H

#include <asm/arcregs.h>
#include <asm/tlb.h>

#include <asm-generic/mm_hooks.h>

/* ARC700 ASID Management
 *
 * The ARC MMU provides an 8-bit ASID (0..255) to TAG TLB entries, allowing
 * entries with the same vaddr (different tasks) to co-exist. This provides for
 * a "Fast Context Switch", i.e. no TLB flush on context switch.
 *
 * Linux assigns each task a unique ASID. A simple round-robin allocation
 * of H/w ASID is done using the software tracker @asid_cache.
 * When it reaches max 255, the allocation cycle starts afresh by flushing
 * the entire TLB and wrapping ASID back to zero.
 *
 * For book-keeping, Linux uses a couple of data structures:
 *  -mm_struct has an @asid field to keep a note of the task's ASID (needed
 *   at the time of, say, switch_mm( ))
 *  -An array of mm structs @asid_mm_map[] for the reverse asid->mm mapping:
 *   given an ASID, find the associated mm struct.
 *
 * The round-robin allocation algorithm allows for ASID stealing.
 * If the asid tracker is at "x-1", a new req will allocate "x", even if "x"
 * was already assigned to another (switched-out) task. Obviously the prev
 * owner is marked with an invalid ASID to make it request a new ASID when it
 * gets scheduled next time. However its TLB entries (with ASID "x") could
 * still exist, which must be cleared before the same ASID is used by the new
 * owner. Flushing them would be a plausible but costly solution. Instead we
 * enforce an allocation policy quirk, which ensures that a stolen ASID won't
 * have any TLB entries associated with it, alleviating the need to flush.
 * The quirk essentially is to not allow an ASID allocated in a prev cycle
 * to be used past a roll-over in the next cycle.
 * When this happens (i.e. task ASID > asid tracker), the task needs to refresh
 * its ASID, aligning it to the current value of the tracker. If the task
 * doesn't get scheduled past a roll-over, so its ASID is not yet realigned
 * with the tracker, that ASID is anyway safely reusable because it is
 * guaranteed that no TLB entries with that ASID exist.
 */
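/*
 * Illustrative walk-through of the quirk (hypothetical numbers, not taken
 * from any particular workload):
 *
 * 1. During cycle N, task A is handed ASID 10; asid_cache later climbs to
 *    255 as other tasks allocate.
 * 2. A further request wraps asid_cache back to 0 and flushes the entire
 *    TLB - cycle N+1 begins.
 * 3. A's ASID (10) is now numerically above asid_cache, so the next time A
 *    is scheduled, get_new_mmu_context() skips the fast path and hands A a
 *    fresh ASID from cycle N+1.
 * 4. If A is never scheduled before asid_cache reaches 10 again, slot 10 is
 *    simply stolen (A gets marked NO_ASID): the roll-over flush in step 2
 *    removed any TLB entries tagged 10, and A, not having run since, cannot
 *    have created new ones.
 */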
#define FIRST_ASID	0
#define MAX_ASID	255		/* 8 bit PID field in PID Aux reg */
#define NO_ASID		(MAX_ASID + 1)	/* ASID Not alloc to mmu ctxt */
#define NUM_ASID	((MAX_ASID - FIRST_ASID) + 1)

/* ASID to mm struct mapping */
extern struct mm_struct *asid_mm_map[NUM_ASID + 1];

extern int asid_cache;

/*
 * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
 * Also set the MMU PID register to existing/updated ASID
 */
static inline void get_new_mmu_context(struct mm_struct *mm)
{
	struct mm_struct *prev_owner;
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Move to new ASID if it was not from current alloc-cycle/generation.
	 *
	 * Note: Callers needing a new ASID unconditionally, independent of
	 *	 generation, e.g. local_flush_tlb_mm() for a forking parent,
	 *	 first need to destroy the context, setting it to an invalid
	 *	 value.
	 */
	if (mm->context.asid <= asid_cache)
		goto set_hw;

	/*
	 * Relinquish the currently owned ASID (if any).
	 * Doing it unconditionally saves a cmp-n-branch; for an already
	 * unused ASID slot, the value was/remains NULL
	 */
	asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;

	/* move to new ASID */
	if (++asid_cache > MAX_ASID) {	/* ASID roll-over */
		asid_cache = FIRST_ASID;
		flush_tlb_all();
	}

	/*
	 * Is the next ASID already owned by someone else (we are stealing it)?
	 * If so, let the orig owner be aware of this, so when it runs, it
	 * asks for a brand new ASID. This would only happen for a long-lived
	 * task with an ASID from the prev allocation cycle (before roll-over).
	 *
	 * This might look wrong - if we are re-using some other task's ASID,
	 * won't we use its stale TLB entries too? Actually the algorithm
	 * takes care of such a case: it ensures that a task with an ASID from
	 * a prev alloc cycle, when scheduled, will refresh its ASID.
	 * The stealing scenario described here will only happen if that task
	 * didn't get a chance to refresh its ASID - implying stale entries
	 * won't exist.
	 */
	prev_owner = asid_mm_map[asid_cache];
	if (prev_owner)
		prev_owner->context.asid = NO_ASID;

	/* Assign new ASID to tsk */
	asid_mm_map[asid_cache] = mm;
	mm->context.asid = asid_cache;

set_hw:
	write_aux_reg(ARC_REG_PID, mm->context.asid | MMU_ENABLE);

	local_irq_restore(flags);
}
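/*
 * A minimal sketch of the pattern the Note above refers to (illustrative
 * only, not a helper defined by this header): a caller that must hand the
 * mm a brand new ASID regardless of generation first invalidates the
 * context, so the "asid <= asid_cache" fast path can't be taken:
 *
 *	mm->context.asid = NO_ASID;	// same thing destroy_context() does
 *	get_new_mmu_context(mm);	// NO_ASID (256) > asid_cache, so a
 *					// fresh ASID is always allocated
 */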
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.asid = NO_ASID;
	return 0;
}

/*
 * Prepare the MMU for the task: setup the PID reg with the allocated ASID.
 * If the task doesn't have an ASID (never allocated, or stolen), get a new one.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
#ifndef CONFIG_SMP
	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif

	get_new_mmu_context(next);
}

/*
 * Called at the time of execve() to get a new ASID.
 * Note the subtlety here: get_new_mmu_context() behaves differently here
 * vs. in switch_mm(). Here it always returns a new ASID, because the mm has
 * an unallocated "initial" value, while in the latter, it moves to a new
 * ASID only if it was unallocated.
 */
#define activate_mm(prev, next)		switch_mm(prev, next, NULL)
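/*
 * In other words (per the definitions above): right after init_new_context()
 * the new mm has mm->context.asid == NO_ASID (256), which is numerically
 * above any possible asid_cache value, so the fast path in
 * get_new_mmu_context() can never be taken and the exec'ing task is
 * guaranteed a freshly allocated ASID.
 */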
static inline void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);

	asid_mm_map[mm->context.asid] = NULL;
	mm->context.asid = NO_ASID;

	local_irq_restore(flags);
}

/*
 * It seemed that deactivate_mm( ) would be a reasonable place to do the
 * book-keeping for a retiring mm. However destroy_context( ) still needs to
 * do it, because between mm_release( ) => deactivate_mm( ) and
 * mmput( ) => .. => __mmdrop( ) => destroy_context( )
 * there is a good chance that the task gets sched-out/in, making its ASID
 * valid again (this teased me for a whole day).
 */
#define deactivate_mm(tsk, mm)		do { } while (0)

#define enter_lazy_tlb(mm, tsk)

#endif /* __ASM_ARC_MMU_CONTEXT_H */