/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_MMU_CONTEXT_H
#define __ASM_CSKY_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <linux/errno.h>
#include <linux/sched.h>
#include <abi/ckmmu.h>

/*
 * Convert the pgd's kernel virtual address into the physical address
 * expected by the hardware and hand it to setup_pgd().
 */
static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
{
	pgd -= PAGE_OFFSET;
	pgd += PHYS_OFFSET;
	pgd |= 1;
	setup_pgd(pgd, kernel);
}

#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
	tlbmiss_handler_setup_pgd((unsigned long)pgd, 0)
#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
	tlbmiss_handler_setup_pgd((unsigned long)pgd, 1)

/* Reverse of the conversion above: recover the pgd's kernel virtual address. */
static inline unsigned long tlb_get_pgd(void)
{
	return ((get_pgd() - PHYS_OFFSET) & ~1) + PAGE_OFFSET;
}

#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

#define ASID_FIRST_VERSION	(1 << CONFIG_CPU_ASID_BITS)
#define ASID_INC		0x1
#define ASID_MASK		(ASID_FIRST_VERSION - 1)
#define ASID_VERSION_MASK	~ASID_MASK

#define destroy_context(mm)		do {} while (0)
#define enter_lazy_tlb(mm, tsk)		do {} while (0)
#define deactivate_mm(tsk, mm)		do {} while (0)

/*
 * All upper bits unused by the hardware are treated as a
 * software ASID extension (the version).
 */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += ASID_INC;
	if (!(asid & ASID_MASK)) {
		flush_tlb_all();	/* start new asid cycle */
		if (!asid)		/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}
	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/*
 * Initialize the context-related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_online_cpu(i)
		cpu_context(i, mm) = 0;
	return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);
	/* Check if our ASID is of an older version and thus invalid */
	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
		get_new_mmu_context(next, cpu);
	write_mmu_entryhi(cpu_asid(cpu, next));
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/*
	 * Mark current->active_mm as not "active" anymore.
	 * We don't want to mislead possible IPI tlb flush routines.
	 */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	local_irq_restore(flags);
}
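
/*
 * Worked example of the version check in switch_mm() above.  For
 * illustration only: it assumes CONFIG_CPU_ASID_BITS == 8, which this
 * header does not fix.
 *
 *	ASID_FIRST_VERSION == 0x100	(first software version)
 *	ASID_MASK          == 0x0ff	(hardware ASID bits)
 *	ASID_VERSION_MASK  == ~0x0ff	(software version bits)
 *
 * If cpu_context(cpu, next) is 0x2ff (hardware ASID 0xff allocated in
 * version 0x200) and asid_cache(cpu) has since advanced to 0x301, then
 * (0x2ff ^ 0x301) & ~0x0ff == 0x100 is non-zero, so switch_mm() calls
 * get_new_mmu_context() to allocate a fresh ASID before writing it to
 * MMU entryhi.
 */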

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	local_irq_save(flags);

	/* Unconditionally get a new ASID. */
	get_new_mmu_context(next, cpu);

	write_mmu_entryhi(cpu_asid(cpu, next));
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/* mark mmu ownership change */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	local_irq_restore(flags);
}

/*
 * If mm is currently active_mm, we can't really drop it. Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		get_new_mmu_context(mm, cpu);
		write_mmu_entryhi(cpu_asid(cpu, mm));
	} else {
		/* will get a new context next time */
		cpu_context(cpu, mm) = 0;
	}

	local_irq_restore(flags);
}

#endif /* __ASM_CSKY_MMU_CONTEXT_H */