/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)

#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbl = __pa(swapper_pg_dir);
	unsigned long ttbh = 0;

	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	asm volatile(
	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
	:
	: "r" (ttbl), "r" (ttbh));
	isb();
}
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		if (i == cpu) {
			asid = 0;
		} else {
			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
			__set_bit(ASID_TO_IDX(asid), asid_map);
		}
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	if (!tlb_ops_need_broadcast())
		cpumask_set_cpu(cpu, &tlb_flush_pending);
	else
		cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static int is_reserved_asid(u64 asid)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
}

static void new_context(struct mm_struct *mm, unsigned int cpu)
{
	u64 asid = mm->context.id;
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0 && is_reserved_asid(asid)) {
		/*
		 * Our current ASID was active during a rollover, we can
		 * continue to use it and this was just a false alarm.
		 */
		asid = generation | (asid & ~ASID_MASK);
	} else {
		/*
		 * Allocate a free ASID. If we can't find one, take a
		 * note of the currently active ASIDs and mark the TLBs
		 * as requiring flushes.
		 */
		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
		if (asid == NUM_USER_ASIDS) {
			generation = atomic64_add_return(ASID_FIRST_VERSION,
							 &asid_generation);
			flush_context(cpu);
			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
		}
		__set_bit(asid, asid_map);
		asid = generation | IDX_TO_ASID(asid);
		cpumask_clear(mm_cpumask(mm));
	}

	mm->context.id = asid;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
		new_context(mm, cpu);

	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}
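
/*
 * Illustrative sketch only, kept out of the build with #if 0: a minimal
 * user-space model of the 64-bit software ASID layout used above, assuming
 * ASID_BITS == 8 (the hardware ASID width on 32-bit ARM). The upper bits
 * hold the allocator "generation"; the low 8 bits hold the hardware ASID.
 * The names below (DEMO_*, demo_pack, demo_is_stale) are hypothetical and
 * exist only to show the arithmetic behind the fast-path generation check
 * in check_and_switch_context(); they are not part of the kernel API.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_ASID_BITS		8
#define DEMO_ASID_MASK		(~0ULL << DEMO_ASID_BITS)
#define DEMO_FIRST_VERSION	(1ULL << DEMO_ASID_BITS)

/* Pack a generation and a hardware ASID into one 64-bit context id. */
static uint64_t demo_pack(uint64_t generation, uint64_t hw_asid)
{
	return generation | (hw_asid & ~DEMO_ASID_MASK);
}

/* Mirror of the fast-path test: non-zero iff ctx is from an old generation. */
static int demo_is_stale(uint64_t ctx, uint64_t generation)
{
	return ((ctx ^ generation) >> DEMO_ASID_BITS) != 0;
}

int main(void)
{
	uint64_t generation = DEMO_FIRST_VERSION;	/* 0x100 */
	uint64_t ctx = demo_pack(generation, 42);	/* 0x12a */

	printf("ctx=%#llx stale=%d\n",
	       (unsigned long long)ctx, demo_is_stale(ctx, generation));

	/* A rollover bumps the generation by DEMO_FIRST_VERSION, so the
	 * same ctx now fails the check and must go through new_context(). */
	generation += DEMO_FIRST_VERSION;		/* 0x200 */
	printf("after rollover: stale=%d\n", demo_is_stale(ctx, generation));
	return 0;
}
#endif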