/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 *   31                         7          0
 *   +-------------------------+-----------+
 *   |      process ID         |   ASID    |
 *   +-------------------------+-----------+
 *   |              context ID             |
 *   +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)

/*
 * ASID 0 is reserved (it tags the global mappings used while TTBR0 points
 * at swapper_pg_dir), so the allocation bitmap covers ASIDs
 * 1..NUM_USER_ASIDS and these macros convert between a hardware ASID and
 * its bitmap index.
 */
#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbl = __pa(swapper_pg_dir);
	unsigned long ttbh = 0;

	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	asm volatile(
	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
	:
	: "r" (ttbl), "r" (ttbh));
	isb();
}
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		if (i == cpu) {
			asid = 0;
		} else {
			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
			__set_bit(ASID_TO_IDX(asid), asid_map);
		}
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	if (!tlb_ops_need_broadcast())
		cpumask_set_cpu(cpu, &tlb_flush_pending);
	else
		cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static int is_reserved_asid(u64 asid)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
}

static void new_context(struct mm_struct *mm, unsigned int cpu)
{
	u64 asid = mm->context.id;
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0 && is_reserved_asid(asid)) {
		/*
		 * Our current ASID was active during a rollover, we can
		 * continue to use it and this was just a false alarm.
		 */
		asid = generation | (asid & ~ASID_MASK);
	} else {
		/*
		 * Allocate a free ASID. If we can't find one, take a
		 * note of the currently active ASIDs and mark the TLBs
		 * as requiring flushes.
		 */
		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
		if (asid == NUM_USER_ASIDS) {
			generation = atomic64_add_return(ASID_FIRST_VERSION,
							 &asid_generation);
			flush_context(cpu);
			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
		}
		__set_bit(asid, asid_map);
		asid = generation | IDX_TO_ASID(asid);
		cpumask_clear(mm_cpumask(mm));
	}

	mm->context.id = asid;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	/*
	 * Fast path: the mm's ASID belongs to the current generation and
	 * this CPU's active_asids entry has not been cleared by a
	 * concurrent rollover (the xchg returned non-zero), so we can
	 * switch without taking cpu_asid_lock.
	 */
	if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
		new_context(mm, cpu);

	atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}