// SPDX-License-Identifier: GPL-2.0-only
/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscall and exception entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>


#ifdef CONFIG_CONTEXT_TRACKING_IDLE
noinstr void ct_idle_enter(void)
{
	rcu_idle_enter();
}
EXPORT_SYMBOL_GPL(ct_idle_enter);

void ct_idle_exit(void)
{
	rcu_idle_exit();
}
EXPORT_SYMBOL_GPL(ct_idle_exit);
#endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */

#ifdef CONFIG_CONTEXT_TRACKING_USER

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

DEFINE_STATIC_KEY_FALSE(context_tracking_key);
EXPORT_SYMBOL_GPL(context_tracking_key);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);

static noinstr bool context_tracking_recursion_enter(void)
{
	int recursion;

	recursion = __this_cpu_inc_return(context_tracking.recursion);
	if (recursion == 1)
		return true;

	WARN_ONCE(recursion < 1, "Invalid context tracking recursion value %d\n", recursion);
	__this_cpu_dec(context_tracking.recursion);

	return false;
}

static __always_inline void context_tracking_recursion_exit(void)
{
	__this_cpu_dec(context_tracking.recursion);
}
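/*
 * Illustrative nesting (an assumed scenario, not code from this file): the
 * recursion counter above turns any nested invocation on the same CPU into
 * a no-op, e.g. when an exception taken in the middle of __ct_user_enter()
 * calls back into context tracking:
 *
 *	__ct_user_enter()
 *	    context_tracking_recursion_enter()	-> recursion == 1, proceed
 *	    <exception>
 *	        __ct_user_exit()
 *	            context_tracking_recursion_enter() -> recursion == 2, bail
 *
 * Only the outermost call reads and writes the per-CPU context state.
 */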
/**
 * __ct_user_enter - Inform the context tracking that the CPU is going
 *		     to enter user or guest space mode.
 *
 * This function must be called right before we switch from the kernel
 * to user or guest space, when it's guaranteed the remaining kernel
 * instructions to execute won't use any RCU read side critical section
 * because this function sets RCU in extended quiescent state.
 */
void noinstr __ct_user_enter(enum ctx_state state)
{
	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	if (!context_tracking_recursion_enter())
		return;

	if (__this_cpu_read(context_tracking.state) != state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			if (state == CONTEXT_USER) {
				instrumentation_begin();
				trace_user_enter(0);
				vtime_user_enter(current);
				instrumentation_end();
			}
			rcu_user_enter();
		}
		/*
		 * Even if context tracking is disabled on this CPU, because it's outside
		 * the full dynticks mask for example, we still have to keep track of the
		 * context transitions and states to prevent inconsistency on those of
		 * other CPUs.
		 * If a task triggers an exception in userspace, sleeps in the exception
		 * handler and then migrates to another CPU, that new CPU must know where
		 * the exception returns by the time we call exception_exit().
		 * This information can only be provided by the previous CPU when it called
		 * exception_enter().
		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
		 * is false because we know that CPU is not tickless.
		 */
		__this_cpu_write(context_tracking.state, state);
	}
	context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__ct_user_enter);

/*
 * OBSOLETE:
 * This function should be noinstr but the below local_irq_restore() is
 * unsafe because it involves illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call __ct_user_enter() through user_enter_irqoff()
 * or context_tracking_guest_enter(). It should be the arch entry code's
 * responsibility to call into context tracking with IRQs disabled.
 */
void ct_user_enter(enum ctx_state state)
{
	unsigned long flags;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	local_irq_save(flags);
	__ct_user_enter(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(ct_user_enter);
EXPORT_SYMBOL_GPL(ct_user_enter);

/**
 * user_enter_callable() - Unfortunate ASM callable version of user_enter() for
 *			   archs that didn't manage to check the context tracking
 *			   static key from low level code.
 *
 * This OBSOLETE function should be noinstr but it unsafely calls
 * local_irq_restore(), involving illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call user_enter_irqoff(). It should be the arch entry code's
 * responsibility to call into context tracking with IRQs disabled.
 */
void user_enter_callable(void)
{
	user_enter();
}
NOKPROBE_SYMBOL(user_enter_callable);
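/*
 * Sketch of the preferred, non-obsolete call sequence as seen from arch
 * entry code (an illustrative assumption only, the exact hooks and their
 * placement vary by architecture):
 *
 *	Return to userspace:
 *		... last kernel-side work, RCU still usable ...
 *		local_irq_disable();
 *		user_enter_irqoff();	-> __ct_user_enter(CONTEXT_USER)
 *		... arch return to userspace, no RCU read-side sections ...
 *
 *	Entry from userspace:
 *		... arch low level entry ...
 *		user_exit_irqoff();	-> __ct_user_exit(CONTEXT_USER)
 *		... regular kernel code, RCU usable again ...
 */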
/**
 * __ct_user_exit - Inform the context tracking that the CPU is
 *		    exiting user or guest mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from user or
 * guest space, before any use of an RCU read side critical section. This
 * potentially includes any high level kernel code like syscalls, exceptions,
 * signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void noinstr __ct_user_exit(enum ctx_state state)
{
	if (!context_tracking_recursion_enter())
		return;

	if (__this_cpu_read(context_tracking.state) == state) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			 * RCU core about that (ie: we may need the tick again).
			 */
			rcu_user_exit();
			if (state == CONTEXT_USER) {
				instrumentation_begin();
				vtime_user_exit(current);
				trace_user_exit(0);
				instrumentation_end();
			}
		}
		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
	}
	context_tracking_recursion_exit();
}
EXPORT_SYMBOL_GPL(__ct_user_exit);

/*
 * OBSOLETE:
 * This function should be noinstr but the below local_irq_save() is
 * unsafe because it involves illegal RCU uses through tracing and lockdep.
 * This is unlikely to be fixed as this function is obsolete. The preferred
 * way is to call __ct_user_exit() through user_exit_irqoff()
 * or context_tracking_guest_exit(). It should be the arch entry code's
 * responsibility to call into context tracking with IRQs disabled.
 */
void ct_user_exit(enum ctx_state state)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	__ct_user_exit(state);
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(ct_user_exit);
EXPORT_SYMBOL_GPL(ct_user_exit);

/**
 * user_exit_callable() - Unfortunate ASM callable version of user_exit() for
 *			   archs that didn't manage to check the context tracking
 *			   static key from low level code.
 *
 * This OBSOLETE function should be noinstr but it unsafely calls local_irq_save(),
 * involving illegal RCU uses through tracing and lockdep. This is unlikely
 * to be fixed as this function is obsolete. The preferred way is to call
 * user_exit_irqoff(). It should be the arch entry code's responsibility to
 * call into context tracking with IRQs disabled.
 */
void user_exit_callable(void)
{
	user_exit();
}
NOKPROBE_SYMBOL(user_exit_callable);

void __init ct_cpu_track_user(int cpu)
{
	static __initdata bool initialized = false;

	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_branch_inc(&context_tracking_key);
	}

	if (initialized)
		return;

#ifdef CONFIG_HAVE_TIF_NOHZ
	/*
	 * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork
	 * This assumes that init is the only task at this early boot stage.
	 */
	set_tsk_thread_flag(&init_task, TIF_NOHZ);
#endif
	WARN_ON_ONCE(!tasklist_empty());

	initialized = true;
}

#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		ct_cpu_track_user(cpu);
}
#endif

#endif /* #ifdef CONFIG_CONTEXT_TRACKING_USER */
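/*
 * Note (an assumption about the surrounding kernel, not code from this file):
 * without CONFIG_CONTEXT_TRACKING_USER_FORCE, per-CPU tracking is normally
 * enabled at boot by the nohz_full= machinery, along the lines of:
 *
 *	for_each_cpu(cpu, tick_nohz_full_mask)
 *		ct_cpu_track_user(cpu);
 *
 * so only CPUs in the full dynticks mask pay for the tracking work above.
 */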