/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/module.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rcu.h>

#include "rcu.h"

MODULE_ALIAS("rcupdate");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

module_param(rcu_expedited, int, 0);

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
#ifdef CONFIG_PROVE_RCU_DELAY
		udelay(10); /* Make preemption more probable. */
#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */
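
/*
 * Illustrative sketch (not part of this file): a minimal read-side
 * critical section as serviced by the CONFIG_PREEMPT_RCU
 * __rcu_read_lock()/__rcu_read_unlock() implementation above. The
 * struct foo and the gp pointer are hypothetical names used only for
 * this example; the point is that nested rcu_read_lock() calls simply
 * increment ->rcu_read_lock_nesting, and only the outermost
 * rcu_read_unlock() examines ->rcu_read_unlock_special.
 *
 *	struct foo {
 *		int a;
 *	};
 *	struct foo __rcu *gp;
 *
 *	int read_foo_a(void)
 *	{
 *		struct foo *p;
 *		int ret = -1;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(gp);
 *		if (p)
 *			ret = p->a;
 *		rcu_read_unlock();
 *		return ret;
 *	}
 */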

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and not cases. Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation. This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

void wait_rcu_gp(call_rcu_func_t crf)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	crf(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(wait_rcu_gp);
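
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * wait_rcu_gp() turns any call_rcu()-style primitive into a synchronous
 * grace-period wait by queueing an on-stack callback and sleeping on the
 * associated completion. A synchronize_rcu()-like wrapper could therefore
 * be written as shown below; the real synchronize_rcu() variants live in
 * the Tiny/Tree RCU implementations, so this is only meant to show how
 * the crf argument is used.
 *
 *	void my_synchronize_rcu(void)
 *	{
 *		wait_rcu_gp(call_rcu);
 *	}
 *
 *	void my_synchronize_rcu_bh(void)
 *	{
 *		wait_rcu_gp(call_rcu_bh);
 *	}
 */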

#ifdef CONFIG_PROVE_RCU
/*
 * Wrapper function to avoid #include problems.
 */
int rcu_my_thread_group_empty(void)
{
	return thread_group_empty(current);
}
EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
#endif /* #ifdef CONFIG_PROVE_RCU */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static inline void debug_init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

static inline void debug_rcu_head_free(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. We just make sure that it is
		 * tracked in the object tracker.
		 */
		debug_object_init(head, &rcuhead_debug_descr);
		debug_object_activate(head, &rcuhead_debug_descr);
		return 0;
	default:
		return 1;
	}
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack. This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap. This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope. As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap. Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.fixup_activate = rcuhead_fixup_activate,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
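
/*
 * Illustrative sketch (hypothetical, not part of this file): code that
 * embeds an rcu_head in an on-stack variable should bracket its use with
 * init_rcu_head_on_stack() and destroy_rcu_head_on_stack(), exactly as
 * wait_rcu_gp() does above, so that debugobjects can track the object in
 * CONFIG_DEBUG_OBJECTS_RCU_HEAD=y builds. The names on_stack_wait,
 * on_stack_done, and wait_one_gp_on_stack are made up for this example.
 *
 *	struct on_stack_wait {
 *		struct rcu_head head;
 *		struct completion done;
 *	};
 *
 *	static void on_stack_done(struct rcu_head *head)
 *	{
 *		struct on_stack_wait *w =
 *			container_of(head, struct on_stack_wait, head);
 *
 *		complete(&w->done);
 *	}
 *
 *	void wait_one_gp_on_stack(void)
 *	{
 *		struct on_stack_wait w;
 *
 *		init_rcu_head_on_stack(&w.head);
 *		init_completion(&w.done);
 *		call_rcu(&w.head, on_stack_done);
 *		wait_for_completion(&w.done);
 *		destroy_rcu_head_on_stack(&w.head);
 *	}
 */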

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
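
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * rcu_jiffies_till_stall_check() returns the stall-warning interval in
 * jiffies, with the underlying timeout clamped above to the 3..300 second
 * range (plus RCU_STALL_DELAY_DELTA under CONFIG_PROVE_RCU). A typical
 * caller records a deadline and later compares it against jiffies:
 *
 *	static unsigned long gp_stall_deadline;
 *
 *	static void record_stall_deadline(void)
 *	{
 *		gp_stall_deadline = jiffies + rcu_jiffies_till_stall_check();
 *	}
 *
 *	static bool past_stall_deadline(void)
 *	{
 *		return time_after(jiffies, ACCESS_ONCE(gp_stall_deadline));
 *	}
 */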