/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
#define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)


/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}

/* Take a snapshot of the update side's sequence number. */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
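
/*
 * Illustrative sketch, not kernel code: with RCU_SEQ_CTR_SHIFT == 2, the
 * low two bits of a sequence number hold the grace-period state and the
 * upper bits count grace periods, so a value of 0x9 means counter 2
 * (0x9 >> 2) in state 1 (0x9 & 0x3).  A typical polling use of this API,
 * against a hypothetical counter "my_gp_seq", might look like:
 *
 *	unsigned long s;
 *
 *	s = rcu_seq_snap(&my_gp_seq);	// GP after which we are done.
 *	// ...make sure a grace period has been started...
 *	while (!rcu_seq_done(&my_gp_seq, s))
 *		schedule_timeout_uninterruptible(1);
 *	// A full grace period has now elapsed since the snapshot.
 */
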
/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	return (new - old) >> RCU_SEQ_CTR_SHIFT;
}

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part of the
 * RCU API.  Leaving in rcupdate.h because they are used by all RCU flavors.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

void kfree(const void *);
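
/*
 * Illustrative sketch, not kernel code: the debug-objects hooks above are
 * used in matched pairs around a callback's time on the callback lists.
 * A hypothetical queuing path and the matching invocation path pair up as:
 *
 *	debug_rcu_head_queue(head);	// READY -> QUEUED when queued.
 *	// ...a grace period elapses...
 *	debug_rcu_head_unqueue(head);	// QUEUED -> READY before invocation.
 *	head->func(head);
 *
 * Re-queuing a still-pending rcu_head is the sort of error that
 * CONFIG_DEBUG_OBJECTS_RCU_HEAD is designed to catch.
 */
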
/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kfree_rcu_offset(offset)) {
		RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
		kfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	} else {
		RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
		head->func(head);
		rcu_lock_release(&rcu_callback_map);
		return false;
	}
}

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_suppress;
int rcu_jiffies_till_stall_check(void);

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x) tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(SRCU) || !defined(TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
/*
 * Note that these two are static rather than extern, so each translation
 * unit including this header gets its own zero-initialized copy.
 */
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
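
/*
 * Worked example (illustrative): with nr_cpu_ids == 96 and a two-level
 * tree whose levelcnt[] is {1, 6} (one root, six leaves), the balanced
 * branch above computes, from the leaf level upward:
 *
 *	levelspread[1] = (96 + 6 - 1) / 6 = 16	// CPUs per leaf node.
 *	levelspread[0] = (6 + 1 - 1) / 1 = 6	// Leaf nodes under the root.
 *
 * With rcu_fanout_exact set, the leaf level instead gets rcu_fanout_leaf
 * and all other levels get RCU_FANOUT, with no rebalancing.
 */
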
/* Returns first leaf rcu_node of the specified RCU flavor. */
#define rcu_first_leaf_node(rsp) ((rsp)->level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rsp, rnp) ((rnp) == &(rsp)->node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; !rcu_is_leaf_node(rnp); (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = rcu_first_leaf_node(rsp); \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for ((cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
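
/*
 * Illustrative sketch, not kernel code: a typical scan visits each leaf
 * node and then each possible CPU covered by that leaf, given some
 * hypothetical rcu_state pointer "rsp":
 *
 *	struct rcu_node *rnp;
 *	int cpu;
 *
 *	rcu_for_each_leaf_node(rsp, rnp)
 *		for_each_leaf_node_possible_cpu(rnp, cpu) {
 *			// ...per-CPU processing for this leaf...
 *		}
 */
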
/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on
 * ->lock.
 */
#define raw_spin_lock_rcu_node(p) \
do { \
	raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irq_rcu_node(p) \
do { \
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

#define raw_spin_trylock_rcu_node(p) \
({ \
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
	\
	if (___locked) \
		smp_mb__after_unlock_lock(); \
	___locked; \
})

#define raw_lockdep_assert_held_rcu_node(p) \
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))

#endif /* #if defined(SRCU) || !defined(TINY_RCU) */
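
/*
 * Illustrative sketch, not kernel code: the wrappers above are used exactly
 * like the corresponding raw_spinlock_t operations, for example:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	// ...update rnp fields, fully ordered against earlier critical
 *	//    sections on any rcu_node lock, courtesy of the extra
 *	//    smp_mb__after_unlock_lock() in the acquire path...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */
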
#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_BH_FLAVOR,
	RCU_SCHED_FLAVOR,
	RCU_TASKS_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
static inline void rcutorture_record_progress(unsigned long vernum) { }
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_bh_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_sched_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void rcu_bh_force_quiescent_state(void) { }
static inline void rcu_sched_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
#else /* #ifdef CONFIG_TINY_RCU */
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_bh_get_gp_seq(void);
unsigned long rcu_sched_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long rcu_exp_batches_completed_sched(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif

#endif /* __LINUX_RCU_H */