/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
#define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)

/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time.  This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
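
/*
 * Usage sketch (illustrative only, not part of this header): how the
 * rcu_seq_*() helpers above drive a grace-period sequence counter.  The
 * local counter "gpseq" is hypothetical; the real users are the RCU and
 * SRCU grace-period state machines.
 *
 *	unsigned long gpseq = 0;	// low RCU_SEQ_CTR_SHIFT bits = state
 *	unsigned long s;
 *
 *	s = rcu_seq_snap(&gpseq);	// s == 4, first "full GP done" value
 *	rcu_seq_start(&gpseq);		// gpseq == 1, GP now in progress
 *	...				// update-side grace-period work
 *	rcu_seq_end(&gpseq);		// gpseq == 4, state cleared
 *	WARN_ON_ONCE(!rcu_seq_done(&gpseq, s));	// snapshot now satisfied
 */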

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either of new and old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
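
/*
 * Worked example (illustrative): with RCU_SEQ_CTR_SHIFT == 2, each full
 * grace period advances the sequence number by 4.  For old == 4 and
 * new == 12, both sampled between grace periods, rcu_seq_diff() computes
 * rnd_diff == 8 and returns ((8 - 3 - 1) >> 2) + 2 == 3 even though only
 * two full grace periods elapsed -- a deliberately rough estimate that
 * errs high because either sample might have been taken mid-GP.
 */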

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API.  These are in this file because they are used by all
 * RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kfree_rcu_offset(offset)) {
		RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
		kfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	} else {
		RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
		f = head->func;
		WRITE_ONCE(head->func, (rcu_callback_t)0L);
		f(head);
		rcu_lock_release(&rcu_callback_map);
		return false;
	}
}
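
/*
 * Illustrative sketch (not part of this header) of the offset encoding
 * that __rcu_reclaim() above decodes.  kfree_rcu() stores the offset of
 * the rcu_head within its enclosing structure in ->func; because real
 * function addresses are never that small, __is_kfree_rcu_offset()
 * distinguishes the two cases.  For a hypothetical user structure:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	kfree_rcu(p, rh);	// records offsetof(struct foo, rh)
 *
 * __rcu_reclaim() then recovers the base pointer via
 * (void *)&p->rh - offsetof(struct foo, rh), which equals p, and passes
 * it to kfree().
 */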

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_suppress;
int rcu_jiffies_till_stall_check(void);

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)	tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(SRCU) || !defined(TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define srcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for ((cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
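
/*
 * Usage sketch (illustrative): quiescent-state and stall-warning code
 * typically combines the iterators above to visit every CPU that the
 * rcu_node tree covers:
 *
 *	struct rcu_node *rnp;
 *	int cpu;
 *
 *	rcu_for_each_leaf_node(rnp)
 *		for_each_leaf_node_possible_cpu(rnp, cpu)
 *			pr_info("leaf CPUs %d-%d: visiting CPU %d\n",
 *				rnp->grplo, rnp->grphi, cpu);
 */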

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on
 * ->lock.
 */
#define raw_spin_lock_rcu_node(p) \
do { \
	raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irq_rcu_node(p) \
do { \
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

#define raw_spin_trylock_rcu_node(p) \
({ \
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
	\
	if (___locked) \
		smp_mb__after_unlock_lock(); \
	___locked; \
})

#define raw_lockdep_assert_held_rcu_node(p) \
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
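
/*
 * Usage sketch (illustrative): code that walks up the rcu_node tree drops
 * one node's lock before taking its parent's.  Acquiring through these
 * wrappers makes each such unlock+lock pair act as a full memory barrier,
 * so updates made under the child's lock are ordered before anything done
 * under the parent's:
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);		// leaf
 *	// ... record quiescent states in rnp->qsmask ...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 *	raw_spin_lock_irqsave_rcu_node(rnp->parent, flags);	// parent
 *	// ... fully ordered against the leaf-level updates ...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp->parent, flags);
 */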

#endif /* #if defined(SRCU) || !defined(TINY_RCU) */

#ifdef CONFIG_SRCU
void srcu_init(void);
#else /* #ifdef CONFIG_SRCU */
static inline void srcu_init(void) { }
#endif /* #else #ifdef CONFIG_SRCU */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_BH_FLAVOR,
	RCU_SCHED_FLAVOR,
	RCU_TASKS_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
static inline void rcutorture_record_progress(unsigned long vernum) { }
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
#else /* #ifdef CONFIG_TINY_RCU */
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
void rcu_bind_current_to_nocb(void);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#endif /* __LINUX_RCU_H */