1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Sleepable Read-Copy Update mechanism for mutual exclusion. 4 * 5 * Copyright (C) IBM Corporation, 2006 6 * Copyright (C) Fujitsu, 2012 7 * 8 * Author: Paul McKenney <paulmck@linux.ibm.com> 9 * Lai Jiangshan <laijs@cn.fujitsu.com> 10 * 11 * For detailed explanation of Read-Copy Update mechanism see - 12 * Documentation/RCU/ *.txt 13 * 14 */ 15 16 #define pr_fmt(fmt) "rcu: " fmt 17 18 #include <linux/export.h> 19 #include <linux/mutex.h> 20 #include <linux/percpu.h> 21 #include <linux/preempt.h> 22 #include <linux/rcupdate_wait.h> 23 #include <linux/sched.h> 24 #include <linux/smp.h> 25 #include <linux/delay.h> 26 #include <linux/module.h> 27 #include <linux/srcu.h> 28 29 #include "rcu.h" 30 #include "rcu_segcblist.h" 31 32 /* Holdoff in nanoseconds for auto-expediting. */ 33 #define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000) 34 static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF; 35 module_param(exp_holdoff, ulong, 0444); 36 37 /* Overflow-check frequency. N bits roughly says every 2**N grace periods. */ 38 static ulong counter_wrap_check = (ULONG_MAX >> 2); 39 module_param(counter_wrap_check, ulong, 0444); 40 41 /* Early-boot callback-management, so early that no lock is required! */ 42 static LIST_HEAD(srcu_boot_list); 43 static bool __read_mostly srcu_init_done; 44 45 static void srcu_invoke_callbacks(struct work_struct *work); 46 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay); 47 static void process_srcu(struct work_struct *work); 48 static void srcu_delay_timer(struct timer_list *t); 49 50 /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */ 51 #define spin_lock_rcu_node(p) \ 52 do { \ 53 spin_lock(&ACCESS_PRIVATE(p, lock)); \ 54 smp_mb__after_unlock_lock(); \ 55 } while (0) 56 57 #define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock)) 58 59 #define spin_lock_irq_rcu_node(p) \ 60 do { \ 61 spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \ 62 smp_mb__after_unlock_lock(); \ 63 } while (0) 64 65 #define spin_unlock_irq_rcu_node(p) \ 66 spin_unlock_irq(&ACCESS_PRIVATE(p, lock)) 67 68 #define spin_lock_irqsave_rcu_node(p, flags) \ 69 do { \ 70 spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \ 71 smp_mb__after_unlock_lock(); \ 72 } while (0) 73 74 #define spin_unlock_irqrestore_rcu_node(p, flags) \ 75 spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags) \ 76 77 /* 78 * Initialize SRCU combining tree. Note that statically allocated 79 * srcu_struct structures might already have srcu_read_lock() and 80 * srcu_read_unlock() running against them. So if the is_static parameter 81 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[]. 82 */ 83 static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static) 84 { 85 int cpu; 86 int i; 87 int level = 0; 88 int levelspread[RCU_NUM_LVLS]; 89 struct srcu_data *sdp; 90 struct srcu_node *snp; 91 struct srcu_node *snp_first; 92 93 /* Work out the overall tree geometry. */ 94 ssp->level[0] = &ssp->node[0]; 95 for (i = 1; i < rcu_num_lvls; i++) 96 ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1]; 97 rcu_init_levelspread(levelspread, num_rcu_lvl); 98 99 /* Each pass through this loop initializes one srcu_node structure. */ 100 srcu_for_each_node_breadth_first(ssp, snp) { 101 spin_lock_init(&ACCESS_PRIVATE(snp, lock)); 102 WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) != 103 ARRAY_SIZE(snp->srcu_data_have_cbs)); 104 for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) { 105 snp->srcu_have_cbs[i] = 0; 106 snp->srcu_data_have_cbs[i] = 0; 107 } 108 snp->srcu_gp_seq_needed_exp = 0; 109 snp->grplo = -1; 110 snp->grphi = -1; 111 if (snp == &ssp->node[0]) { 112 /* Root node, special case. */ 113 snp->srcu_parent = NULL; 114 continue; 115 } 116 117 /* Non-root node. */ 118 if (snp == ssp->level[level + 1]) 119 level++; 120 snp->srcu_parent = ssp->level[level - 1] + 121 (snp - ssp->level[level]) / 122 levelspread[level - 1]; 123 } 124 125 /* 126 * Initialize the per-CPU srcu_data array, which feeds into the 127 * leaves of the srcu_node tree. 128 */ 129 WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) != 130 ARRAY_SIZE(sdp->srcu_unlock_count)); 131 level = rcu_num_lvls - 1; 132 snp_first = ssp->level[level]; 133 for_each_possible_cpu(cpu) { 134 sdp = per_cpu_ptr(ssp->sda, cpu); 135 spin_lock_init(&ACCESS_PRIVATE(sdp, lock)); 136 rcu_segcblist_init(&sdp->srcu_cblist); 137 sdp->srcu_cblist_invoking = false; 138 sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq; 139 sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq; 140 sdp->mynode = &snp_first[cpu / levelspread[level]]; 141 for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) { 142 if (snp->grplo < 0) 143 snp->grplo = cpu; 144 snp->grphi = cpu; 145 } 146 sdp->cpu = cpu; 147 INIT_WORK(&sdp->work, srcu_invoke_callbacks); 148 timer_setup(&sdp->delay_work, srcu_delay_timer, 0); 149 sdp->ssp = ssp; 150 sdp->grpmask = 1 << (cpu - sdp->mynode->grplo); 151 if (is_static) 152 continue; 153 154 /* Dynamically allocated, better be no srcu_read_locks()! */ 155 for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) { 156 sdp->srcu_lock_count[i] = 0; 157 sdp->srcu_unlock_count[i] = 0; 158 } 159 } 160 } 161 162 /* 163 * Initialize non-compile-time initialized fields, including the 164 * associated srcu_node and srcu_data structures. The is_static 165 * parameter is passed through to init_srcu_struct_nodes(), and 166 * also tells us that ->sda has already been wired up to srcu_data. 167 */ 168 static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static) 169 { 170 mutex_init(&ssp->srcu_cb_mutex); 171 mutex_init(&ssp->srcu_gp_mutex); 172 ssp->srcu_idx = 0; 173 ssp->srcu_gp_seq = 0; 174 ssp->srcu_barrier_seq = 0; 175 mutex_init(&ssp->srcu_barrier_mutex); 176 atomic_set(&ssp->srcu_barrier_cpu_cnt, 0); 177 INIT_DELAYED_WORK(&ssp->work, process_srcu); 178 if (!is_static) 179 ssp->sda = alloc_percpu(struct srcu_data); 180 init_srcu_struct_nodes(ssp, is_static); 181 ssp->srcu_gp_seq_needed_exp = 0; 182 ssp->srcu_last_gp_end = ktime_get_mono_fast_ns(); 183 smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */ 184 return ssp->sda ? 0 : -ENOMEM; 185 } 186 187 #ifdef CONFIG_DEBUG_LOCK_ALLOC 188 189 int __init_srcu_struct(struct srcu_struct *ssp, const char *name, 190 struct lock_class_key *key) 191 { 192 /* Don't re-initialize a lock while it is held. */ 193 debug_check_no_locks_freed((void *)ssp, sizeof(*ssp)); 194 lockdep_init_map(&ssp->dep_map, name, key, 0); 195 spin_lock_init(&ACCESS_PRIVATE(ssp, lock)); 196 return init_srcu_struct_fields(ssp, false); 197 } 198 EXPORT_SYMBOL_GPL(__init_srcu_struct); 199 200 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 201 202 /** 203 * init_srcu_struct - initialize a sleep-RCU structure 204 * @ssp: structure to initialize. 205 * 206 * Must invoke this on a given srcu_struct before passing that srcu_struct 207 * to any other function. Each srcu_struct represents a separate domain 208 * of SRCU protection. 209 */ 210 int init_srcu_struct(struct srcu_struct *ssp) 211 { 212 spin_lock_init(&ACCESS_PRIVATE(ssp, lock)); 213 return init_srcu_struct_fields(ssp, false); 214 } 215 EXPORT_SYMBOL_GPL(init_srcu_struct); 216 217 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 218 219 /* 220 * First-use initialization of statically allocated srcu_struct 221 * structure. Wiring up the combining tree is more than can be 222 * done with compile-time initialization, so this check is added 223 * to each update-side SRCU primitive. Use ssp->lock, which -is- 224 * compile-time initialized, to resolve races involving multiple 225 * CPUs trying to garner first-use privileges. 226 */ 227 static void check_init_srcu_struct(struct srcu_struct *ssp) 228 { 229 unsigned long flags; 230 231 /* The smp_load_acquire() pairs with the smp_store_release(). */ 232 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/ 233 return; /* Already initialized. */ 234 spin_lock_irqsave_rcu_node(ssp, flags); 235 if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) { 236 spin_unlock_irqrestore_rcu_node(ssp, flags); 237 return; 238 } 239 init_srcu_struct_fields(ssp, true); 240 spin_unlock_irqrestore_rcu_node(ssp, flags); 241 } 242 243 /* 244 * Returns approximate total of the readers' ->srcu_lock_count[] values 245 * for the rank of per-CPU counters specified by idx. 246 */ 247 static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx) 248 { 249 int cpu; 250 unsigned long sum = 0; 251 252 for_each_possible_cpu(cpu) { 253 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); 254 255 sum += READ_ONCE(cpuc->srcu_lock_count[idx]); 256 } 257 return sum; 258 } 259 260 /* 261 * Returns approximate total of the readers' ->srcu_unlock_count[] values 262 * for the rank of per-CPU counters specified by idx. 263 */ 264 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx) 265 { 266 int cpu; 267 unsigned long sum = 0; 268 269 for_each_possible_cpu(cpu) { 270 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); 271 272 sum += READ_ONCE(cpuc->srcu_unlock_count[idx]); 273 } 274 return sum; 275 } 276 277 /* 278 * Return true if the number of pre-existing readers is determined to 279 * be zero. 280 */ 281 static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx) 282 { 283 unsigned long unlocks; 284 285 unlocks = srcu_readers_unlock_idx(ssp, idx); 286 287 /* 288 * Make sure that a lock is always counted if the corresponding 289 * unlock is counted. Needs to be a smp_mb() as the read side may 290 * contain a read from a variable that is written to before the 291 * synchronize_srcu() in the write side. In this case smp_mb()s 292 * A and B act like the store buffering pattern. 293 * 294 * This smp_mb() also pairs with smp_mb() C to prevent accesses 295 * after the synchronize_srcu() from being executed before the 296 * grace period ends. 297 */ 298 smp_mb(); /* A */ 299 300 /* 301 * If the locks are the same as the unlocks, then there must have 302 * been no readers on this index at some time in between. This does 303 * not mean that there are no more readers, as one could have read 304 * the current index but not have incremented the lock counter yet. 305 * 306 * So suppose that the updater is preempted here for so long 307 * that more than ULONG_MAX non-nested readers come and go in 308 * the meantime. It turns out that this cannot result in overflow 309 * because if a reader modifies its unlock count after we read it 310 * above, then that reader's next load of ->srcu_idx is guaranteed 311 * to get the new value, which will cause it to operate on the 312 * other bank of counters, where it cannot contribute to the 313 * overflow of these counters. This means that there is a maximum 314 * of 2*NR_CPUS increments, which cannot overflow given current 315 * systems, especially not on 64-bit systems. 316 * 317 * OK, how about nesting? This does impose a limit on nesting 318 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient, 319 * especially on 64-bit systems. 320 */ 321 return srcu_readers_lock_idx(ssp, idx) == unlocks; 322 } 323 324 /** 325 * srcu_readers_active - returns true if there are readers. and false 326 * otherwise 327 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock). 328 * 329 * Note that this is not an atomic primitive, and can therefore suffer 330 * severe errors when invoked on an active srcu_struct. That said, it 331 * can be useful as an error check at cleanup time. 332 */ 333 static bool srcu_readers_active(struct srcu_struct *ssp) 334 { 335 int cpu; 336 unsigned long sum = 0; 337 338 for_each_possible_cpu(cpu) { 339 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); 340 341 sum += READ_ONCE(cpuc->srcu_lock_count[0]); 342 sum += READ_ONCE(cpuc->srcu_lock_count[1]); 343 sum -= READ_ONCE(cpuc->srcu_unlock_count[0]); 344 sum -= READ_ONCE(cpuc->srcu_unlock_count[1]); 345 } 346 return sum; 347 } 348 349 #define SRCU_INTERVAL 1 350 351 /* 352 * Return grace-period delay, zero if there are expedited grace 353 * periods pending, SRCU_INTERVAL otherwise. 354 */ 355 static unsigned long srcu_get_delay(struct srcu_struct *ssp) 356 { 357 if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), 358 READ_ONCE(ssp->srcu_gp_seq_needed_exp))) 359 return 0; 360 return SRCU_INTERVAL; 361 } 362 363 /* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */ 364 void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced) 365 { 366 int cpu; 367 368 if (WARN_ON(!srcu_get_delay(ssp))) 369 return; /* Just leak it! */ 370 if (WARN_ON(srcu_readers_active(ssp))) 371 return; /* Just leak it! */ 372 if (quiesced) { 373 if (WARN_ON(delayed_work_pending(&ssp->work))) 374 return; /* Just leak it! */ 375 } else { 376 flush_delayed_work(&ssp->work); 377 } 378 for_each_possible_cpu(cpu) { 379 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu); 380 381 if (quiesced) { 382 if (WARN_ON(timer_pending(&sdp->delay_work))) 383 return; /* Just leak it! */ 384 if (WARN_ON(work_pending(&sdp->work))) 385 return; /* Just leak it! */ 386 } else { 387 del_timer_sync(&sdp->delay_work); 388 flush_work(&sdp->work); 389 } 390 } 391 if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) || 392 WARN_ON(srcu_readers_active(ssp))) { 393 pr_info("%s: Active srcu_struct %p state: %d\n", 394 __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))); 395 return; /* Caller forgot to stop doing call_srcu()? */ 396 } 397 free_percpu(ssp->sda); 398 ssp->sda = NULL; 399 } 400 EXPORT_SYMBOL_GPL(_cleanup_srcu_struct); 401 402 /* 403 * Counts the new reader in the appropriate per-CPU element of the 404 * srcu_struct. 405 * Returns an index that must be passed to the matching srcu_read_unlock(). 406 */ 407 int __srcu_read_lock(struct srcu_struct *ssp) 408 { 409 int idx; 410 411 idx = READ_ONCE(ssp->srcu_idx) & 0x1; 412 this_cpu_inc(ssp->sda->srcu_lock_count[idx]); 413 smp_mb(); /* B */ /* Avoid leaking the critical section. */ 414 return idx; 415 } 416 EXPORT_SYMBOL_GPL(__srcu_read_lock); 417 418 /* 419 * Removes the count for the old reader from the appropriate per-CPU 420 * element of the srcu_struct. Note that this may well be a different 421 * CPU than that which was incremented by the corresponding srcu_read_lock(). 422 */ 423 void __srcu_read_unlock(struct srcu_struct *ssp, int idx) 424 { 425 smp_mb(); /* C */ /* Avoid leaking the critical section. */ 426 this_cpu_inc(ssp->sda->srcu_unlock_count[idx]); 427 } 428 EXPORT_SYMBOL_GPL(__srcu_read_unlock); 429 430 /* 431 * We use an adaptive strategy for synchronize_srcu() and especially for 432 * synchronize_srcu_expedited(). We spin for a fixed time period 433 * (defined below) to allow SRCU readers to exit their read-side critical 434 * sections. If there are still some readers after a few microseconds, 435 * we repeatedly block for 1-millisecond time periods. 436 */ 437 #define SRCU_RETRY_CHECK_DELAY 5 438 439 /* 440 * Start an SRCU grace period. 441 */ 442 static void srcu_gp_start(struct srcu_struct *ssp) 443 { 444 struct srcu_data *sdp = this_cpu_ptr(ssp->sda); 445 int state; 446 447 lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock)); 448 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)); 449 spin_lock_rcu_node(sdp); /* Interrupts already disabled. */ 450 rcu_segcblist_advance(&sdp->srcu_cblist, 451 rcu_seq_current(&ssp->srcu_gp_seq)); 452 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, 453 rcu_seq_snap(&ssp->srcu_gp_seq)); 454 spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */ 455 smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */ 456 rcu_seq_start(&ssp->srcu_gp_seq); 457 state = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)); 458 WARN_ON_ONCE(state != SRCU_STATE_SCAN1); 459 } 460 461 462 static void srcu_delay_timer(struct timer_list *t) 463 { 464 struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work); 465 466 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work); 467 } 468 469 static void srcu_queue_delayed_work_on(struct srcu_data *sdp, 470 unsigned long delay) 471 { 472 if (!delay) { 473 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work); 474 return; 475 } 476 477 timer_reduce(&sdp->delay_work, jiffies + delay); 478 } 479 480 /* 481 * Schedule callback invocation for the specified srcu_data structure, 482 * if possible, on the corresponding CPU. 483 */ 484 static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay) 485 { 486 srcu_queue_delayed_work_on(sdp, delay); 487 } 488 489 /* 490 * Schedule callback invocation for all srcu_data structures associated 491 * with the specified srcu_node structure that have callbacks for the 492 * just-completed grace period, the one corresponding to idx. If possible, 493 * schedule this invocation on the corresponding CPUs. 494 */ 495 static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp, 496 unsigned long mask, unsigned long delay) 497 { 498 int cpu; 499 500 for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { 501 if (!(mask & (1 << (cpu - snp->grplo)))) 502 continue; 503 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay); 504 } 505 } 506 507 /* 508 * Note the end of an SRCU grace period. Initiates callback invocation 509 * and starts a new grace period if needed. 510 * 511 * The ->srcu_cb_mutex acquisition does not protect any data, but 512 * instead prevents more than one grace period from starting while we 513 * are initiating callback invocation. This allows the ->srcu_have_cbs[] 514 * array to have a finite number of elements. 515 */ 516 static void srcu_gp_end(struct srcu_struct *ssp) 517 { 518 unsigned long cbdelay; 519 bool cbs; 520 bool last_lvl; 521 int cpu; 522 unsigned long flags; 523 unsigned long gpseq; 524 int idx; 525 unsigned long mask; 526 struct srcu_data *sdp; 527 struct srcu_node *snp; 528 529 /* Prevent more than one additional grace period. */ 530 mutex_lock(&ssp->srcu_cb_mutex); 531 532 /* End the current grace period. */ 533 spin_lock_irq_rcu_node(ssp); 534 idx = rcu_seq_state(ssp->srcu_gp_seq); 535 WARN_ON_ONCE(idx != SRCU_STATE_SCAN2); 536 cbdelay = srcu_get_delay(ssp); 537 ssp->srcu_last_gp_end = ktime_get_mono_fast_ns(); 538 rcu_seq_end(&ssp->srcu_gp_seq); 539 gpseq = rcu_seq_current(&ssp->srcu_gp_seq); 540 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq)) 541 ssp->srcu_gp_seq_needed_exp = gpseq; 542 spin_unlock_irq_rcu_node(ssp); 543 mutex_unlock(&ssp->srcu_gp_mutex); 544 /* A new grace period can start at this point. But only one. */ 545 546 /* Initiate callback invocation as needed. */ 547 idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs); 548 srcu_for_each_node_breadth_first(ssp, snp) { 549 spin_lock_irq_rcu_node(snp); 550 cbs = false; 551 last_lvl = snp >= ssp->level[rcu_num_lvls - 1]; 552 if (last_lvl) 553 cbs = snp->srcu_have_cbs[idx] == gpseq; 554 snp->srcu_have_cbs[idx] = gpseq; 555 rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1); 556 if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq)) 557 snp->srcu_gp_seq_needed_exp = gpseq; 558 mask = snp->srcu_data_have_cbs[idx]; 559 snp->srcu_data_have_cbs[idx] = 0; 560 spin_unlock_irq_rcu_node(snp); 561 if (cbs) 562 srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay); 563 564 /* Occasionally prevent srcu_data counter wrap. */ 565 if (!(gpseq & counter_wrap_check) && last_lvl) 566 for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { 567 sdp = per_cpu_ptr(ssp->sda, cpu); 568 spin_lock_irqsave_rcu_node(sdp, flags); 569 if (ULONG_CMP_GE(gpseq, 570 sdp->srcu_gp_seq_needed + 100)) 571 sdp->srcu_gp_seq_needed = gpseq; 572 if (ULONG_CMP_GE(gpseq, 573 sdp->srcu_gp_seq_needed_exp + 100)) 574 sdp->srcu_gp_seq_needed_exp = gpseq; 575 spin_unlock_irqrestore_rcu_node(sdp, flags); 576 } 577 } 578 579 /* Callback initiation done, allow grace periods after next. */ 580 mutex_unlock(&ssp->srcu_cb_mutex); 581 582 /* Start a new grace period if needed. */ 583 spin_lock_irq_rcu_node(ssp); 584 gpseq = rcu_seq_current(&ssp->srcu_gp_seq); 585 if (!rcu_seq_state(gpseq) && 586 ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) { 587 srcu_gp_start(ssp); 588 spin_unlock_irq_rcu_node(ssp); 589 srcu_reschedule(ssp, 0); 590 } else { 591 spin_unlock_irq_rcu_node(ssp); 592 } 593 } 594 595 /* 596 * Funnel-locking scheme to scalably mediate many concurrent expedited 597 * grace-period requests. This function is invoked for the first known 598 * expedited request for a grace period that has already been requested, 599 * but without expediting. To start a completely new grace period, 600 * whether expedited or not, use srcu_funnel_gp_start() instead. 601 */ 602 static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp, 603 unsigned long s) 604 { 605 unsigned long flags; 606 607 for (; snp != NULL; snp = snp->srcu_parent) { 608 if (rcu_seq_done(&ssp->srcu_gp_seq, s) || 609 ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s)) 610 return; 611 spin_lock_irqsave_rcu_node(snp, flags); 612 if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) { 613 spin_unlock_irqrestore_rcu_node(snp, flags); 614 return; 615 } 616 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s); 617 spin_unlock_irqrestore_rcu_node(snp, flags); 618 } 619 spin_lock_irqsave_rcu_node(ssp, flags); 620 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s)) 621 ssp->srcu_gp_seq_needed_exp = s; 622 spin_unlock_irqrestore_rcu_node(ssp, flags); 623 } 624 625 /* 626 * Funnel-locking scheme to scalably mediate many concurrent grace-period 627 * requests. The winner has to do the work of actually starting grace 628 * period s. Losers must either ensure that their desired grace-period 629 * number is recorded on at least their leaf srcu_node structure, or they 630 * must take steps to invoke their own callbacks. 631 * 632 * Note that this function also does the work of srcu_funnel_exp_start(), 633 * in some cases by directly invoking it. 634 */ 635 static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp, 636 unsigned long s, bool do_norm) 637 { 638 unsigned long flags; 639 int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs); 640 struct srcu_node *snp = sdp->mynode; 641 unsigned long snp_seq; 642 643 /* Each pass through the loop does one level of the srcu_node tree. */ 644 for (; snp != NULL; snp = snp->srcu_parent) { 645 if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode) 646 return; /* GP already done and CBs recorded. */ 647 spin_lock_irqsave_rcu_node(snp, flags); 648 if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) { 649 snp_seq = snp->srcu_have_cbs[idx]; 650 if (snp == sdp->mynode && snp_seq == s) 651 snp->srcu_data_have_cbs[idx] |= sdp->grpmask; 652 spin_unlock_irqrestore_rcu_node(snp, flags); 653 if (snp == sdp->mynode && snp_seq != s) { 654 srcu_schedule_cbs_sdp(sdp, do_norm 655 ? SRCU_INTERVAL 656 : 0); 657 return; 658 } 659 if (!do_norm) 660 srcu_funnel_exp_start(ssp, snp, s); 661 return; 662 } 663 snp->srcu_have_cbs[idx] = s; 664 if (snp == sdp->mynode) 665 snp->srcu_data_have_cbs[idx] |= sdp->grpmask; 666 if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s)) 667 snp->srcu_gp_seq_needed_exp = s; 668 spin_unlock_irqrestore_rcu_node(snp, flags); 669 } 670 671 /* Top of tree, must ensure the grace period will be started. */ 672 spin_lock_irqsave_rcu_node(ssp, flags); 673 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) { 674 /* 675 * Record need for grace period s. Pair with load 676 * acquire setting up for initialization. 677 */ 678 smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/ 679 } 680 if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s)) 681 ssp->srcu_gp_seq_needed_exp = s; 682 683 /* If grace period not already done and none in progress, start it. */ 684 if (!rcu_seq_done(&ssp->srcu_gp_seq, s) && 685 rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) { 686 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)); 687 srcu_gp_start(ssp); 688 if (likely(srcu_init_done)) 689 queue_delayed_work(rcu_gp_wq, &ssp->work, 690 srcu_get_delay(ssp)); 691 else if (list_empty(&ssp->work.work.entry)) 692 list_add(&ssp->work.work.entry, &srcu_boot_list); 693 } 694 spin_unlock_irqrestore_rcu_node(ssp, flags); 695 } 696 697 /* 698 * Wait until all readers counted by array index idx complete, but 699 * loop an additional time if there is an expedited grace period pending. 700 * The caller must ensure that ->srcu_idx is not changed while checking. 701 */ 702 static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount) 703 { 704 for (;;) { 705 if (srcu_readers_active_idx_check(ssp, idx)) 706 return true; 707 if (--trycount + !srcu_get_delay(ssp) <= 0) 708 return false; 709 udelay(SRCU_RETRY_CHECK_DELAY); 710 } 711 } 712 713 /* 714 * Increment the ->srcu_idx counter so that future SRCU readers will 715 * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows 716 * us to wait for pre-existing readers in a starvation-free manner. 717 */ 718 static void srcu_flip(struct srcu_struct *ssp) 719 { 720 /* 721 * Ensure that if this updater saw a given reader's increment 722 * from __srcu_read_lock(), that reader was using an old value 723 * of ->srcu_idx. Also ensure that if a given reader sees the 724 * new value of ->srcu_idx, this updater's earlier scans cannot 725 * have seen that reader's increments (which is OK, because this 726 * grace period need not wait on that reader). 727 */ 728 smp_mb(); /* E */ /* Pairs with B and C. */ 729 730 WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); 731 732 /* 733 * Ensure that if the updater misses an __srcu_read_unlock() 734 * increment, that task's next __srcu_read_lock() will see the 735 * above counter update. Note that both this memory barrier 736 * and the one in srcu_readers_active_idx_check() provide the 737 * guarantee for __srcu_read_lock(). 738 */ 739 smp_mb(); /* D */ /* Pairs with C. */ 740 } 741 742 /* 743 * If SRCU is likely idle, return true, otherwise return false. 744 * 745 * Note that it is OK for several current from-idle requests for a new 746 * grace period from idle to specify expediting because they will all end 747 * up requesting the same grace period anyhow. So no loss. 748 * 749 * Note also that if any CPU (including the current one) is still invoking 750 * callbacks, this function will nevertheless say "idle". This is not 751 * ideal, but the overhead of checking all CPUs' callback lists is even 752 * less ideal, especially on large systems. Furthermore, the wakeup 753 * can happen before the callback is fully removed, so we have no choice 754 * but to accept this type of error. 755 * 756 * This function is also subject to counter-wrap errors, but let's face 757 * it, if this function was preempted for enough time for the counters 758 * to wrap, it really doesn't matter whether or not we expedite the grace 759 * period. The extra overhead of a needlessly expedited grace period is 760 * negligible when amoritized over that time period, and the extra latency 761 * of a needlessly non-expedited grace period is similarly negligible. 762 */ 763 static bool srcu_might_be_idle(struct srcu_struct *ssp) 764 { 765 unsigned long curseq; 766 unsigned long flags; 767 struct srcu_data *sdp; 768 unsigned long t; 769 770 /* If the local srcu_data structure has callbacks, not idle. */ 771 local_irq_save(flags); 772 sdp = this_cpu_ptr(ssp->sda); 773 if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) { 774 local_irq_restore(flags); 775 return false; /* Callbacks already present, so not idle. */ 776 } 777 local_irq_restore(flags); 778 779 /* 780 * No local callbacks, so probabalistically probe global state. 781 * Exact information would require acquiring locks, which would 782 * kill scalability, hence the probabalistic nature of the probe. 783 */ 784 785 /* First, see if enough time has passed since the last GP. */ 786 t = ktime_get_mono_fast_ns(); 787 if (exp_holdoff == 0 || 788 time_in_range_open(t, ssp->srcu_last_gp_end, 789 ssp->srcu_last_gp_end + exp_holdoff)) 790 return false; /* Too soon after last GP. */ 791 792 /* Next, check for probable idleness. */ 793 curseq = rcu_seq_current(&ssp->srcu_gp_seq); 794 smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */ 795 if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed))) 796 return false; /* Grace period in progress, so not idle. */ 797 smp_mb(); /* Order ->srcu_gp_seq with prior access. */ 798 if (curseq != rcu_seq_current(&ssp->srcu_gp_seq)) 799 return false; /* GP # changed, so not idle. */ 800 return true; /* With reasonable probability, idle! */ 801 } 802 803 /* 804 * SRCU callback function to leak a callback. 805 */ 806 static void srcu_leak_callback(struct rcu_head *rhp) 807 { 808 } 809 810 /* 811 * Enqueue an SRCU callback on the srcu_data structure associated with 812 * the current CPU and the specified srcu_struct structure, initiating 813 * grace-period processing if it is not already running. 814 * 815 * Note that all CPUs must agree that the grace period extended beyond 816 * all pre-existing SRCU read-side critical section. On systems with 817 * more than one CPU, this means that when "func()" is invoked, each CPU 818 * is guaranteed to have executed a full memory barrier since the end of 819 * its last corresponding SRCU read-side critical section whose beginning 820 * preceded the call to call_srcu(). It also means that each CPU executing 821 * an SRCU read-side critical section that continues beyond the start of 822 * "func()" must have executed a memory barrier after the call_srcu() 823 * but before the beginning of that SRCU read-side critical section. 824 * Note that these guarantees include CPUs that are offline, idle, or 825 * executing in user mode, as well as CPUs that are executing in the kernel. 826 * 827 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the 828 * resulting SRCU callback function "func()", then both CPU A and CPU 829 * B are guaranteed to execute a full memory barrier during the time 830 * interval between the call to call_srcu() and the invocation of "func()". 831 * This guarantee applies even if CPU A and CPU B are the same CPU (but 832 * again only if the system has more than one CPU). 833 * 834 * Of course, these guarantees apply only for invocations of call_srcu(), 835 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same 836 * srcu_struct structure. 837 */ 838 void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, 839 rcu_callback_t func, bool do_norm) 840 { 841 unsigned long flags; 842 int idx; 843 bool needexp = false; 844 bool needgp = false; 845 unsigned long s; 846 struct srcu_data *sdp; 847 848 check_init_srcu_struct(ssp); 849 if (debug_rcu_head_queue(rhp)) { 850 /* Probable double call_srcu(), so leak the callback. */ 851 WRITE_ONCE(rhp->func, srcu_leak_callback); 852 WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n"); 853 return; 854 } 855 rhp->func = func; 856 idx = srcu_read_lock(ssp); 857 local_irq_save(flags); 858 sdp = this_cpu_ptr(ssp->sda); 859 spin_lock_rcu_node(sdp); 860 rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false); 861 rcu_segcblist_advance(&sdp->srcu_cblist, 862 rcu_seq_current(&ssp->srcu_gp_seq)); 863 s = rcu_seq_snap(&ssp->srcu_gp_seq); 864 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); 865 if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { 866 sdp->srcu_gp_seq_needed = s; 867 needgp = true; 868 } 869 if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) { 870 sdp->srcu_gp_seq_needed_exp = s; 871 needexp = true; 872 } 873 spin_unlock_irqrestore_rcu_node(sdp, flags); 874 if (needgp) 875 srcu_funnel_gp_start(ssp, sdp, s, do_norm); 876 else if (needexp) 877 srcu_funnel_exp_start(ssp, sdp->mynode, s); 878 srcu_read_unlock(ssp, idx); 879 } 880 881 /** 882 * call_srcu() - Queue a callback for invocation after an SRCU grace period 883 * @ssp: srcu_struct in queue the callback 884 * @rhp: structure to be used for queueing the SRCU callback. 885 * @func: function to be invoked after the SRCU grace period 886 * 887 * The callback function will be invoked some time after a full SRCU 888 * grace period elapses, in other words after all pre-existing SRCU 889 * read-side critical sections have completed. However, the callback 890 * function might well execute concurrently with other SRCU read-side 891 * critical sections that started after call_srcu() was invoked. SRCU 892 * read-side critical sections are delimited by srcu_read_lock() and 893 * srcu_read_unlock(), and may be nested. 894 * 895 * The callback will be invoked from process context, but must nevertheless 896 * be fast and must not block. 897 */ 898 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, 899 rcu_callback_t func) 900 { 901 __call_srcu(ssp, rhp, func, true); 902 } 903 EXPORT_SYMBOL_GPL(call_srcu); 904 905 /* 906 * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). 907 */ 908 static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm) 909 { 910 struct rcu_synchronize rcu; 911 912 RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) || 913 lock_is_held(&rcu_bh_lock_map) || 914 lock_is_held(&rcu_lock_map) || 915 lock_is_held(&rcu_sched_lock_map), 916 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section"); 917 918 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) 919 return; 920 might_sleep(); 921 check_init_srcu_struct(ssp); 922 init_completion(&rcu.completion); 923 init_rcu_head_on_stack(&rcu.head); 924 __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm); 925 wait_for_completion(&rcu.completion); 926 destroy_rcu_head_on_stack(&rcu.head); 927 928 /* 929 * Make sure that later code is ordered after the SRCU grace 930 * period. This pairs with the spin_lock_irq_rcu_node() 931 * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed 932 * because the current CPU might have been totally uninvolved with 933 * (and thus unordered against) that grace period. 934 */ 935 smp_mb(); 936 } 937 938 /** 939 * synchronize_srcu_expedited - Brute-force SRCU grace period 940 * @ssp: srcu_struct with which to synchronize. 941 * 942 * Wait for an SRCU grace period to elapse, but be more aggressive about 943 * spinning rather than blocking when waiting. 944 * 945 * Note that synchronize_srcu_expedited() has the same deadlock and 946 * memory-ordering properties as does synchronize_srcu(). 947 */ 948 void synchronize_srcu_expedited(struct srcu_struct *ssp) 949 { 950 __synchronize_srcu(ssp, rcu_gp_is_normal()); 951 } 952 EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); 953 954 /** 955 * synchronize_srcu - wait for prior SRCU read-side critical-section completion 956 * @ssp: srcu_struct with which to synchronize. 957 * 958 * Wait for the count to drain to zero of both indexes. To avoid the 959 * possible starvation of synchronize_srcu(), it waits for the count of 960 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first, 961 * and then flip the srcu_idx and wait for the count of the other index. 962 * 963 * Can block; must be called from process context. 964 * 965 * Note that it is illegal to call synchronize_srcu() from the corresponding 966 * SRCU read-side critical section; doing so will result in deadlock. 967 * However, it is perfectly legal to call synchronize_srcu() on one 968 * srcu_struct from some other srcu_struct's read-side critical section, 969 * as long as the resulting graph of srcu_structs is acyclic. 970 * 971 * There are memory-ordering constraints implied by synchronize_srcu(). 972 * On systems with more than one CPU, when synchronize_srcu() returns, 973 * each CPU is guaranteed to have executed a full memory barrier since 974 * the end of its last corresponding SRCU read-side critical section 975 * whose beginning preceded the call to synchronize_srcu(). In addition, 976 * each CPU having an SRCU read-side critical section that extends beyond 977 * the return from synchronize_srcu() is guaranteed to have executed a 978 * full memory barrier after the beginning of synchronize_srcu() and before 979 * the beginning of that SRCU read-side critical section. Note that these 980 * guarantees include CPUs that are offline, idle, or executing in user mode, 981 * as well as CPUs that are executing in the kernel. 982 * 983 * Furthermore, if CPU A invoked synchronize_srcu(), which returned 984 * to its caller on CPU B, then both CPU A and CPU B are guaranteed 985 * to have executed a full memory barrier during the execution of 986 * synchronize_srcu(). This guarantee applies even if CPU A and CPU B 987 * are the same CPU, but again only if the system has more than one CPU. 988 * 989 * Of course, these memory-ordering guarantees apply only when 990 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are 991 * passed the same srcu_struct structure. 992 * 993 * If SRCU is likely idle, expedite the first request. This semantic 994 * was provided by Classic SRCU, and is relied upon by its users, so TREE 995 * SRCU must also provide it. Note that detecting idleness is heuristic 996 * and subject to both false positives and negatives. 997 */ 998 void synchronize_srcu(struct srcu_struct *ssp) 999 { 1000 if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited()) 1001 synchronize_srcu_expedited(ssp); 1002 else 1003 __synchronize_srcu(ssp, true); 1004 } 1005 EXPORT_SYMBOL_GPL(synchronize_srcu); 1006 1007 /* 1008 * Callback function for srcu_barrier() use. 1009 */ 1010 static void srcu_barrier_cb(struct rcu_head *rhp) 1011 { 1012 struct srcu_data *sdp; 1013 struct srcu_struct *ssp; 1014 1015 sdp = container_of(rhp, struct srcu_data, srcu_barrier_head); 1016 ssp = sdp->ssp; 1017 if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) 1018 complete(&ssp->srcu_barrier_completion); 1019 } 1020 1021 /** 1022 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. 1023 * @ssp: srcu_struct on which to wait for in-flight callbacks. 1024 */ 1025 void srcu_barrier(struct srcu_struct *ssp) 1026 { 1027 int cpu; 1028 struct srcu_data *sdp; 1029 unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq); 1030 1031 check_init_srcu_struct(ssp); 1032 mutex_lock(&ssp->srcu_barrier_mutex); 1033 if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) { 1034 smp_mb(); /* Force ordering following return. */ 1035 mutex_unlock(&ssp->srcu_barrier_mutex); 1036 return; /* Someone else did our work for us. */ 1037 } 1038 rcu_seq_start(&ssp->srcu_barrier_seq); 1039 init_completion(&ssp->srcu_barrier_completion); 1040 1041 /* Initial count prevents reaching zero until all CBs are posted. */ 1042 atomic_set(&ssp->srcu_barrier_cpu_cnt, 1); 1043 1044 /* 1045 * Each pass through this loop enqueues a callback, but only 1046 * on CPUs already having callbacks enqueued. Note that if 1047 * a CPU already has callbacks enqueue, it must have already 1048 * registered the need for a future grace period, so all we 1049 * need do is enqueue a callback that will use the same 1050 * grace period as the last callback already in the queue. 1051 */ 1052 for_each_possible_cpu(cpu) { 1053 sdp = per_cpu_ptr(ssp->sda, cpu); 1054 spin_lock_irq_rcu_node(sdp); 1055 atomic_inc(&ssp->srcu_barrier_cpu_cnt); 1056 sdp->srcu_barrier_head.func = srcu_barrier_cb; 1057 debug_rcu_head_queue(&sdp->srcu_barrier_head); 1058 if (!rcu_segcblist_entrain(&sdp->srcu_cblist, 1059 &sdp->srcu_barrier_head, 0)) { 1060 debug_rcu_head_unqueue(&sdp->srcu_barrier_head); 1061 atomic_dec(&ssp->srcu_barrier_cpu_cnt); 1062 } 1063 spin_unlock_irq_rcu_node(sdp); 1064 } 1065 1066 /* Remove the initial count, at which point reaching zero can happen. */ 1067 if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) 1068 complete(&ssp->srcu_barrier_completion); 1069 wait_for_completion(&ssp->srcu_barrier_completion); 1070 1071 rcu_seq_end(&ssp->srcu_barrier_seq); 1072 mutex_unlock(&ssp->srcu_barrier_mutex); 1073 } 1074 EXPORT_SYMBOL_GPL(srcu_barrier); 1075 1076 /** 1077 * srcu_batches_completed - return batches completed. 1078 * @ssp: srcu_struct on which to report batch completion. 1079 * 1080 * Report the number of batches, correlated with, but not necessarily 1081 * precisely the same as, the number of grace periods that have elapsed. 1082 */ 1083 unsigned long srcu_batches_completed(struct srcu_struct *ssp) 1084 { 1085 return ssp->srcu_idx; 1086 } 1087 EXPORT_SYMBOL_GPL(srcu_batches_completed); 1088 1089 /* 1090 * Core SRCU state machine. Push state bits of ->srcu_gp_seq 1091 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has 1092 * completed in that state. 1093 */ 1094 static void srcu_advance_state(struct srcu_struct *ssp) 1095 { 1096 int idx; 1097 1098 mutex_lock(&ssp->srcu_gp_mutex); 1099 1100 /* 1101 * Because readers might be delayed for an extended period after 1102 * fetching ->srcu_idx for their index, at any point in time there 1103 * might well be readers using both idx=0 and idx=1. We therefore 1104 * need to wait for readers to clear from both index values before 1105 * invoking a callback. 1106 * 1107 * The load-acquire ensures that we see the accesses performed 1108 * by the prior grace period. 1109 */ 1110 idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */ 1111 if (idx == SRCU_STATE_IDLE) { 1112 spin_lock_irq_rcu_node(ssp); 1113 if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { 1114 WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq)); 1115 spin_unlock_irq_rcu_node(ssp); 1116 mutex_unlock(&ssp->srcu_gp_mutex); 1117 return; 1118 } 1119 idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)); 1120 if (idx == SRCU_STATE_IDLE) 1121 srcu_gp_start(ssp); 1122 spin_unlock_irq_rcu_node(ssp); 1123 if (idx != SRCU_STATE_IDLE) { 1124 mutex_unlock(&ssp->srcu_gp_mutex); 1125 return; /* Someone else started the grace period. */ 1126 } 1127 } 1128 1129 if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { 1130 idx = 1 ^ (ssp->srcu_idx & 1); 1131 if (!try_check_zero(ssp, idx, 1)) { 1132 mutex_unlock(&ssp->srcu_gp_mutex); 1133 return; /* readers present, retry later. */ 1134 } 1135 srcu_flip(ssp); 1136 rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2); 1137 } 1138 1139 if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { 1140 1141 /* 1142 * SRCU read-side critical sections are normally short, 1143 * so check at least twice in quick succession after a flip. 1144 */ 1145 idx = 1 ^ (ssp->srcu_idx & 1); 1146 if (!try_check_zero(ssp, idx, 2)) { 1147 mutex_unlock(&ssp->srcu_gp_mutex); 1148 return; /* readers present, retry later. */ 1149 } 1150 srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */ 1151 } 1152 } 1153 1154 /* 1155 * Invoke a limited number of SRCU callbacks that have passed through 1156 * their grace period. If there are more to do, SRCU will reschedule 1157 * the workqueue. Note that needed memory barriers have been executed 1158 * in this task's context by srcu_readers_active_idx_check(). 1159 */ 1160 static void srcu_invoke_callbacks(struct work_struct *work) 1161 { 1162 bool more; 1163 struct rcu_cblist ready_cbs; 1164 struct rcu_head *rhp; 1165 struct srcu_data *sdp; 1166 struct srcu_struct *ssp; 1167 1168 sdp = container_of(work, struct srcu_data, work); 1169 1170 ssp = sdp->ssp; 1171 rcu_cblist_init(&ready_cbs); 1172 spin_lock_irq_rcu_node(sdp); 1173 rcu_segcblist_advance(&sdp->srcu_cblist, 1174 rcu_seq_current(&ssp->srcu_gp_seq)); 1175 if (sdp->srcu_cblist_invoking || 1176 !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { 1177 spin_unlock_irq_rcu_node(sdp); 1178 return; /* Someone else on the job or nothing to do. */ 1179 } 1180 1181 /* We are on the job! Extract and invoke ready callbacks. */ 1182 sdp->srcu_cblist_invoking = true; 1183 rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs); 1184 spin_unlock_irq_rcu_node(sdp); 1185 rhp = rcu_cblist_dequeue(&ready_cbs); 1186 for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { 1187 debug_rcu_head_unqueue(rhp); 1188 local_bh_disable(); 1189 rhp->func(rhp); 1190 local_bh_enable(); 1191 } 1192 1193 /* 1194 * Update counts, accelerate new callbacks, and if needed, 1195 * schedule another round of callback invocation. 1196 */ 1197 spin_lock_irq_rcu_node(sdp); 1198 rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs); 1199 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, 1200 rcu_seq_snap(&ssp->srcu_gp_seq)); 1201 sdp->srcu_cblist_invoking = false; 1202 more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); 1203 spin_unlock_irq_rcu_node(sdp); 1204 if (more) 1205 srcu_schedule_cbs_sdp(sdp, 0); 1206 } 1207 1208 /* 1209 * Finished one round of SRCU grace period. Start another if there are 1210 * more SRCU callbacks queued, otherwise put SRCU into not-running state. 1211 */ 1212 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay) 1213 { 1214 bool pushgp = true; 1215 1216 spin_lock_irq_rcu_node(ssp); 1217 if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { 1218 if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) { 1219 /* All requests fulfilled, time to go idle. */ 1220 pushgp = false; 1221 } 1222 } else if (!rcu_seq_state(ssp->srcu_gp_seq)) { 1223 /* Outstanding request and no GP. Start one. */ 1224 srcu_gp_start(ssp); 1225 } 1226 spin_unlock_irq_rcu_node(ssp); 1227 1228 if (pushgp) 1229 queue_delayed_work(rcu_gp_wq, &ssp->work, delay); 1230 } 1231 1232 /* 1233 * This is the work-queue function that handles SRCU grace periods. 1234 */ 1235 static void process_srcu(struct work_struct *work) 1236 { 1237 struct srcu_struct *ssp; 1238 1239 ssp = container_of(work, struct srcu_struct, work.work); 1240 1241 srcu_advance_state(ssp); 1242 srcu_reschedule(ssp, srcu_get_delay(ssp)); 1243 } 1244 1245 void srcutorture_get_gp_data(enum rcutorture_type test_type, 1246 struct srcu_struct *ssp, int *flags, 1247 unsigned long *gp_seq) 1248 { 1249 if (test_type != SRCU_FLAVOR) 1250 return; 1251 *flags = 0; 1252 *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq); 1253 } 1254 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); 1255 1256 void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) 1257 { 1258 int cpu; 1259 int idx; 1260 unsigned long s0 = 0, s1 = 0; 1261 1262 idx = ssp->srcu_idx & 0x1; 1263 pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):", 1264 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx); 1265 for_each_possible_cpu(cpu) { 1266 unsigned long l0, l1; 1267 unsigned long u0, u1; 1268 long c0, c1; 1269 struct srcu_data *sdp; 1270 1271 sdp = per_cpu_ptr(ssp->sda, cpu); 1272 u0 = sdp->srcu_unlock_count[!idx]; 1273 u1 = sdp->srcu_unlock_count[idx]; 1274 1275 /* 1276 * Make sure that a lock is always counted if the corresponding 1277 * unlock is counted. 1278 */ 1279 smp_rmb(); 1280 1281 l0 = sdp->srcu_lock_count[!idx]; 1282 l1 = sdp->srcu_lock_count[idx]; 1283 1284 c0 = l0 - u0; 1285 c1 = l1 - u1; 1286 pr_cont(" %d(%ld,%ld %1p)", 1287 cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist)); 1288 s0 += c0; 1289 s1 += c1; 1290 } 1291 pr_cont(" T(%ld,%ld)\n", s0, s1); 1292 } 1293 EXPORT_SYMBOL_GPL(srcu_torture_stats_print); 1294 1295 static int __init srcu_bootup_announce(void) 1296 { 1297 pr_info("Hierarchical SRCU implementation.\n"); 1298 if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF) 1299 pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff); 1300 return 0; 1301 } 1302 early_initcall(srcu_bootup_announce); 1303 1304 void __init srcu_init(void) 1305 { 1306 struct srcu_struct *ssp; 1307 1308 srcu_init_done = true; 1309 while (!list_empty(&srcu_boot_list)) { 1310 ssp = list_first_entry(&srcu_boot_list, struct srcu_struct, 1311 work.work.entry); 1312 check_init_srcu_struct(ssp); 1313 list_del_init(&ssp->work.work.entry); 1314 queue_work(rcu_gp_wq, &ssp->work.work); 1315 } 1316 } 1317