/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->ssp = ssp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}

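/*
 * Illustrative example only, not tied to any particular kernel config:
 * on a system with 64 possible CPUs and a leaf fanout of 16, the geometry
 * worked out above would be two levels, one root srcu_node plus four leaf
 * srcu_nodes, each leaf covering 16 CPUs via ->grplo/->grphi, and each
 * srcu_data's ->grpmask being a single bit identifying that CPU within
 * its leaf.  The actual numbers depend on NR_CPUS and the
 * CONFIG_RCU_FANOUT* settings.
 */
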
/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(ssp, is_static);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return ssp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

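/*
 * For illustration only: a typical dynamically initialized SRCU domain
 * (the names my_srcu and ret are hypothetical) is set up and torn down
 * roughly like this:
 *
 *	struct srcu_struct my_srcu;
 *
 *	ret = init_srcu_struct(&my_srcu);	// before any other use
 *	...
 *	srcu_barrier(&my_srcu);			// if call_srcu() was used
 *	cleanup_srcu_struct(&my_srcu);		// after all readers are done
 *
 * Statically allocated domains would instead use DEFINE_SRCU() or
 * DEFINE_STATIC_SRCU(), which rely on the first-use initialization
 * performed by check_init_srcu_struct() below.
 */
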
/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

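/*
 * Illustrative sketch of the store-buffering pattern mentioned above
 * (the variable x and the exact code are hypothetical):
 *
 *	Updater				Reader
 *	-------				------
 *	WRITE_ONCE(x, 1);		increment ->srcu_lock_count[idx]
 *	synchronize_srcu():		smp_mb();  (B)
 *	  smp_mb();  (A)		r1 = READ_ONCE(x);
 *	  read ->srcu_lock_count[idx]
 *
 * As in the classic store-buffering litmus test, it cannot happen that
 * the updater's scan misses the reader's increment *and* the reader
 * misses the updater's store: at least one side sees the other's write,
 * so either the grace period waits on the reader or the reader sees the
 * pre-grace-period update.
 */
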
/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */
void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	if (quiesced) {
		if (WARN_ON(delayed_work_pending(&ssp->work)))
			return; /* Just leak it! */
	} else {
		flush_delayed_work(&ssp->work);
	}
	for_each_possible_cpu(cpu)
		if (quiesced) {
			if (WARN_ON(delayed_work_pending(&per_cpu_ptr(ssp->sda, cpu)->work)))
				return; /* Just leak it! */
		} else {
			flush_delayed_work(&per_cpu_ptr(ssp->sda, cpu)->work);
		}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(ssp->sda);
	ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

/*
 * Track online CPUs to guide callback workqueue placement.
 */
DEFINE_PER_CPU(bool, srcu_online);

void srcu_online_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

void srcu_offline_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}

/*
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever.  This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	bool ret;

	preempt_disable();
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	preempt_enable();
	return ret;
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(ssp);
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		ssp->srcu_gp_seq_needed_exp = gpseq;
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(ssp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		ssp->srcu_gp_seq_needed_exp = s;
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
						      ? SRCU_INTERVAL
						      : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(ssp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		ssp->srcu_gp_seq_needed_exp = s;

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;

	/* If the local srcu_data structure has callbacks, not idle. */
	local_irq_save(flags);
	sdp = this_cpu_ptr(ssp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, ssp->srcu_last_gp_end,
			       ssp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(ssp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	idx = srcu_read_lock(ssp);
	local_irq_save(flags);
	sdp = this_cpu_ptr(ssp->sda);
	spin_lock_rcu_node(sdp);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp->mynode, s);
	srcu_read_unlock(ssp, idx);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct in which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);

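/*
 * Illustration only, not part of the SRCU API: a typical caller embeds
 * an rcu_head in the protected structure and frees it from the callback.
 * The names below (struct foo, foo_cb(), my_srcu, gbl_foo, foo_lock) are
 * hypothetical.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	// Updater: publish a new version, then defer freeing the old one.
 *	old = rcu_dereference_protected(gbl_foo, lockdep_is_held(&foo_lock));
 *	rcu_assign_pointer(gbl_foo, new);
 *	call_srcu(&my_srcu, &old->rh, foo_cb);
 *
 *	// Reader: the structure remains valid until srcu_read_unlock().
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gbl_foo, &my_srcu);
 *	do_something_with(p->data);
 *	srcu_read_unlock(&my_srcu, idx);
 */
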
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both indexes to drain to zero.  To avoid possible
 * starvation of synchronize_srcu(), it first waits for the count of
 * index ((->srcu_idx & 1) ^ 1) to drain to zero, then flips ->srcu_idx
 * and waits for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(ssp);
	else
		__synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

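/*
 * Illustration only (hypothetical names): the blocking counterpart of the
 * call_srcu() example above is the classic remove-then-free pattern:
 *
 *	old = rcu_dereference_protected(gbl_foo, lockdep_is_held(&foo_lock));
 *	rcu_assign_pointer(gbl_foo, new);
 *	synchronize_srcu(&my_srcu);	// all pre-existing readers are done
 *	kfree(old);
 *
 * The updater sleeps here, so this must run in process context and must
 * not be invoked from within a my_srcu read-side critical section.
 */
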
/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	ssp = sdp->ssp;
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

	check_init_srcu_struct(ssp);
	mutex_lock(&ssp->srcu_barrier_mutex);
	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&ssp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&ssp->srcu_barrier_seq);
	init_completion(&ssp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
		atomic_inc(&ssp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&ssp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
	wait_for_completion(&ssp->srcu_barrier_completion);

	rcu_seq_end(&ssp->srcu_barrier_seq);
	mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

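/*
 * Illustration only (hypothetical names): srcu_barrier() is typically the
 * step that makes module unload or domain teardown safe when call_srcu()
 * has been used:
 *
 *	// Stop posting new callbacks first, then:
 *	srcu_barrier(&my_srcu);		// wait for in-flight callbacks
 *	cleanup_srcu_struct(&my_srcu);	// now safe to tear down
 *
 * Note that srcu_barrier() only waits for callbacks already posted; the
 * caller must prevent further call_srcu() invocations on this domain.
 */
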
/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
	return ssp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *ssp)
{
	int idx;

	mutex_lock(&ssp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(ssp);
		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(ssp);
			mutex_unlock(&ssp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 1)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(ssp);
		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 2)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
	}
}

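/*
 * Rough summary of the progression driven by srcu_advance_state() above;
 * the code itself is authoritative:
 *
 *	SRCU_STATE_IDLE  --srcu_gp_start()-->  SRCU_STATE_SCAN1
 *	SCAN1: wait for readers on the inactive index, then srcu_flip()
 *	SCAN2: wait for readers on the newly inactive index
 *	SCAN2 complete  --srcu_gp_end()-->  SRCU_STATE_IDLE (or next GP)
 */
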
/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(work, struct srcu_data, work.work);
	ssp = sdp->ssp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(ssp);
	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(ssp);
	}
	spin_unlock_irq_rcu_node(ssp);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *ssp;

	ssp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(ssp);
	srcu_reschedule(ssp, srcu_get_delay(ssp));
}

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *ssp, int *flags,
			     unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;

	idx = ssp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *sdp;

		sdp = per_cpu_ptr(ssp->sda, cpu);
		u0 = sdp->srcu_unlock_count[!idx];
		u1 = sdp->srcu_unlock_count[idx];

		/*
		 * Make sure that a lock is always counted if the corresponding
		 * unlock is counted.
		 */
		smp_rmb();

		l0 = sdp->srcu_lock_count[!idx];
		l1 = sdp->srcu_lock_count[idx];

		c0 = l0 - u0;
		c1 = l1 - u1;
		pr_cont(" %d(%ld,%ld %1p)",
			cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
		s0 += c0;
		s1 += c1;
	}
	pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);

void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
				       work.work.entry);
		check_init_srcu_struct(ssp);
		list_del_init(&ssp->work.work.entry);
		queue_work(rcu_gp_wq, &ssp->work.work);
	}
}