/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *         Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

ulong exp_holdoff = 25 * 1000; /* Holdoff (ns) for auto-expediting. */
module_param(exp_holdoff, ulong, 0444);

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
{
        int cpu;
        int i;
        int level = 0;
        int levelspread[RCU_NUM_LVLS];
        struct srcu_data *sdp;
        struct srcu_node *snp;
        struct srcu_node *snp_first;

        /* Work out the overall tree geometry. */
        sp->level[0] = &sp->node[0];
        for (i = 1; i < rcu_num_lvls; i++)
                sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
        rcu_init_levelspread(levelspread, num_rcu_lvl);

        /* Each pass through this loop initializes one srcu_node structure. */
        rcu_for_each_node_breadth_first(sp, snp) {
                spin_lock_init(&snp->lock);
                WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
                             ARRAY_SIZE(snp->srcu_data_have_cbs));
                for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
                        snp->srcu_have_cbs[i] = 0;
                        snp->srcu_data_have_cbs[i] = 0;
                }
                snp->srcu_gp_seq_needed_exp = 0;
                snp->grplo = -1;
                snp->grphi = -1;
                if (snp == &sp->node[0]) {
                        /* Root node, special case. */
                        snp->srcu_parent = NULL;
                        continue;
                }

                /* Non-root node. */
                if (snp == sp->level[level + 1])
                        level++;
                snp->srcu_parent = sp->level[level - 1] +
                                   (snp - sp->level[level]) /
                                   levelspread[level - 1];
        }

        /*
         * Initialize the per-CPU srcu_data array, which feeds into the
         * leaves of the srcu_node tree.
         */
        WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
                     ARRAY_SIZE(sdp->srcu_unlock_count));
        level = rcu_num_lvls - 1;
        snp_first = sp->level[level];
        for_each_possible_cpu(cpu) {
                sdp = per_cpu_ptr(sp->sda, cpu);
                spin_lock_init(&sdp->lock);
                rcu_segcblist_init(&sdp->srcu_cblist);
                sdp->srcu_cblist_invoking = false;
                sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
                sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
                sdp->mynode = &snp_first[cpu / levelspread[level]];
                for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
                        if (snp->grplo < 0)
                                snp->grplo = cpu;
                        snp->grphi = cpu;
                }
                sdp->cpu = cpu;
                INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
                sdp->sp = sp;
                sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
                if (is_static)
                        continue;

                /* Dynamically allocated, better be no srcu_read_locks()! */
                for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
                        sdp->srcu_lock_count[i] = 0;
                        sdp->srcu_unlock_count[i] = 0;
                }
        }
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
{
        mutex_init(&sp->srcu_cb_mutex);
        mutex_init(&sp->srcu_gp_mutex);
        sp->srcu_idx = 0;
        sp->srcu_gp_seq = 0;
        sp->srcu_barrier_seq = 0;
        mutex_init(&sp->srcu_barrier_mutex);
        atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
        INIT_DELAYED_WORK(&sp->work, process_srcu);
        if (!is_static)
                sp->sda = alloc_percpu(struct srcu_data);
        init_srcu_struct_nodes(sp, is_static);
        sp->srcu_gp_seq_needed_exp = 0;
        sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
        smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
        return sp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
                       struct lock_class_key *key)
{
        /* Don't re-initialize a lock while it is held. */
        debug_check_no_locks_freed((void *)sp, sizeof(*sp));
        lockdep_init_map(&sp->dep_map, name, key, 0);
        spin_lock_init(&sp->gp_lock);
        return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
        spin_lock_init(&sp->gp_lock);
        return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ->gp_lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
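 *
 * Illustrative declarations only (not part of this file; the names
 * "my_static_srcu" and "my_dyn_srcu" are made up for this sketch):
 *
 *      DEFINE_STATIC_SRCU(my_static_srcu);        // compile-time init; relies
 *                                                 // on this first-use path
 *      struct srcu_struct my_dyn_srcu;
 *      int ret = init_srcu_struct(&my_dyn_srcu);  // run-time init; fully set
 *                                                 // up before first use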
 */
static void check_init_srcu_struct(struct srcu_struct *sp)
{
        unsigned long flags;

        WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
        /* The smp_load_acquire() pairs with the smp_store_release(). */
        if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
                return; /* Already initialized. */
        spin_lock_irqsave(&sp->gp_lock, flags);
        if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
                spin_unlock_irqrestore(&sp->gp_lock, flags);
                return;
        }
        init_srcu_struct_fields(sp, true);
        spin_unlock_irqrestore(&sp->gp_lock, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
        int cpu;
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

                sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
        }
        return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
        int cpu;
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

                sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
        }
        return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
        unsigned long unlocks;

        unlocks = srcu_readers_unlock_idx(sp, idx);

        /*
         * Make sure that a lock is always counted if the corresponding
         * unlock is counted.  Needs to be a smp_mb() as the read side may
         * contain a read from a variable that is written to before the
         * synchronize_srcu() in the write side.  In this case smp_mb()s
         * A and B act like the store buffering pattern.
         *
         * This smp_mb() also pairs with smp_mb() C to prevent accesses
         * after the synchronize_srcu() from being executed before the
         * grace period ends.
         */
        smp_mb(); /* A */

        /*
         * If the locks are the same as the unlocks, then there must have
         * been no readers on this index at some time in between.  This does
         * not mean that there are no more readers, as one could have read
         * the current index but not have incremented the lock counter yet.
         *
         * Possible bug: There is no guarantee that there haven't been
         * ULONG_MAX increments of ->srcu_lock_count[] since the unlocks were
         * counted, meaning that this could return true even if there are
         * still active readers.  Since there are no memory barriers around
         * srcu_flip(), the CPU is not required to increment ->srcu_idx
         * before running srcu_readers_unlock_idx(), which means that there
         * could be an arbitrarily large number of critical sections that
         * execute after srcu_readers_unlock_idx() but use the old value
         * of ->srcu_idx.
         */
        return srcu_readers_lock_idx(sp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
        int cpu;
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

                sum += READ_ONCE(cpuc->srcu_lock_count[0]);
                sum += READ_ONCE(cpuc->srcu_lock_count[1]);
                sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
                sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
        }
        return sum;
}

#define SRCU_INTERVAL           1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *sp)
{
        if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
                         READ_ONCE(sp->srcu_gp_seq_needed_exp)))
                return 0;
        return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
        int cpu;

        if (WARN_ON(!srcu_get_delay(sp)))
                return; /* Leakage unless caller handles error. */
        if (WARN_ON(srcu_readers_active(sp)))
                return; /* Leakage unless caller handles error. */
        flush_delayed_work(&sp->work);
        for_each_possible_cpu(cpu)
                flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
        if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
            WARN_ON(srcu_readers_active(sp))) {
                pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
                return; /* Caller forgot to stop doing call_srcu()? */
        }
        free_percpu(sp->sda);
        sp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
        int idx;

        idx = READ_ONCE(sp->srcu_idx) & 0x1;
        __this_cpu_inc(sp->sda->srcu_lock_count[idx]);
        smp_mb(); /* B */  /* Avoid leaking the critical section. */
        return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
        smp_mb(); /* C */  /* Avoid leaking the critical section. */
        this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY          5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *sp)
{
        struct srcu_data *sdp = this_cpu_ptr(sp->sda);
        int state;

        RCU_LOCKDEP_WARN(!lockdep_is_held(&sp->gp_lock),
                         "Invoked srcu_gp_start() without ->gp_lock!");
        WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
        rcu_segcblist_advance(&sdp->srcu_cblist,
                              rcu_seq_current(&sp->srcu_gp_seq));
        (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
                                       rcu_seq_snap(&sp->srcu_gp_seq));
        smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
        rcu_seq_start(&sp->srcu_gp_seq);
        state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
        WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

/*
 * Track online CPUs to guide callback workqueue placement.
 */
DEFINE_PER_CPU(bool, srcu_online);

void srcu_online_cpu(unsigned int cpu)
{
        WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

void srcu_offline_cpu(unsigned int cpu)
{
        WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}

/*
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever.  This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                                       struct delayed_work *dwork,
                                       unsigned long delay)
{
        bool ret;

        preempt_disable();
        if (READ_ONCE(per_cpu(srcu_online, cpu)))
                ret = queue_delayed_work_on(cpu, wq, dwork, delay);
        else
                ret = queue_delayed_work(wq, dwork, delay);
        preempt_enable();
        return ret;
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
        srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq,
                                   &sdp->work, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
                                  unsigned long mask, unsigned long delay)
{
        int cpu;

        for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
                if (!(mask & (1 << (cpu - snp->grplo))))
                        continue;
                srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
        }
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *sp)
{
        unsigned long cbdelay;
        bool cbs;
        unsigned long gpseq;
        int idx;
        int idxnext;
        unsigned long mask;
        struct srcu_node *snp;

        /* Prevent more than one additional grace period. */
        mutex_lock(&sp->srcu_cb_mutex);

        /* End the current grace period. */
        spin_lock_irq(&sp->gp_lock);
        idx = rcu_seq_state(sp->srcu_gp_seq);
        WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
        cbdelay = srcu_get_delay(sp);
        sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
        rcu_seq_end(&sp->srcu_gp_seq);
        gpseq = rcu_seq_current(&sp->srcu_gp_seq);
        if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
                sp->srcu_gp_seq_needed_exp = gpseq;
        spin_unlock_irq(&sp->gp_lock);
        mutex_unlock(&sp->srcu_gp_mutex);
        /* A new grace period can start at this point.  But only one. */

        /* Initiate callback invocation as needed. */
        idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
        idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
        rcu_for_each_node_breadth_first(sp, snp) {
                spin_lock_irq(&snp->lock);
                cbs = false;
                if (snp >= sp->level[rcu_num_lvls - 1])
                        cbs = snp->srcu_have_cbs[idx] == gpseq;
                snp->srcu_have_cbs[idx] = gpseq;
                rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
                if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
                        snp->srcu_gp_seq_needed_exp = gpseq;
                mask = snp->srcu_data_have_cbs[idx];
                snp->srcu_data_have_cbs[idx] = 0;
                spin_unlock_irq(&snp->lock);
                if (cbs) {
                        smp_mb(); /* GP end before CB invocation. */
                        srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
                }
        }

        /* Callback initiation done, allow grace periods after next. */
        mutex_unlock(&sp->srcu_cb_mutex);

        /* Start a new grace period if needed. */
        spin_lock_irq(&sp->gp_lock);
        gpseq = rcu_seq_current(&sp->srcu_gp_seq);
        if (!rcu_seq_state(gpseq) &&
            ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
                srcu_gp_start(sp);
                spin_unlock_irq(&sp->gp_lock);
                /* Throttle expedited grace periods: Should be rare! */
                srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
                                    ? 0 : SRCU_INTERVAL);
        } else {
                spin_unlock_irq(&sp->gp_lock);
        }
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
                                  unsigned long s)
{
        unsigned long flags;

        for (; snp != NULL; snp = snp->srcu_parent) {
                if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
                    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
                        return;
                spin_lock_irqsave(&snp->lock, flags);
                if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
                        spin_unlock_irqrestore(&snp->lock, flags);
                        return;
                }
                WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
                spin_unlock_irqrestore(&snp->lock, flags);
        }
        spin_lock_irqsave(&sp->gp_lock, flags);
        if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
                sp->srcu_gp_seq_needed_exp = s;
        spin_unlock_irqrestore(&sp->gp_lock, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 */
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
                                 unsigned long s, bool do_norm)
{
        unsigned long flags;
        int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
        struct srcu_node *snp = sdp->mynode;
        unsigned long snp_seq;

        /* Each pass through the loop does one level of the srcu_node tree. */
        for (; snp != NULL; snp = snp->srcu_parent) {
                if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
                        return; /* GP already done and CBs recorded. */
                spin_lock_irqsave(&snp->lock, flags);
                if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
                        snp_seq = snp->srcu_have_cbs[idx];
                        if (snp == sdp->mynode && snp_seq == s)
                                snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
                        spin_unlock_irqrestore(&snp->lock, flags);
                        if (snp == sdp->mynode && snp_seq != s) {
                                smp_mb(); /* CBs after GP! */
                                srcu_schedule_cbs_sdp(sdp, do_norm
                                                           ? SRCU_INTERVAL
                                                           : 0);
                                return;
                        }
                        if (!do_norm)
                                srcu_funnel_exp_start(sp, snp, s);
                        return;
                }
                snp->srcu_have_cbs[idx] = s;
                if (snp == sdp->mynode)
                        snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
                if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
                        snp->srcu_gp_seq_needed_exp = s;
                spin_unlock_irqrestore(&snp->lock, flags);
        }

        /* Top of tree, must ensure the grace period will be started. */
        spin_lock_irqsave(&sp->gp_lock, flags);
        if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
                /*
                 * Record need for grace period s.  Pair with load
                 * acquire setting up for initialization.
                 */
                smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
        }
        if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
                sp->srcu_gp_seq_needed_exp = s;

        /* If grace period not already done and none in progress, start it. */
        if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
            rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
                WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
                srcu_gp_start(sp);
                queue_delayed_work(system_power_efficient_wq, &sp->work,
                                   srcu_get_delay(sp));
        }
        spin_unlock_irqrestore(&sp->gp_lock, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
        for (;;) {
                if (srcu_readers_active_idx_check(sp, idx))
                        return true;
                if (--trycount + !srcu_get_delay(sp) <= 0)
                        return false;
                udelay(SRCU_RETRY_CHECK_DELAY);
        }
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
        WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);

        /*
         * Ensure that if the updater misses an __srcu_read_unlock()
         * increment, that task's next __srcu_read_lock() will see the
         * above counter update.  Note that both this memory barrier
         * and the one in srcu_readers_active_idx_check() provide the
         * guarantee for __srcu_read_lock().
         */
        smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
        unsigned long curseq;
        unsigned long flags;
        struct srcu_data *sdp;
        unsigned long t;

        /* If the local srcu_data structure has callbacks, not idle. */
        local_irq_save(flags);
        sdp = this_cpu_ptr(sp->sda);
        if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
                local_irq_restore(flags);
                return false; /* Callbacks already present, so not idle. */
        }
        local_irq_restore(flags);

        /*
         * No local callbacks, so probabilistically probe global state.
         * Exact information would require acquiring locks, which would
         * kill scalability, hence the probabilistic nature of the probe.
         */

        /* First, see if enough time has passed since the last GP. */
        t = ktime_get_mono_fast_ns();
        if (exp_holdoff == 0 ||
            time_in_range_open(t, sp->srcu_last_gp_end,
                               sp->srcu_last_gp_end + exp_holdoff))
                return false; /* Too soon after last GP. */

        /* Next, check for probable idleness. */
        curseq = rcu_seq_current(&sp->srcu_gp_seq);
        smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
        if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
                return false; /* Grace period in progress, so not idle. */
        smp_mb(); /* Order ->srcu_gp_seq with prior access. */
        if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
                return false; /* GP # changed, so not idle. */
        return true; /* With reasonable probability, idle! */
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
                 rcu_callback_t func, bool do_norm)
{
        unsigned long flags;
        bool needexp = false;
        bool needgp = false;
        unsigned long s;
        struct srcu_data *sdp;

        check_init_srcu_struct(sp);
        rhp->func = func;
        local_irq_save(flags);
        sdp = this_cpu_ptr(sp->sda);
        spin_lock(&sdp->lock);
        rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
        rcu_segcblist_advance(&sdp->srcu_cblist,
                              rcu_seq_current(&sp->srcu_gp_seq));
        s = rcu_seq_snap(&sp->srcu_gp_seq);
        (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
        if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
                sdp->srcu_gp_seq_needed = s;
                needgp = true;
        }
        if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
                sdp->srcu_gp_seq_needed_exp = s;
                needexp = true;
        }
        spin_unlock_irqrestore(&sdp->lock, flags);
        if (needgp)
                srcu_funnel_gp_start(sp, sdp, s, do_norm);
        else if (needexp)
                srcu_funnel_exp_start(sp, sdp->mynode, s);
}

void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
               rcu_callback_t func)
{
        __call_srcu(sp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
{
        struct rcu_synchronize rcu;

        RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
                         lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

        if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
                return;
        might_sleep();
        check_init_srcu_struct(sp);
        init_completion(&rcu.completion);
        init_rcu_head_on_stack(&rcu.head);
        __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
        __synchronize_srcu(sp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both index ranks to drain to zero.  To avoid
 * possible starvation of synchronize_srcu(), it first waits for the count
 * of the index=((->srcu_idx & 1) ^ 1) to drain to zero, then flips
 * ->srcu_idx and waits for the count of the other index to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU-sched read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
        if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
                synchronize_srcu_expedited(sp);
        else
                __synchronize_srcu(sp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
        struct srcu_data *sdp;
        struct srcu_struct *sp;

        sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
        sp = sdp->sp;
        if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
                complete(&sp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
        int cpu;
        struct srcu_data *sdp;
        unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);

        check_init_srcu_struct(sp);
        mutex_lock(&sp->srcu_barrier_mutex);
        if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
                smp_mb(); /* Force ordering following return. */
                mutex_unlock(&sp->srcu_barrier_mutex);
                return; /* Someone else did our work for us. */
        }
        rcu_seq_start(&sp->srcu_barrier_seq);
        init_completion(&sp->srcu_barrier_completion);

        /* Initial count prevents reaching zero until all CBs are posted. */
        atomic_set(&sp->srcu_barrier_cpu_cnt, 1);

        /*
         * Each pass through this loop enqueues a callback, but only
         * on CPUs already having callbacks enqueued.  Note that if
         * a CPU already has callbacks enqueued, it must have already
         * registered the need for a future grace period, so all we
         * need do is enqueue a callback that will use the same
         * grace period as the last callback already in the queue.
         */
        for_each_possible_cpu(cpu) {
                sdp = per_cpu_ptr(sp->sda, cpu);
                spin_lock_irq(&sdp->lock);
                atomic_inc(&sp->srcu_barrier_cpu_cnt);
                sdp->srcu_barrier_head.func = srcu_barrier_cb;
                if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
                                           &sdp->srcu_barrier_head, 0))
                        atomic_dec(&sp->srcu_barrier_cpu_cnt);
                spin_unlock_irq(&sdp->lock);
        }

        /* Remove the initial count, at which point reaching zero can happen. */
        if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
                complete(&sp->srcu_barrier_completion);
        wait_for_completion(&sp->srcu_barrier_completion);

        rcu_seq_end(&sp->srcu_barrier_seq);
        mutex_unlock(&sp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
        return sp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *sp)
{
        int idx;

        mutex_lock(&sp->srcu_gp_mutex);

        /*
         * Because readers might be delayed for an extended period after
         * fetching ->srcu_idx for their index, at any point in time there
         * might well be readers using both idx=0 and idx=1.  We therefore
         * need to wait for readers to clear from both index values before
         * invoking a callback.
         *
         * The load-acquire ensures that we see the accesses performed
         * by the prior grace period.
         */
        idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
        if (idx == SRCU_STATE_IDLE) {
                spin_lock_irq(&sp->gp_lock);
                if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
                        WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
                        spin_unlock_irq(&sp->gp_lock);
                        mutex_unlock(&sp->srcu_gp_mutex);
                        return;
                }
                idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
                if (idx == SRCU_STATE_IDLE)
                        srcu_gp_start(sp);
                spin_unlock_irq(&sp->gp_lock);
                if (idx != SRCU_STATE_IDLE) {
                        mutex_unlock(&sp->srcu_gp_mutex);
                        return; /* Someone else started the grace period. */
                }
        }

        if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
                idx = 1 ^ (sp->srcu_idx & 1);
                if (!try_check_zero(sp, idx, 1)) {
                        mutex_unlock(&sp->srcu_gp_mutex);
                        return; /* readers present, retry later. */
                }
                srcu_flip(sp);
                rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
        }

        if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

                /*
                 * SRCU read-side critical sections are normally short,
                 * so check at least twice in quick succession after a flip.
                 */
                idx = 1 ^ (sp->srcu_idx & 1);
                if (!try_check_zero(sp, idx, 2)) {
                        mutex_unlock(&sp->srcu_gp_mutex);
                        return; /* readers present, retry later. */
                }
                srcu_gp_end(sp);  /* Releases ->srcu_gp_mutex. */
        }
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
        bool more;
        struct rcu_cblist ready_cbs;
        struct rcu_head *rhp;
        struct srcu_data *sdp;
        struct srcu_struct *sp;

        sdp = container_of(work, struct srcu_data, work.work);
        sp = sdp->sp;
        rcu_cblist_init(&ready_cbs);
        spin_lock_irq(&sdp->lock);
        smp_mb(); /* Old grace periods before callback invocation! */
        rcu_segcblist_advance(&sdp->srcu_cblist,
                              rcu_seq_current(&sp->srcu_gp_seq));
        if (sdp->srcu_cblist_invoking ||
            !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
                spin_unlock_irq(&sdp->lock);
                return;  /* Someone else on the job or nothing to do. */
        }

        /* We are on the job!  Extract and invoke ready callbacks. */
        sdp->srcu_cblist_invoking = true;
        rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
        spin_unlock_irq(&sdp->lock);
        rhp = rcu_cblist_dequeue(&ready_cbs);
        for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
                local_bh_disable();
                rhp->func(rhp);
                local_bh_enable();
        }

        /*
         * Update counts, accelerate new callbacks, and if needed,
         * schedule another round of callback invocation.
         */
        spin_lock_irq(&sdp->lock);
        rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
        (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
                                       rcu_seq_snap(&sp->srcu_gp_seq));
        sdp->srcu_cblist_invoking = false;
        more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
        spin_unlock_irq(&sdp->lock);
        if (more)
                srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
        bool pushgp = true;

        spin_lock_irq(&sp->gp_lock);
        if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
                if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
                        /* All requests fulfilled, time to go idle. */
                        pushgp = false;
                }
        } else if (!rcu_seq_state(sp->srcu_gp_seq)) {
                /* Outstanding request and no GP.  Start one. */
                srcu_gp_start(sp);
        }
        spin_unlock_irq(&sp->gp_lock);

        if (pushgp)
                queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
        struct srcu_struct *sp;

        sp = container_of(work, struct srcu_struct, work.work);

        srcu_advance_state(sp);
        srcu_reschedule(sp, srcu_get_delay(sp));
}
EXPORT_SYMBOL_GPL(process_srcu);

void srcutorture_get_gp_data(enum rcutorture_type test_type,
                             struct srcu_struct *sp, int *flags,
                             unsigned long *gpnum, unsigned long *completed)
{
        if (test_type != SRCU_FLAVOR)
                return;
        *flags = 0;
        *completed = rcu_seq_ctr(sp->srcu_gp_seq);
        *gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
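
/*
 * Usage sketch (illustrative only, not compiled as part of this file).
 * The names "my_srcu", "my_gp", "my_lock", and "struct my_data" are made
 * up for this sketch; the SRCU and RCU API calls are the real ones
 * declared in include/linux/srcu.h and include/linux/rcupdate.h.
 *
 *      DEFINE_SRCU(my_srcu);
 *      static struct my_data __rcu *my_gp;
 *      static DEFINE_SPINLOCK(my_lock);
 *
 *      int my_reader(void)
 *      {
 *              struct my_data *p;
 *              int idx, val = -1;
 *
 *              idx = srcu_read_lock(&my_srcu);
 *              p = srcu_dereference(my_gp, &my_srcu);
 *              if (p)
 *                      val = p->val;
 *              srcu_read_unlock(&my_srcu, idx);
 *              return val;
 *      }
 *
 *      void my_updater(struct my_data *newp)
 *      {
 *              struct my_data *oldp;
 *
 *              spin_lock(&my_lock);
 *              oldp = rcu_dereference_protected(my_gp,
 *                                               lockdep_is_held(&my_lock));
 *              rcu_assign_pointer(my_gp, newp);
 *              spin_unlock(&my_lock);
 *              synchronize_srcu(&my_srcu);  // or call_srcu() with a callback
 *              kfree(oldp);
 *      }
 */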