/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

ulong exp_holdoff = 25 * 1000; /* Holdoff (ns) for auto-expediting. */
module_param(exp_holdoff, ulong, 0444);

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	sp->level[0] = &sp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	rcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_init(&snp->lock);
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &sp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == sp->level[level + 1])
			level++;
		snp->srcu_parent = sp->level[level - 1] +
				   (snp - sp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = sp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_init(&sdp->lock);
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->sp = sp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
{
	mutex_init(&sp->srcu_cb_mutex);
	mutex_init(&sp->srcu_gp_mutex);
	sp->srcu_idx = 0;
	sp->srcu_gp_seq = 0;
	sp->srcu_barrier_seq = 0;
	mutex_init(&sp->srcu_barrier_mutex);
	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	if (!is_static)
		sp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(sp, is_static);
	sp->srcu_gp_seq_needed_exp = 0;
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
	return sp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	spin_lock_init(&sp->gp_lock);
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	spin_lock_init(&sp->gp_lock);
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
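
/*
 * Example (illustrative sketch only, not part of SRCU itself): typical
 * lifetime of a dynamically initialized srcu_struct in a hypothetical
 * module.  The names my_srcu, my_init(), and my_exit() are assumptions
 * made purely for illustration.
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		cleanup_srcu_struct(&my_srcu);	// All callbacks must be done.
 *	}
 *
 * Statically allocated domains can instead be declared with DEFINE_SRCU()
 * or DEFINE_STATIC_SRCU(), in which case neither call is needed.
 */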

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ->gp_lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *sp)
{
	unsigned long flags;

	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave(&sp->gp_lock, flags);
	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore(&sp->gp_lock, flags);
		return;
	}
	init_srcu_struct_fields(sp, true);
	spin_unlock_irqrestore(&sp->gp_lock, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(sp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * Possible bug: There is no guarantee that there haven't been
	 * ULONG_MAX increments of ->srcu_lock_count[] since the unlocks were
	 * counted, meaning that this could return true even if there are
	 * still active readers.  Since there are no memory barriers around
	 * srcu_flip(), the CPU is not required to increment ->srcu_idx
	 * before running srcu_readers_unlock_idx(), which means that there
	 * could be an arbitrarily large number of critical sections that
	 * execute after srcu_readers_unlock_idx() but use the old value
	 * of ->srcu_idx.
	 */
	return srcu_readers_lock_idx(sp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL	1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *sp)
{
	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(sp)))
		return; /* Leakage unless caller handles error. */
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	flush_delayed_work(&sp->work);
	for_each_possible_cpu(cpu)
		flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(sp))) {
		pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(sp->sda);
	sp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->srcu_idx) & 0x1;
	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
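
/*
 * Example (illustrative sketch only): code elsewhere in the kernel enters
 * an SRCU read-side critical section through the srcu_read_lock() and
 * srcu_read_unlock() wrappers rather than through the __srcu_read_lock()
 * and __srcu_read_unlock() functions above.  The my_srcu domain, my_data
 * pointer, struct my_struct, and do_something_with() below are
 * assumptions made purely for illustration.
 *
 *	int idx;
 *	struct my_struct *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_data, &my_srcu);
 *	if (p)
 *		do_something_with(p);	// May block: this is SRCU.
 *	srcu_read_unlock(&my_srcu, idx);
 */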

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY	5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *sp)
{
	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
	int state;

	RCU_LOCKDEP_WARN(!lockdep_is_held(&sp->gp_lock),
			 "Invoked srcu_gp_start() without ->gp_lock!");
	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&sp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

/*
 * Track online CPUs to guide callback workqueue placement.
 */
DEFINE_PER_CPU(bool, srcu_online);

void srcu_online_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

void srcu_offline_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}

/*
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever.  This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	bool ret;

	preempt_disable();
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	preempt_enable();
	return ret;
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq,
				   &sdp->work, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *sp)
{
	unsigned long cbdelay;
	bool cbs;
	unsigned long gpseq;
	int idx;
	int idxnext;
	unsigned long mask;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&sp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq(&sp->gp_lock);
	idx = rcu_seq_state(sp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(sp);
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&sp->srcu_gp_seq);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
		sp->srcu_gp_seq_needed_exp = gpseq;
	spin_unlock_irq(&sp->gp_lock);
	mutex_unlock(&sp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
	rcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_irq(&snp->lock);
		cbs = false;
		if (snp >= sp->level[rcu_num_lvls - 1])
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq(&snp->lock);
		if (cbs) {
			smp_mb(); /* GP end before CB invocation. */
			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
		}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&sp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq(&sp->gp_lock);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
		srcu_gp_start(sp);
		spin_unlock_irq(&sp->gp_lock);
		/* Throttle expedited grace periods: Should be rare! */
		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
				    ? 0 : SRCU_INTERVAL);
	} else {
		spin_unlock_irq(&sp->gp_lock);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave(&snp->lock, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore(&snp->lock, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore(&snp->lock, flags);
	}
	spin_lock_irqsave(&sp->gp_lock, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;
	spin_unlock_irqrestore(&sp->gp_lock, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 */
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave(&snp->lock, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore(&snp->lock, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				smp_mb(); /* CBs after GP! */
				srcu_schedule_cbs_sdp(sdp, do_norm
						      ? SRCU_INTERVAL
						      : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(sp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		spin_unlock_irqrestore(&snp->lock, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave(&sp->gp_lock, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
		srcu_gp_start(sp);
		queue_delayed_work(system_power_efficient_wq, &sp->work,
				   srcu_get_delay(sp));
	}
	spin_unlock_irqrestore(&sp->gp_lock, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount + !srcu_get_delay(sp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period to specify expediting because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;

	/* If the local srcu_data structure has callbacks, not idle. */
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, sp->srcu_last_gp_end,
			       sp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&sp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(sp);
	rhp->func = func;
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	spin_lock(&sdp->lock);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	s = rcu_seq_snap(&sp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore(&sdp->lock, flags);
	if (needgp)
		srcu_funnel_gp_start(sp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(sp, sdp->mynode, s);
}

void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(sp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
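
/*
 * Example (illustrative sketch only): a typical call_srcu() user embeds
 * an rcu_head in the protected structure and frees the structure from the
 * callback.  The my_srcu domain, struct my_obj, my_obj_free(), and the
 * already-unlinked obj pointer below are assumptions made purely for
 * illustration.
 *
 *	struct my_obj {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void my_obj_free(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_obj, rh));
 *	}
 *
 *	// After making obj unreachable to new readers:
 *	call_srcu(&my_srcu, &obj->rh, my_obj_free);
 */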

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(sp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both index ranks to drain to zero.  To avoid
 * starvation of synchronize_srcu(), it first waits for the count of
 * index ((->srcu_idx & 1) ^ 1) to drain to zero, then flips ->srcu_idx
 * and waits for the count of the other index to drain to zero as well.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(sp);
	else
		__synchronize_srcu(sp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
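
/*
 * Example (illustrative sketch only): a minimal updater pairing with the
 * reader example near __srcu_read_unlock() above.  The my_srcu domain,
 * my_data pointer, my_lock, struct my_struct, and new_version pointer
 * below are assumptions made purely for illustration.
 *
 *	struct my_struct *old;
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(my_data, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_data, new_version);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);	// Wait for pre-existing readers.
 *	kfree(old);
 */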

/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	sp = sdp->sp;
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);

	check_init_srcu_struct(sp);
	mutex_lock(&sp->srcu_barrier_mutex);
	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&sp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&sp->srcu_barrier_seq);
	init_completion(&sp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_irq(&sdp->lock);
		atomic_inc(&sp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0))
			atomic_dec(&sp->srcu_barrier_cpu_cnt);
		spin_unlock_irq(&sdp->lock);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
	wait_for_completion(&sp->srcu_barrier_completion);

	rcu_seq_end(&sp->srcu_barrier_seq);
	mutex_unlock(&sp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
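
/*
 * Example (illustrative sketch only): tearing down a domain that uses
 * call_srcu().  The caller first stops posting new callbacks, then waits
 * for the callbacks already posted, and only then deconstructs the domain.
 * The my_srcu domain and my_stop_new_callbacks() below are assumptions
 * made purely for illustration.
 *
 *	my_stop_new_callbacks();	// No further call_srcu() invocations.
 *	srcu_barrier(&my_srcu);		// Wait for in-flight callbacks.
 *	cleanup_srcu_struct(&my_srcu);	// Now safe to deconstruct.
 */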

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *sp)
{
	int idx;

	mutex_lock(&sp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq(&sp->gp_lock);
		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
			spin_unlock_irq(&sp->gp_lock);
			mutex_unlock(&sp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(sp);
		spin_unlock_irq(&sp->gp_lock);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 1)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(sp);
		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 2)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(work, struct srcu_data, work.work);
	sp = sdp->sp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq(&sdp->lock);
	smp_mb(); /* Old grace periods before callback invocation! */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq(&sdp->lock);
		return; /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	spin_unlock_irq(&sdp->lock);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq(&sdp->lock);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq(&sdp->lock);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq(&sp->gp_lock);
	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(sp);
	}
	spin_unlock_irq(&sp->gp_lock);

	if (pushgp)
		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(sp);
	srcu_reschedule(sp, srcu_get_delay(sp));
}
EXPORT_SYMBOL_GPL(process_srcu);

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gpnum, unsigned long *completed)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*completed = rcu_seq_ctr(sp->srcu_gp_seq);
	*gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);