Lines Matching +full:current +full:- +full:boost +full:- +full:limit

1 /* SPDX-License-Identifier: GPL-2.0+ */
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4 * Internal non-public definitions that provide either classic
23 * non-preemptible reads are also safe. NOCB kthreads and in rcu_rdp_is_offloaded()
37 return rcu_segcblist_is_offloaded(&rdp->cblist); in rcu_rdp_is_offloaded()
50 pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n", in rcu_bootup_announce_oddness()
57 pr_info("\tRCU strict (and thus non-scalable) grace periods are enabled.\n"); in rcu_bootup_announce_oddness()
59 pr_info("\tFour(or more)-level hierarchy is enabled.\n"); in rcu_bootup_announce_oddness()
61 pr_info("\tBuild-time adjustment of leaf fanout to %d.\n", in rcu_bootup_announce_oddness()
64 pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", in rcu_bootup_announce_oddness()
73 pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit); in rcu_bootup_announce_oddness()
75 pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark); in rcu_bootup_announce_oddness()
77 pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark); in rcu_bootup_announce_oddness()
79 pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld); in rcu_bootup_announce_oddness()
81 pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs); in rcu_bootup_announce_oddness()
83 pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs); in rcu_bootup_announce_oddness()
85 pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs); in rcu_bootup_announce_oddness()
87 pr_info("\tKick kthreads if too-long grace period.\n"); in rcu_bootup_announce_oddness()
89 pr_info("\tRCU callback double-/use-after-free debug is enabled.\n"); in rcu_bootup_announce_oddness()
91 pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay); in rcu_bootup_announce_oddness()
124 * Queues a task preempted within an RCU-preempt read-side critical
125 * section into the appropriate location within the ->blkd_tasks list,
127 * periods. The ->gp_tasks pointer indicates which element the normal
128 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
131 * ->blkd_tasks list, it also waits on all subsequent elements. Thus,
146 * their RCU read-side critical sections. At that point, the ->gp_tasks
147 * pointer will equal the ->exp_tasks pointer, at which point the end of
152 __releases(rnp->lock) /* But leaves interrupts disabled. */ in rcu_preempt_ctxt_queue()
154 int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) + in rcu_preempt_ctxt_queue()
155 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) + in rcu_preempt_ctxt_queue()
156 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) + in rcu_preempt_ctxt_queue()
157 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0); in rcu_preempt_ctxt_queue()
158 struct task_struct *t = current; in rcu_preempt_ctxt_queue()
161 WARN_ON_ONCE(rdp->mynode != rnp); in rcu_preempt_ctxt_queue()
164 WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask & in rcu_preempt_ctxt_queue()
165 rdp->grpmask); in rcu_preempt_ctxt_queue()
169 * this could be an if-statement. In practice, when I tried in rcu_preempt_ctxt_queue()
181 * GP but not blocking the already-waiting expedited GP. in rcu_preempt_ctxt_queue()
183 * blocking the already-waiting GPs. in rcu_preempt_ctxt_queue()
185 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
203 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
216 list_add(&t->rcu_node_entry, rnp->exp_tasks); in rcu_preempt_ctxt_queue()
227 list_add(&t->rcu_node_entry, rnp->gp_tasks); in rcu_preempt_ctxt_queue()
239 * block either grace period, update the ->gp_tasks and/or in rcu_preempt_ctxt_queue()
240 * ->exp_tasks pointers, respectively, to reference the newly in rcu_preempt_ctxt_queue()
243 if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) { in rcu_preempt_ctxt_queue()
244 WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
245 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq); in rcu_preempt_ctxt_queue()
247 if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD)) in rcu_preempt_ctxt_queue()
248 WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
250 !(rnp->qsmask & rdp->grpmask)); in rcu_preempt_ctxt_queue()
252 !(rnp->expmask & rdp->grpmask)); in rcu_preempt_ctxt_queue()
261 * Interrupts are disabled, so ->cpu_no_qs.b.exp cannot change. in rcu_preempt_ctxt_queue()
263 if (blkd_state & RCU_EXP_BLKD && rdp->cpu_no_qs.b.exp) in rcu_preempt_ctxt_queue()
266 WARN_ON_ONCE(rdp->cpu_no_qs.b.exp); in rcu_preempt_ctxt_queue()
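
The comments and code fragments above show rcu_preempt_ctxt_queue() folding four facts into blkd_state: whether a normal and/or an expedited grace period already waits on tasks queued on this rcu_node (->gp_tasks, ->exp_tasks), and whether the newly preempted task blocks each of them (->qsmask, ->expmask). That state selects one of four placements on ->blkd_tasks, visible above as list_add() at the head, list_add_tail(), or list_add() just after ->exp_tasks or ->gp_tasks. The sketch below is a compilable condensation of that placement rule; the kernel spells it out as an exhaustive 16-case switch, so treat the exact edge-case grouping here as my approximate reading rather than a line-for-line copy.

#include <stdio.h>
#include <stdbool.h>

/* Flags mirroring the blkd_state bits computed in the lines above. */
#define RCU_GP_TASKS	0x1	/* a normal GP already waits on queued tasks     */
#define RCU_EXP_TASKS	0x2	/* an expedited GP already waits on queued tasks */
#define RCU_GP_BLKD	0x4	/* this task blocks the current normal GP        */
#define RCU_EXP_BLKD	0x8	/* this task blocks the current expedited GP     */

/*
 * Invariant being preserved (from the comment above): once ->gp_tasks or
 * ->exp_tasks points into the list, that grace period waits on the pointed-to
 * element and on everything after it, so a newcomer must never be queued
 * where a grace period it does not block would be forced to wait on it.
 */
static const char *placement(int s)
{
	bool gp_waiting  = s & RCU_GP_TASKS;
	bool exp_waiting = s & RCU_EXP_TASKS;
	bool blocks_gp   = s & RCU_GP_BLKD;
	bool blocks_exp  = s & RCU_EXP_BLKD;

	if (blocks_exp)
		return (exp_waiting && !(blocks_gp && gp_waiting)) ?
			"just after ->exp_tasks" : "tail of ->blkd_tasks";
	if (blocks_gp)
		return gp_waiting  ? "just after ->gp_tasks" :
		       exp_waiting ? "head of ->blkd_tasks"  :
				     "tail of ->blkd_tasks";
	return "head of ->blkd_tasks";
}

int main(void)
{
	for (int s = 0; s < 16; s++)
		printf("blkd_state %#4x -> %s\n", s, placement(s));
	return 0;
}
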
270 * Record a preemptible-RCU quiescent state for the specified CPU.
272 * on the CPU is in a quiescent state: Instead, it means that the current
273 * grace period need not wait on any RCU read-side critical section that
274 * starts later on this CPU. It also means that if the current task is
275 * in an RCU read-side critical section, it has already added itself to
276 * some leaf rcu_node structure's ->blkd_tasks list. In addition to the
277 * current task, there might be any number of other tasks blocked while
278 * in an RCU read-side critical section.
280 * Unlike non-preemptible-RCU, quiescent state reports for expedited
295 WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false); in rcu_qs()
300 * We have entered the scheduler, and the current task might soon be
301 * context-switched away from. If this task is in an RCU read-side
305 * RCU read-side critical section. Therefore, the current grace period
307 * predating the current grace period drain, in other words, until
308 * rnp->gp_tasks becomes NULL.
314 struct task_struct *t = current; in rcu_note_context_switch()
320 …WARN_ONCE(!preempt && rcu_preempt_depth() > 0, "Voluntary context switch within RCU read-side crit… in rcu_note_context_switch()
322 !t->rcu_read_unlock_special.b.blocked) { in rcu_note_context_switch()
324 /* Possibly blocking in an RCU read-side critical section. */ in rcu_note_context_switch()
325 rnp = rdp->mynode; in rcu_note_context_switch()
327 t->rcu_read_unlock_special.b.blocked = true; in rcu_note_context_switch()
328 t->rcu_blocked_node = rnp; in rcu_note_context_switch()
336 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); in rcu_note_context_switch()
338 t->pid, in rcu_note_context_switch()
339 (rnp->qsmask & rdp->grpmask) in rcu_note_context_switch()
340 ? rnp->gp_seq in rcu_note_context_switch()
341 : rcu_seq_snap(&rnp->gp_seq)); in rcu_note_context_switch()
348 * Either we were not in an RCU read-side critical section to in rcu_note_context_switch()
351 * for this CPU. Again, if we were in an RCU read-side critical in rcu_note_context_switch()
352 * section, and if that critical section was blocking the current in rcu_note_context_switch()
354 * means that we continue to block the current grace period. in rcu_note_context_switch()
357 if (rdp->cpu_no_qs.b.exp) in rcu_note_context_switch()
359 rcu_tasks_qs(current, preempt); in rcu_note_context_switch()
365 * Check for preempted RCU readers blocking the current grace period
367 * answer, it must hold the rcu_node's ->lock.
371 return READ_ONCE(rnp->gp_tasks) != NULL; in rcu_preempt_blocked_readers_cgp()
374 /* limit value for ->rcu_read_lock_nesting. */
379 WRITE_ONCE(current->rcu_read_lock_nesting, READ_ONCE(current->rcu_read_lock_nesting) + 1); in rcu_preempt_read_enter()
384 int ret = READ_ONCE(current->rcu_read_lock_nesting) - 1; in rcu_preempt_read_exit()
386 WRITE_ONCE(current->rcu_read_lock_nesting, ret); in rcu_preempt_read_exit()
392 WRITE_ONCE(current->rcu_read_lock_nesting, val); in rcu_preempt_depth_set()
397 * Just increment ->rcu_read_lock_nesting, shared state will be updated
406 WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true); in __rcu_read_lock()
413 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
414 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
416 * in an RCU read-side critical section and other special cases.
420 struct task_struct *t = current; in __rcu_read_unlock()
424 barrier(); // critical-section exit before .s check. in __rcu_read_unlock()
425 if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s))) in __rcu_read_unlock()
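
As the lines above show, preemptible RCU's __rcu_read_lock() is just an increment of the task's ->rcu_read_lock_nesting counter, and __rcu_read_unlock() is a decrement; only when the outermost unlock finds ->rcu_read_unlock_special nonzero does the slow path (rcu_read_unlock_special()) run. A tiny standalone model of that fast-path/slow-path split follows, with invented names.

#include <stdio.h>
#include <stdbool.h>

/* Per-"task" state modelling ->rcu_read_lock_nesting and the */
/* ->rcu_read_unlock_special word from the listing above.     */
static int  nesting;
static bool unlock_special;	/* e.g. set when the reader was preempted */

static void model_read_lock(void)
{
	nesting++;			/* fast path: just bump the counter */
}

static void model_read_unlock(void)
{
	if (--nesting == 0 && unlock_special) {
		/* Outermost unlock with deferred work pending: slow path. */
		printf("outermost unlock: reporting the deferred quiescent state\n");
		unlock_special = false;
	}
}

int main(void)
{
	model_read_lock();
	model_read_lock();		/* nested read-side critical section */
	unlock_special = true;		/* pretend we were preempted while reading */
	model_read_unlock();		/* inner unlock: fast path only */
	model_read_unlock();		/* outermost unlock: slow path runs */
	return 0;
}
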
437 * Advance a ->blkd_tasks-list pointer to the next entry, instead
445 np = t->rcu_node_entry.next; in rcu_next_node_entry()
446 if (np == &rnp->blkd_tasks) in rcu_next_node_entry()
453 * preempted within an RCU read-side critical section.
457 return !list_empty(&rnp->blkd_tasks); in rcu_preempt_has_tasks()
480 * t->rcu_read_unlock_special cannot change. in rcu_preempt_deferred_qs_irqrestore()
482 special = t->rcu_read_unlock_special; in rcu_preempt_deferred_qs_irqrestore()
484 if (!special.s && !rdp->cpu_no_qs.b.exp) { in rcu_preempt_deferred_qs_irqrestore()
488 t->rcu_read_unlock_special.s = 0; in rcu_preempt_deferred_qs_irqrestore()
491 rdp->cpu_no_qs.b.norm = false; in rcu_preempt_deferred_qs_irqrestore()
503 * blocked-tasks list below. in rcu_preempt_deferred_qs_irqrestore()
505 if (rdp->cpu_no_qs.b.exp) in rcu_preempt_deferred_qs_irqrestore()
508 /* Clean up if blocked during RCU read-side critical section. */ in rcu_preempt_deferred_qs_irqrestore()
517 rnp = t->rcu_blocked_node; in rcu_preempt_deferred_qs_irqrestore()
519 WARN_ON_ONCE(rnp != t->rcu_blocked_node); in rcu_preempt_deferred_qs_irqrestore()
522 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq && in rcu_preempt_deferred_qs_irqrestore()
523 (!empty_norm || rnp->qsmask)); in rcu_preempt_deferred_qs_irqrestore()
525 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ in rcu_preempt_deferred_qs_irqrestore()
527 list_del_init(&t->rcu_node_entry); in rcu_preempt_deferred_qs_irqrestore()
528 t->rcu_blocked_node = NULL; in rcu_preempt_deferred_qs_irqrestore()
530 rnp->gp_seq, t->pid); in rcu_preempt_deferred_qs_irqrestore()
531 if (&t->rcu_node_entry == rnp->gp_tasks) in rcu_preempt_deferred_qs_irqrestore()
532 WRITE_ONCE(rnp->gp_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
533 if (&t->rcu_node_entry == rnp->exp_tasks) in rcu_preempt_deferred_qs_irqrestore()
534 WRITE_ONCE(rnp->exp_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
536 /* Snapshot ->boost_mtx ownership w/rnp->lock held. */ in rcu_preempt_deferred_qs_irqrestore()
537 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t; in rcu_preempt_deferred_qs_irqrestore()
538 if (&t->rcu_node_entry == rnp->boost_tasks) in rcu_preempt_deferred_qs_irqrestore()
539 WRITE_ONCE(rnp->boost_tasks, np); in rcu_preempt_deferred_qs_irqrestore()
543 * If this was the last task on the current list, and if in rcu_preempt_deferred_qs_irqrestore()
545 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock, in rcu_preempt_deferred_qs_irqrestore()
551 rnp->gp_seq, in rcu_preempt_deferred_qs_irqrestore()
552 0, rnp->qsmask, in rcu_preempt_deferred_qs_irqrestore()
553 rnp->level, in rcu_preempt_deferred_qs_irqrestore()
554 rnp->grplo, in rcu_preempt_deferred_qs_irqrestore()
555 rnp->grphi, in rcu_preempt_deferred_qs_irqrestore()
556 !!rnp->gp_tasks); in rcu_preempt_deferred_qs_irqrestore()
571 rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex); in rcu_preempt_deferred_qs_irqrestore()
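
The rcu_preempt_deferred_qs_irqrestore() lines above show the unqueueing side: the no-longer-blocked task is deleted from ->blkd_tasks, and if ->gp_tasks, ->exp_tasks, or ->boost_tasks happened to point at it, each such pointer is advanced to the next entry, which is what rcu_next_node_entry() computes (NULL once the end of the list is reached). Below is a compilable toy of that pointer-advance idea, using a plain singly linked list and invented names.

#include <stdio.h>
#include <stddef.h>

/* Toy list standing in for ->blkd_tasks; gp_tasks stands in for the */
/* ->gp_tasks / ->exp_tasks / ->boost_tasks waiter pointers.         */
struct tnode { const char *name; struct tnode *next; };

static struct tnode c = { "C", NULL };
static struct tnode b = { "B", &c };
static struct tnode a = { "A", &b };
static struct tnode *blkd = &a;		/* list: A -> B -> C            */
static struct tnode *gp_tasks = &b;	/* the GP waits on B and after  */

/* Remove @t from the list and advance any waiter pointer naming it. */
static void unqueue(struct tnode *t)
{
	struct tnode **pp = &blkd;
	struct tnode *np = t->next;	/* next entry, or NULL at list end,
					 * modelling rcu_next_node_entry() */

	while (*pp != t)
		pp = &(*pp)->next;
	*pp = t->next;			/* delete from the blocked-tasks list */
	if (gp_tasks == t)
		gp_tasks = np;		/* the GP now waits from np onward */
}

int main(void)
{
	unqueue(&b);
	printf("gp_tasks now: %s\n", gp_tasks ? gp_tasks->name : "NULL");
	unqueue(&c);	/* last waited-on task gone: pointer drops to NULL */
	printf("gp_tasks now: %s\n", gp_tasks ? gp_tasks->name : "NULL");
	return 0;
}
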
578 * Is a deferred quiescent-state pending, and are we also not in
579 * an RCU read-side critical section? It is the caller's responsibility
589 READ_ONCE(t->rcu_read_unlock_special.s)) && in rcu_preempt_need_deferred_qs()
596 * not being in an RCU read-side critical section. The caller must
611 * Minimal handler to give the scheduler a chance to re-evaluate.
618 rdp->defer_qs_iw_pending = false; in rcu_preempt_deferred_qs_handler()
624 * read-side critical section.
642 struct rcu_node *rnp = rdp->mynode; in rcu_read_unlock_special()
644 expboost = (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks)) || in rcu_read_unlock_special()
645 (rdp->grpmask & READ_ONCE(rnp->expmask)) || in rcu_read_unlock_special()
647 ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node)) || in rcu_read_unlock_special()
649 t->rcu_blocked_node); in rcu_read_unlock_special()
661 set_tsk_need_resched(current); in rcu_read_unlock_special()
664 expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) { in rcu_read_unlock_special()
665 // Get scheduler to re-evaluate and call hooks. in rcu_read_unlock_special()
669 rdp->defer_qs_iw = IRQ_WORK_INIT_HARD( in rcu_read_unlock_special()
672 init_irq_work(&rdp->defer_qs_iw, in rcu_read_unlock_special()
674 rdp->defer_qs_iw_pending = true; in rcu_read_unlock_special()
675 irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu); in rcu_read_unlock_special()
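
The rcu_read_unlock_special() lines above show the deferral escape hatch: when the quiescent state cannot be reported from the current context, a per-CPU irq_work is armed, guarded by defer_qs_iw_pending so it is queued at most once, and its handler (rcu_preempt_deferred_qs_handler() above) simply clears the flag and lets the scheduler re-evaluate. Here is a minimal sketch of that queue-once irq_work pattern as a hypothetical kernel-code fragment, not the RCU code itself.

#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/printk.h>

/* Hypothetical per-CPU deferral state, analogous to defer_qs_iw and */
/* defer_qs_iw_pending in the listing above.                          */
struct defer_state {
	struct irq_work iw;
	bool pending;
};
static DEFINE_PER_CPU(struct defer_state, defer_state);

static void defer_handler(struct irq_work *iwp)
{
	struct defer_state *ds = container_of(iwp, struct defer_state, iw);

	ds->pending = false;	/* allow the next deferral to queue again */
	/* Real code would poke the scheduler here; this sketch just logs. */
	pr_info("deferred work ran on CPU %d\n", smp_processor_id());
}

/* Called with interrupts disabled; queue the handler at most once. */
static void defer_on_this_cpu(void)
{
	struct defer_state *ds = this_cpu_ptr(&defer_state);

	if (ds->pending)
		return;
	ds->iw = IRQ_WORK_INIT_HARD(defer_handler);
	ds->pending = true;
	irq_work_queue_on(&ds->iw, smp_processor_id());
}
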
688 * invoked -before- updating this rnp's ->gp_seq.
691 * block the newly created grace period, so set up ->gp_tasks accordingly.
702 (rnp->qsmaskinit || rnp->wait_blkd_tasks)) { in rcu_preempt_check_blocked_tasks()
703 WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next); in rcu_preempt_check_blocked_tasks()
704 t = container_of(rnp->gp_tasks, struct task_struct, in rcu_preempt_check_blocked_tasks()
706 trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"), in rcu_preempt_check_blocked_tasks()
707 rnp->gp_seq, t->pid); in rcu_preempt_check_blocked_tasks()
709 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
713 * Check for a quiescent state from the current CPU, including voluntary
717 * related to the current CPU, not to those related to tasks.
721 struct task_struct *t = current; in rcu_flavor_sched_clock_irq()
743 !t->rcu_read_unlock_special.b.need_qs && in rcu_flavor_sched_clock_irq()
745 t->rcu_read_unlock_special.b.need_qs = true; in rcu_flavor_sched_clock_irq()
749 * Check for a task exiting while in a preemptible-RCU read-side
758 struct task_struct *t = current; in exit_rcu()
760 if (unlikely(!list_empty(&current->rcu_node_entry))) { in exit_rcu()
763 WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true); in exit_rcu()
770 rcu_preempt_deferred_qs(current); in exit_rcu()
774 * Dump the blocked-tasks state, but limit the list dump to the
787 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n", in dump_blkd_tasks()
788 __func__, rnp->grplo, rnp->grphi, rnp->level, in dump_blkd_tasks()
789 (long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs); in dump_blkd_tasks()
790 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent) in dump_blkd_tasks()
791 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n", in dump_blkd_tasks()
792 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext); in dump_blkd_tasks()
793 pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n", in dump_blkd_tasks()
794 __func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks), in dump_blkd_tasks()
795 READ_ONCE(rnp->exp_tasks)); in dump_blkd_tasks()
796 pr_info("%s: ->blkd_tasks", __func__); in dump_blkd_tasks()
798 list_for_each(lhp, &rnp->blkd_tasks) { in dump_blkd_tasks()
804 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) { in dump_blkd_tasks()
808 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags, in dump_blkd_tasks()
809 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags); in dump_blkd_tasks()
827 rdp->cpu_no_qs.b.norm = false; in rcu_read_unlock_strict()
862 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
863 * dyntick-idle quiescent state visible to other CPUs, which will in
904 rcu_tasks_qs(current, preempt); in rcu_note_context_switch()
937 // non-preemptible kernels, there can be no context switches within RCU
938 // read-side critical sections, which in turn means that the leaf rcu_node
939 // structure's blocked-tasks list is always empty. There is therefore no need to
946 if (READ_ONCE(rdp->cpu_no_qs.b.exp)) in rcu_preempt_deferred_qs()
957 WARN_ON_ONCE(rnp->qsmask); in rcu_preempt_check_blocked_tasks()
961 * Check to see if this CPU is in a non-context-switch quiescent state,
975 * references only CPU-local variables that other CPUs in rcu_flavor_sched_clock_irq()
985 * while in preemptible RCU read-side critical sections.
992 * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
997 WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks)); in dump_blkd_tasks()
1012 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); in rcu_cpu_kthread_setup()
1015 WRITE_ONCE(rdp->rcuc_activity, jiffies); in rcu_cpu_kthread_setup()
1021 return rdp->nocb_cb_kthread == current; in rcu_is_callbacks_nocb_kthread()
1028 * Is the current CPU running the RCU-callbacks kthread?
1033 return rdp->rcu_cpu_kthread_task == current || in rcu_is_callbacks_kthread()
1040 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1041 * or ->boost_tasks, advancing the pointer to the next task in the
1042 * ->blkd_tasks list.
1053 if (READ_ONCE(rnp->exp_tasks) == NULL && in rcu_boost()
1054 READ_ONCE(rnp->boost_tasks) == NULL) in rcu_boost()
1055 return 0; /* Nothing left to boost. */ in rcu_boost()
1061 * might exit their RCU read-side critical sections on their own. in rcu_boost()
1063 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) { in rcu_boost()
1069 * Preferentially boost tasks blocking expedited grace periods. in rcu_boost()
1071 * expedited grace period must boost all blocked tasks, including in rcu_boost()
1072 * those blocking the pre-existing normal grace period. in rcu_boost()
1074 if (rnp->exp_tasks != NULL) in rcu_boost()
1075 tb = rnp->exp_tasks; in rcu_boost()
1077 tb = rnp->boost_tasks; in rcu_boost()
1080 * We boost task t by manufacturing an rt_mutex that appears to in rcu_boost()
1083 * exits its outermost RCU read-side critical section. Then in rcu_boost()
1084 * simply acquiring this artificial rt_mutex will boost task in rcu_boost()
1087 * Note that task t must acquire rnp->lock to remove itself from in rcu_boost()
1088 * the ->blkd_tasks list, which it will do from exit() if from in rcu_boost()
1090 * stay around at least until we drop rnp->lock. Note that in rcu_boost()
1091 * rnp->lock also resolves races between our priority boosting in rcu_boost()
1092 * and task t's exiting its outermost RCU read-side critical in rcu_boost()
1096 rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t); in rcu_boost()
1099 rt_mutex_lock(&rnp->boost_mtx); in rcu_boost()
1100 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */ in rcu_boost()
1101 rnp->n_boosts++; in rcu_boost()
1103 return READ_ONCE(rnp->exp_tasks) != NULL || in rcu_boost()
1104 READ_ONCE(rnp->boost_tasks) != NULL; in rcu_boost()
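
The rcu_boost() comments above describe its core loop: pick a boost target, preferring ->exp_tasks over ->boost_tasks because an expedited grace period must boost every blocked task anyway, lend priority to that task through a proxy-locked rt_mutex until it leaves its outermost read-side critical section, and report whether more boosting remains. The sketch below models only the target selection and the final more-work-left test; the rt_mutex proxy-locking trick itself is kernel-internal and not reproduced, and all names are stand-ins.

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-ins for the ->exp_tasks and ->boost_tasks pointers. */
struct task { const char *comm; };

struct node_state {
	struct task *exp_tasks;		/* first task blocking an expedited GP */
	struct task *boost_tasks;	/* first task still to boost for the normal GP */
};

/* Returns true if another boosting pass is still needed, mirroring the */
/* "return READ_ONCE(rnp->exp_tasks) != NULL || ..." shape above.       */
static bool boost_one(struct node_state *rnp)
{
	struct task *tb;

	if (!rnp->exp_tasks && !rnp->boost_tasks)
		return false;			/* nothing left to boost */

	/* Preferentially boost tasks blocking expedited grace periods. */
	tb = rnp->exp_tasks ? rnp->exp_tasks : rnp->boost_tasks;
	printf("boosting %s\n", tb->comm);

	/* Pretend the boosted task finished its critical section. */
	if (tb == rnp->exp_tasks)
		rnp->exp_tasks = NULL;
	else
		rnp->boost_tasks = NULL;

	return rnp->exp_tasks || rnp->boost_tasks;
}

int main(void)
{
	struct task e = { "exp-reader" }, n = { "slow-reader" };
	struct node_state rnp = { .exp_tasks = &e, .boost_tasks = &n };

	while (boost_one(&rnp))
		;
	return 0;
}
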
1108 * Priority-boosting kthread, one per leaf rcu_node.
1116 trace_rcu_utilization(TPS("Start boost kthread@init")); in rcu_boost_kthread()
1118 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING); in rcu_boost_kthread()
1119 trace_rcu_utilization(TPS("End boost kthread@rcu_wait")); in rcu_boost_kthread()
1120 rcu_wait(READ_ONCE(rnp->boost_tasks) || in rcu_boost_kthread()
1121 READ_ONCE(rnp->exp_tasks)); in rcu_boost_kthread()
1122 trace_rcu_utilization(TPS("Start boost kthread@rcu_wait")); in rcu_boost_kthread()
1123 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING); in rcu_boost_kthread()
1130 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING); in rcu_boost_kthread()
1131 trace_rcu_utilization(TPS("End boost kthread@rcu_yield")); in rcu_boost_kthread()
1133 trace_rcu_utilization(TPS("Start boost kthread@rcu_yield")); in rcu_boost_kthread()
1138 trace_rcu_utilization(TPS("End boost kthread@notreached")); in rcu_boost_kthread()
1144 * blocking the current grace period, and, if so, tell the per-rcu_node
1146 * period in progress, it is always time to boost.
1148 * The caller must hold rnp->lock, which this function releases.
1149 * The ->boost_kthread_task is immortal, so we don't need to worry
1153 __releases(rnp->lock) in rcu_initiate_boost()
1156 if (!rnp->boost_kthread_task || in rcu_initiate_boost()
1157 (!rcu_preempt_blocked_readers_cgp(rnp) && !rnp->exp_tasks)) { in rcu_initiate_boost()
1161 if (rnp->exp_tasks != NULL || in rcu_initiate_boost()
1162 (rnp->gp_tasks != NULL && in rcu_initiate_boost()
1163 rnp->boost_tasks == NULL && in rcu_initiate_boost()
1164 rnp->qsmask == 0 && in rcu_initiate_boost()
1165 (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld || in rcu_initiate_boost()
1167 if (rnp->exp_tasks == NULL) in rcu_initiate_boost()
1168 WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks); in rcu_initiate_boost()
1170 rcu_wake_cond(rnp->boost_kthread_task, in rcu_initiate_boost()
1171 READ_ONCE(rnp->boost_kthread_status)); in rcu_initiate_boost()
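
rcu_initiate_boost() above wakes the per-rcu_node boost kthread only when boosting can still help: tasks block an expedited grace period, or tasks block the normal grace period while no boosting is already underway, every CPU of this rcu_node has reported (->qsmask == 0), and either the boost delay has expired or callbacks are overloaded (the excerpt shows one further OR'd condition that is truncated and not modelled here). Below is a hedged restatement of that test as a standalone predicate; the struct is a stand-in holding only the fields the test reads.

#include <stdio.h>
#include <stdbool.h>

/* Minimal stand-in for the handful of rcu_node fields the test reads. */
struct rnp_model {
	void *exp_tasks;		/* tasks blocking an expedited GP       */
	void *gp_tasks;			/* tasks blocking the normal GP         */
	void *boost_tasks;		/* task currently targeted for boosting */
	unsigned long qsmask;		/* CPUs still owing a quiescent state   */
	unsigned long boost_time;	/* earliest time boosting is permitted  */
};

/* Sketch of the "time to boost?" test; @overloaded folds in the cbovld check. */
static bool should_initiate_boost(struct rnp_model *rnp,
				  unsigned long now, bool overloaded)
{
	if (rnp->exp_tasks)
		return true;		/* expedited GPs are always worth boosting */
	return rnp->gp_tasks &&		/* someone blocks the normal GP            */
	       !rnp->boost_tasks &&	/* ...and boosting is not already underway */
	       !rnp->qsmask &&		/* ...and only blocked tasks remain        */
	       (overloaded || (long)(now - rnp->boost_time) >= 0);
}

int main(void)
{
	struct rnp_model rnp = { .gp_tasks = (void *)1, .boost_time = 100 };

	printf("boost at t=50:  %d\n", should_initiate_boost(&rnp, 50, false));
	printf("boost at t=150: %d\n", should_initiate_boost(&rnp, 150, false));
	return 0;
}

When this test passes and ->exp_tasks is NULL, the excerpt shows the real code additionally seeding ->boost_tasks from ->gp_tasks before waking the kthread, so that rcu_boost() knows where to start.
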
1180 * Do priority-boost accounting for the start of a new grace period.
1184 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES; in rcu_preempt_boost_start_gp()
1188 * Create an RCU-boost kthread for the specified node if one does not
1194 int rnp_index = rnp - rcu_get_root(); in rcu_spawn_one_boost_kthread()
1198 mutex_lock(&rnp->boost_kthread_mutex); in rcu_spawn_one_boost_kthread()
1199 if (rnp->boost_kthread_task || !rcu_scheduler_fully_active) in rcu_spawn_one_boost_kthread()
1208 rnp->boost_kthread_task = t; in rcu_spawn_one_boost_kthread()
1215 mutex_unlock(&rnp->boost_kthread_mutex); in rcu_spawn_one_boost_kthread()
1219 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1221 * held, so the value of rnp->qsmaskinit will be stable.
1223 * We don't include outgoingcpu in the affinity set, use -1 if there is
1227 * Any future concurrent calls are serialized via ->boost_kthread_mutex.
1231 struct task_struct *t = rnp->boost_kthread_task; in rcu_boost_kthread_setaffinity()
1240 mutex_lock(&rnp->boost_kthread_mutex); in rcu_boost_kthread_setaffinity()
1253 mutex_unlock(&rnp->boost_kthread_mutex); in rcu_boost_kthread_setaffinity()
1260 __releases(rnp->lock) in rcu_initiate_boost()
1281 * grace-period kthread will do force_quiescent_state() processing?
1300 * Bind the RCU grace-period kthreads to the housekeeping CPU.
1306 housekeeping_affine(current, HK_TYPE_RCU); in rcu_bind_gp_kthread()