Lines matching references to: t

106 static void rcu_read_unlock_special(struct task_struct *t);
158 struct task_struct *t = current; in rcu_preempt_ctxt_queue() local
185 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
203 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
216 list_add(&t->rcu_node_entry, rnp->exp_tasks); in rcu_preempt_ctxt_queue()
227 list_add(&t->rcu_node_entry, rnp->gp_tasks); in rcu_preempt_ctxt_queue()
244 WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
248 WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
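The rcu_preempt_ctxt_queue() references above all manipulate one structure: a per-node ->blkd_tasks list of preempted readers, with ->gp_tasks and ->exp_tasks pointing at the first entry that the current normal or expedited grace period must still wait for. Below is a simplified, hedged sketch of that queuing pattern; demo_node, demo_task, and demo_queue_reader are invented names, and the placement policy is reduced to a single flag rather than the kernel's multi-case logic.

        #include <linux/list.h>
        #include <linux/types.h>        /* bool */
        #include <linux/compiler.h>     /* WRITE_ONCE() */

        struct demo_node {
                struct list_head blkd_tasks;    /* preempted readers */
                struct list_head *gp_tasks;     /* first entry blocking the current GP */
                struct list_head *exp_tasks;    /* first entry blocking the expedited GP */
        };

        struct demo_task {
                struct list_head node_entry;    /* analogous to t->rcu_node_entry */
        };

        /* Queue a newly preempted reader; the real code distinguishes more cases. */
        static void demo_queue_reader(struct demo_node *np, struct demo_task *t,
                                      bool blocks_current_gp)
        {
                if (blocks_current_gp) {
                        /* Must be waited for: queue behind the existing blockers. */
                        list_add_tail(&t->node_entry, &np->blkd_tasks);
                        if (!np->gp_tasks)
                                WRITE_ONCE(np->gp_tasks, &t->node_entry);
                } else {
                        /* Does not block the current GP: queue at the head. */
                        list_add(&t->node_entry, &np->blkd_tasks);
                }
        }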
314 struct task_struct *t = current; in rcu_note_context_switch() local
322 !t->rcu_read_unlock_special.b.blocked) { in rcu_note_context_switch()
327 t->rcu_read_unlock_special.b.blocked = true; in rcu_note_context_switch()
328 t->rcu_blocked_node = rnp; in rcu_note_context_switch()
336 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); in rcu_note_context_switch()
338 t->pid, in rcu_note_context_switch()
344 rcu_preempt_deferred_qs(t); in rcu_note_context_switch()
420 struct task_struct *t = current; in __rcu_read_unlock() local
425 if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s))) in __rcu_read_unlock()
426 rcu_read_unlock_special(t); in __rcu_read_unlock()
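The __rcu_read_unlock() references test a single word, ->rcu_read_unlock_special.s, to decide whether the slow path rcu_read_unlock_special() is needed, while other sites set individual flags such as .b.blocked and .b.need_qs. That works because the flags are byte-sized fields overlaid, in a union, with one word. Below is a hedged userspace illustration of the same pattern; the kernel's union has more fields and different sizes, and demo_special/demo_read_unlock are invented names.

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        union demo_special {
                struct {
                        uint8_t blocked;        /* preempted inside a read-side section */
                        uint8_t need_qs;        /* quiescent state requested */
                } b;
                uint16_t s;                     /* all flags as one word */
        };

        static void demo_read_unlock(union demo_special *sp)
        {
                /* Fast path: nothing special pending, nothing to do. */
                if (!sp->s)
                        return;
                /* Slow path: handle each flag, then clear them all at once. */
                if (sp->b.blocked)
                        printf("dequeue from blocked-tasks list\n");
                if (sp->b.need_qs)
                        printf("report quiescent state\n");
                sp->s = 0;
        }

        int main(void)
        {
                union demo_special sp = { .b = { .blocked = 1 } };

                demo_read_unlock(&sp); /* takes the slow path once */
                demo_read_unlock(&sp); /* now the fast path */
                return 0;
        }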
440 static struct list_head *rcu_next_node_entry(struct task_struct *t, in rcu_next_node_entry() argument
445 np = t->rcu_node_entry.next; in rcu_next_node_entry()
466 rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) in rcu_preempt_deferred_qs_irqrestore() argument
482 special = t->rcu_read_unlock_special; in rcu_preempt_deferred_qs_irqrestore()
488 t->rcu_read_unlock_special.s = 0; in rcu_preempt_deferred_qs_irqrestore()
517 rnp = t->rcu_blocked_node; in rcu_preempt_deferred_qs_irqrestore()
519 WARN_ON_ONCE(rnp != t->rcu_blocked_node); in rcu_preempt_deferred_qs_irqrestore()
526 np = rcu_next_node_entry(t, rnp); in rcu_preempt_deferred_qs_irqrestore()
527 list_del_init(&t->rcu_node_entry); in rcu_preempt_deferred_qs_irqrestore()
528 t->rcu_blocked_node = NULL; in rcu_preempt_deferred_qs_irqrestore()
530 rnp->gp_seq, t->pid); in rcu_preempt_deferred_qs_irqrestore()
531 if (&t->rcu_node_entry == rnp->gp_tasks) in rcu_preempt_deferred_qs_irqrestore()
533 if (&t->rcu_node_entry == rnp->exp_tasks) in rcu_preempt_deferred_qs_irqrestore()
537 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t; in rcu_preempt_deferred_qs_irqrestore()
538 if (&t->rcu_node_entry == rnp->boost_tasks) in rcu_preempt_deferred_qs_irqrestore()
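The rcu_preempt_deferred_qs_irqrestore() references show the dequeue side: rcu_next_node_entry() captures the successor before list_del_init(), and if ->gp_tasks, ->exp_tasks, or ->boost_tasks pointed at the removed entry, that marker is advanced past it (or cleared when the end of the list is reached). A hedged continuation of the earlier demo_node/demo_task sketch, covering only the two markers shown there:

        /* Dequeue a reader, advancing any marker that pointed at its entry. */
        static void demo_dequeue_reader(struct demo_node *np, struct demo_task *t)
        {
                struct list_head *next = t->node_entry.next;

                /* Reached the list head: no further entries to point at. */
                if (next == &np->blkd_tasks)
                        next = NULL;

                if (np->gp_tasks == &t->node_entry)
                        WRITE_ONCE(np->gp_tasks, next);
                if (np->exp_tasks == &t->node_entry)
                        WRITE_ONCE(np->exp_tasks, next);

                list_del_init(&t->node_entry);
        }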
586 static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t) in rcu_preempt_need_deferred_qs() argument
589 READ_ONCE(t->rcu_read_unlock_special.s)) && in rcu_preempt_need_deferred_qs()
600 notrace void rcu_preempt_deferred_qs(struct task_struct *t) in rcu_preempt_deferred_qs() argument
604 if (!rcu_preempt_need_deferred_qs(t)) in rcu_preempt_deferred_qs()
607 rcu_preempt_deferred_qs_irqrestore(t, flags); in rcu_preempt_deferred_qs()
626 static void rcu_read_unlock_special(struct task_struct *t) in rcu_read_unlock_special() argument
644 expboost = (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks)) || in rcu_read_unlock_special()
647 ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node)) || in rcu_read_unlock_special()
649 t->rcu_blocked_node); in rcu_read_unlock_special()
681 rcu_preempt_deferred_qs_irqrestore(t, flags); in rcu_read_unlock_special()
695 struct task_struct *t; in rcu_preempt_check_blocked_tasks() local
704 t = container_of(rnp->gp_tasks, struct task_struct, in rcu_preempt_check_blocked_tasks()
707 rnp->gp_seq, t->pid); in rcu_preempt_check_blocked_tasks()
721 struct task_struct *t = current; in rcu_flavor_sched_clock_irq() local
727 if (rcu_preempt_need_deferred_qs(t)) { in rcu_flavor_sched_clock_irq()
728 set_tsk_need_resched(t); in rcu_flavor_sched_clock_irq()
731 } else if (rcu_preempt_need_deferred_qs(t)) { in rcu_flavor_sched_clock_irq()
732 rcu_preempt_deferred_qs(t); /* Report deferred QS. */ in rcu_flavor_sched_clock_irq()
743 !t->rcu_read_unlock_special.b.need_qs && in rcu_flavor_sched_clock_irq()
745 t->rcu_read_unlock_special.b.need_qs = true; in rcu_flavor_sched_clock_irq()
758 struct task_struct *t = current; in exit_rcu() local
763 WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true); in exit_rcu()
930 static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t) in rcu_preempt_need_deferred_qs() argument
942 notrace void rcu_preempt_deferred_qs(struct task_struct *t) in rcu_preempt_deferred_qs() argument
1050 struct task_struct *t; in rcu_boost() local
1095 t = container_of(tb, struct task_struct, rcu_node_entry); in rcu_boost()
1096 rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t); in rcu_boost()
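rcu_boost() and rcu_preempt_check_blocked_tasks() both recover the task_struct from its embedded rcu_node_entry list_head with container_of(). In terms of the demo types above, that step looks like the sketch below; demo_task_from_entry is an invented helper.

        /* container_of() comes from <linux/kernel.h> (or <linux/container_of.h> in newer trees). */
        static struct demo_task *demo_task_from_entry(struct list_head *entry)
        {
                return container_of(entry, struct demo_task, node_entry);
        }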
1196 struct task_struct *t; in rcu_spawn_one_boost_kthread() local
1202 t = kthread_create(rcu_boost_kthread, (void *)rnp, in rcu_spawn_one_boost_kthread()
1204 if (WARN_ON_ONCE(IS_ERR(t))) in rcu_spawn_one_boost_kthread()
1208 rnp->boost_kthread_task = t; in rcu_spawn_one_boost_kthread()
1211 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); in rcu_spawn_one_boost_kthread()
1212 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ in rcu_spawn_one_boost_kthread()
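The rcu_spawn_one_boost_kthread() references follow a common create/record/prioritize/wake pattern. Below is a hedged kernel-style sketch of that pattern, with invented names (my_node, my_worker_fn, my_spawn_worker) and no claim to match the RCU code beyond the calls listed above; note that sched_setscheduler_nocheck() may not be available to loadable modules on recent kernels.

        #include <linux/kthread.h>
        #include <linux/sched.h>
        #include <linux/err.h>
        #include <linux/bug.h>
        #include <uapi/linux/sched/types.h>     /* struct sched_param */

        struct my_node {
                struct task_struct *worker;     /* analogous to rnp->boost_kthread_task */
        };

        static int my_worker_fn(void *arg)
        {
                /* Placeholder loop; real worker logic elided. */
                while (!kthread_should_stop())
                        schedule_timeout_interruptible(HZ);
                return 0;
        }

        static int my_spawn_worker(struct my_node *node, int prio)
        {
                struct sched_param sp = { .sched_priority = prio };
                struct task_struct *t;

                t = kthread_create(my_worker_fn, node, "my_worker");
                if (WARN_ON_ONCE(IS_ERR(t)))
                        return PTR_ERR(t);

                node->worker = t;
                sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
                wake_up_process(t);     /* let it reach its wait loop quickly */
                return 0;
        }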
1231 struct task_struct *t = rnp->boost_kthread_task; in rcu_boost_kthread_setaffinity() local
1236 if (!t) in rcu_boost_kthread_setaffinity()
1252 set_cpus_allowed_ptr(t, cm); in rcu_boost_kthread_setaffinity()
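rcu_boost_kthread_setaffinity() ends by calling set_cpus_allowed_ptr() with a cpumask built from the node's CPUs (minus any outgoing CPU). A minimal hedged sketch of that last step, binding a worker thread to a single CPU; my_bind_worker is an invented helper and error handling is deliberately thin.

        #include <linux/cpumask.h>
        #include <linux/sched.h>
        #include <linux/gfp.h>

        static void my_bind_worker(struct task_struct *t, int cpu)
        {
                cpumask_var_t cm;

                if (!t || !zalloc_cpumask_var(&cm, GFP_KERNEL))
                        return;
                cpumask_set_cpu(cpu, cm);
                set_cpus_allowed_ptr(t, cm);    /* migrate the thread onto 'cm' */
                free_cpumask_var(cm);
        }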