Lines matching references to the identifier "t" (kernel/rcu/tasks.h)
18 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
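
The pertask_func_t hook is how each Tasks RCU flavor plugs its per-task scan into the shared grace-period machinery. A sketch of how the classic flavor wires it up at boot, reconstructed and condensed from the kernel source (the exact set of assignments varies by kernel version):

    static int __init rcu_spawn_tasks_kthread(void)
    {
            /* Hook flavor-specific callbacks into the shared machinery. */
            rcu_tasks.pertask_func = rcu_tasks_pertask;         /* Per-task scan. */
            rcu_tasks.holdouts_func = check_all_holdout_tasks;  /* Holdout recheck. */
            rcu_spawn_tasks_kthread_generic(&rcu_tasks);        /* Start GP kthread. */
            return 0;
    }
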
679 struct task_struct *t; in rcu_spawn_tasks_kthread_generic() local
681 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); in rcu_spawn_tasks_kthread_generic()
682 …if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavio… in rcu_spawn_tasks_kthread_generic()
754 static void exit_tasks_rcu_finish_trace(struct task_struct *t);
773 struct task_struct *t; in rcu_tasks_wait_gp() local
787 for_each_process_thread(g, t) in rcu_tasks_wait_gp()
788 rtp->pertask_func(t, &holdouts); in rcu_tasks_wait_gp()
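
Together, the rcu_tasks_wait_gp() references show the shared wait loop: every task is run through the flavor's pertask_func() to build a holdout list, which is then polled until empty. A condensed sketch, with the pregp/postscan/postgp hooks and the adaptive sleep interval elided:

    LIST_HEAD(holdouts);
    struct task_struct *g, *t;
    bool firstreport = true;
    bool needreport = false;    /* Set once the stall-warning timeout expires. */

    rcu_read_lock();
    for_each_process_thread(g, t)
            rtp->pertask_func(t, &holdouts);        /* e.g. rcu_tasks_pertask() */
    rcu_read_unlock();

    /* Poll until every holdout task passes through a quiescent state. */
    while (!list_empty(&holdouts)) {
            schedule_timeout_idle(HZ / 10);         /* Interval simplified. */
            rtp->holdouts_func(&holdouts, needreport, &firstreport);
    }
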
927 static bool rcu_tasks_is_holdout(struct task_struct *t) in rcu_tasks_is_holdout() argument
932 if (!READ_ONCE(t->on_rq)) in rcu_tasks_is_holdout()
940 if (is_idle_task(t)) in rcu_tasks_is_holdout()
943 cpu = task_cpu(t); in rcu_tasks_is_holdout()
946 if (t == idle_task(cpu) && !rcu_cpu_online(cpu)) in rcu_tasks_is_holdout()
953 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) in rcu_tasks_pertask() argument
955 if (t != current && rcu_tasks_is_holdout(t)) { in rcu_tasks_pertask()
956 get_task_struct(t); in rcu_tasks_pertask()
957 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); in rcu_tasks_pertask()
958 WRITE_ONCE(t->rcu_tasks_holdout, true); in rcu_tasks_pertask()
959 list_add(&t->rcu_tasks_holdout_list, hop); in rcu_tasks_pertask()
997 static void check_holdout_task(struct task_struct *t, in check_holdout_task() argument
1002 if (!READ_ONCE(t->rcu_tasks_holdout) || in check_holdout_task()
1003 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || in check_holdout_task()
1004 !rcu_tasks_is_holdout(t) || in check_holdout_task()
1006 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) { in check_holdout_task()
1007 WRITE_ONCE(t->rcu_tasks_holdout, false); in check_holdout_task()
1008 list_del_init(&t->rcu_tasks_holdout_list); in check_holdout_task()
1009 put_task_struct(t); in check_holdout_task()
1012 rcu_request_urgent_qs_task(t); in check_holdout_task()
1019 cpu = task_cpu(t); in check_holdout_task()
1021 t, ".I"[is_idle_task(t)], in check_holdout_task()
1023 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout, in check_holdout_task()
1024 t->rcu_tasks_idle_cpu, cpu); in check_holdout_task()
1025 sched_show_task(t); in check_holdout_task()
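
Line 1005 contains no reference to t and is therefore missing from this listing, which leaves the condition at lines 1002-1006 looking incomplete. For context, the full release test in check_holdout_task() reads (reconstructed):

    if (!READ_ONCE(t->rcu_tasks_holdout) ||
        t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
        !rcu_tasks_is_holdout(t) ||
        (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
         !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
            /* Voluntary context switch, exit, or nohz_full userspace:
             * the task is no longer a holdout, so stop tracking it. */
            WRITE_ONCE(t->rcu_tasks_holdout, false);
            list_del_init(&t->rcu_tasks_holdout_list);
            put_task_struct(t);
            return;
    }
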
1032 struct task_struct *t, *t1; in check_all_holdout_tasks() local
1034 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) { in check_all_holdout_tasks()
1035 check_holdout_task(t, needreport, firstreport); in check_all_holdout_tasks()
1194 struct task_struct *t = current; in exit_tasks_rcu_stop() local
1196 __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx); in exit_tasks_rcu_stop()
1418 static u8 rcu_ld_need_qs(struct task_struct *t) in rcu_ld_need_qs() argument
1421 return smp_load_acquire(&t->trc_reader_special.b.need_qs); in rcu_ld_need_qs()
1425 static void rcu_st_need_qs(struct task_struct *t, u8 v) in rcu_st_need_qs() argument
1427 smp_store_release(&t->trc_reader_special.b.need_qs, v); in rcu_st_need_qs()
1436 u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new) in rcu_trc_cmpxchg_need_qs() argument
1439 union rcu_special trs_old = READ_ONCE(t->trc_reader_special); in rcu_trc_cmpxchg_need_qs()
1445 ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s); in rcu_trc_cmpxchg_need_qs()
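
need_qs is a single byte inside union rcu_special, but rcu_trc_cmpxchg_need_qs() updates it with a cmpxchg() over the whole 32-bit .s word so that racing updates to the sibling bytes (blocked, need_mb) are never lost; the rcu_ld_need_qs()/rcu_st_need_qs() wrappers at lines 1418-1427 add acquire/release ordering on top. A minimal self-contained userspace analog in C11 atomics, assuming a byte layout like the kernel's union rcu_special:

    #include <stdatomic.h>
    #include <stdint.h>

    union special {                /* Analog of union rcu_special (layout assumed). */
            struct {
                    uint8_t blocked;
                    uint8_t need_qs;
                    uint8_t need_mb;
                    uint8_t unused;
            } b;
            uint32_t s;
    };

    static _Atomic uint32_t word;  /* Stands in for t->trc_reader_special.s. */

    /* Byte-wide compare-and-exchange implemented as a word-wide CAS. */
    static uint8_t cmpxchg_need_qs(uint8_t old, uint8_t new)
    {
            union special o, n;

            o.s = atomic_load(&word);
            if (o.b.need_qs != old)
                    return o.b.need_qs;     /* Early mismatch: report what we saw. */
            n = o;
            n.b.need_qs = new;
            if (atomic_compare_exchange_strong(&word, &o.s, n.s))
                    return old;             /* Success: expected byte was in place. */
            return o.b.need_qs;             /* Raced: the failed CAS refreshed o.s. */
    }
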
1454 void rcu_read_unlock_trace_special(struct task_struct *t) in rcu_read_unlock_trace_special() argument
1462 trs = smp_load_acquire(&t->trc_reader_special); in rcu_read_unlock_trace_special()
1464 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb) in rcu_read_unlock_trace_special()
1468 u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS, in rcu_read_unlock_trace_special()
1474 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu); in rcu_read_unlock_trace_special()
1476 list_del_init(&t->trc_blkd_node); in rcu_read_unlock_trace_special()
1477 WRITE_ONCE(t->trc_reader_special.b.blocked, false); in rcu_read_unlock_trace_special()
1480 WRITE_ONCE(t->trc_reader_nesting, 0); in rcu_read_unlock_trace_special()
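
rcu_read_unlock_trace_special() is the slow path taken when the outermost rcu_read_unlock_trace() observes special state: either a grace period wants a quiescent-state report, or the reader blocked and must dequeue itself from its CPU's blocked-readers list. A sketch of the reader side that can reach it, where do_lookup() is a hypothetical stand-in for work on trace-RCU-protected data:

    #include <linux/rcupdate_trace.h>

    static long reader(void)
    {
            long ret;

            rcu_read_lock_trace();          /* Bumps current->trc_reader_nesting. */
            ret = do_lookup();              /* Hypothetical read-side traversal. */
            rcu_read_unlock_trace();        /* On the outermost unlock, nonzero
                                             * trc_reader_special routes through
                                             * rcu_read_unlock_trace_special(). */
            return ret;
    }
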
1485 void rcu_tasks_trace_qs_blkd(struct task_struct *t) in rcu_tasks_trace_qs_blkd() argument
1493 t->trc_blkd_cpu = smp_processor_id(); in rcu_tasks_trace_qs_blkd()
1496 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); in rcu_tasks_trace_qs_blkd()
1497 WRITE_ONCE(t->trc_reader_special.b.blocked, true); in rcu_tasks_trace_qs_blkd()
1503 static void trc_add_holdout(struct task_struct *t, struct list_head *bhp) in trc_add_holdout() argument
1505 if (list_empty(&t->trc_holdout_list)) { in trc_add_holdout()
1506 get_task_struct(t); in trc_add_holdout()
1507 list_add(&t->trc_holdout_list, bhp); in trc_add_holdout()
1513 static void trc_del_holdout(struct task_struct *t) in trc_del_holdout() argument
1515 if (!list_empty(&t->trc_holdout_list)) { in trc_del_holdout()
1516 list_del_init(&t->trc_holdout_list); in trc_del_holdout()
1517 put_task_struct(t); in trc_del_holdout()
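
trc_add_holdout() and trc_del_holdout() pair get_task_struct() with put_task_struct() so a task cannot be freed while it sits on a holdout list, and the task's own list node doubles as an "already queued?" flag, which is why removal uses list_del_init() rather than list_del(). The pattern in commented, condensed form (the kernel's holdout counter is elided; names here are illustrative):

    static void add_once(struct task_struct *t, struct list_head *bhp)
    {
            if (list_empty(&t->trc_holdout_list)) {         /* Not yet queued. */
                    get_task_struct(t);                     /* Pin while listed. */
                    list_add(&t->trc_holdout_list, bhp);
            }
    }

    static void del_once(struct task_struct *t)
    {
            if (!list_empty(&t->trc_holdout_list)) {
                    list_del_init(&t->trc_holdout_list);    /* Re-arm the flag. */
                    put_task_struct(t);                     /* Drop pin; may free t. */
            }
    }
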
1526 struct task_struct *t = current; in trc_read_check_handler() local
1530 if (unlikely(texp != t)) in trc_read_check_handler()
1535 nesting = READ_ONCE(t->trc_reader_nesting); in trc_read_check_handler()
1537 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); in trc_read_check_handler()
1547 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED); in trc_read_check_handler()
1558 static int trc_inspect_reader(struct task_struct *t, void *bhp_in) in trc_inspect_reader() argument
1561 int cpu = task_cpu(t); in trc_inspect_reader()
1565 if (task_curr(t) && !ofl) { in trc_inspect_reader()
1575 if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting)) in trc_inspect_reader()
1581 nesting = t->trc_reader_nesting; in trc_inspect_reader()
1582 WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t)))); in trc_inspect_reader()
1591 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); in trc_inspect_reader()
1600 if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED)) in trc_inspect_reader()
1601 trc_add_holdout(t, bhp); in trc_inspect_reader()
1606 static void trc_wait_for_one_reader(struct task_struct *t, in trc_wait_for_one_reader() argument
1612 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI in trc_wait_for_one_reader()
1616 if (t == current) { in trc_wait_for_one_reader()
1617 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); in trc_wait_for_one_reader()
1618 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); in trc_wait_for_one_reader()
1623 get_task_struct(t); in trc_wait_for_one_reader()
1624 if (!task_call_func(t, trc_inspect_reader, bhp)) { in trc_wait_for_one_reader()
1625 put_task_struct(t); in trc_wait_for_one_reader()
1628 put_task_struct(t); in trc_wait_for_one_reader()
1637 trc_add_holdout(t, bhp); in trc_wait_for_one_reader()
1638 if (task_curr(t) && in trc_wait_for_one_reader()
1641 cpu = task_cpu(t); in trc_wait_for_one_reader()
1644 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) in trc_wait_for_one_reader()
1648 t->trc_ipi_to_cpu = cpu; in trc_wait_for_one_reader()
1650 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) { in trc_wait_for_one_reader()
1657 t->trc_ipi_to_cpu = -1; in trc_wait_for_one_reader()
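
trc_wait_for_one_reader() escalates through three strategies: the current task reports its own quiescent state, a non-running task is inspected via task_call_func() while the scheduler holds it still, and a task actually running on another CPU is queued as a holdout and nudged with an IPI. Condensed and reconstructed, with IPI rate-limiting and the offline-CPU failure path elided:

    if (t == current) {                             /* 1: self-report a QS. */
            rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
            return;
    }
    get_task_struct(t);
    if (!task_call_func(t, trc_inspect_reader, bhp)) {
            put_task_struct(t);                     /* 2: inspected while held */
            return;                                 /*    off the CPU; done.   */
    }
    put_task_struct(t);
    trc_add_holdout(t, bhp);                        /* 3: it is running, so    */
    if (task_curr(t)) {                             /*    queue it and IPI it. */
            int cpu = task_cpu(t);

            t->trc_ipi_to_cpu = cpu;
            smp_call_function_single(cpu, trc_read_check_handler, t, 0);
    }
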
1666 static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself) in rcu_tasks_trace_pertask_prep() argument
1672 if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list)) in rcu_tasks_trace_pertask_prep()
1675 rcu_st_need_qs(t, 0); in rcu_tasks_trace_pertask_prep()
1676 t->trc_ipi_to_cpu = -1; in rcu_tasks_trace_pertask_prep()
1681 static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop) in rcu_tasks_trace_pertask() argument
1683 if (rcu_tasks_trace_pertask_prep(t, true)) in rcu_tasks_trace_pertask()
1684 trc_wait_for_one_reader(t, hop); in rcu_tasks_trace_pertask()
1694 struct task_struct *t; in rcu_tasks_trace_pregp_step() local
1719 t = cpu_curr_snapshot(cpu); in rcu_tasks_trace_pregp_step()
1720 if (rcu_tasks_trace_pertask_prep(t, true)) in rcu_tasks_trace_pregp_step()
1721 trc_add_holdout(t, hop); in rcu_tasks_trace_pregp_step()
1735 t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node); in rcu_tasks_trace_pregp_step()
1736 list_del_init(&t->trc_blkd_node); in rcu_tasks_trace_pregp_step()
1737 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); in rcu_tasks_trace_pregp_step()
1739 rcu_tasks_trace_pertask(t, hop); in rcu_tasks_trace_pregp_step()
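
Only two kinds of tasks can be inside a Tasks Trace reader when a grace period starts: tasks currently running on some CPU (captured with cpu_curr_snapshot()) and tasks that blocked inside a reader and queued themselves via rcu_tasks_trace_qs_blkd(). The pre-GP step therefore scans only those, as in this condensed sketch (per-CPU iteration simplified to a single rtpcp):

    for_each_online_cpu(cpu) {
            t = cpu_curr_snapshot(cpu);             /* Task running on this CPU. */
            if (rcu_tasks_trace_pertask_prep(t, true))
                    trc_add_holdout(t, hop);
    }

    /* Re-inspect every reader that blocked and queued itself. */
    raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
    list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
    while (!list_empty(&blkd_tasks)) {
            t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
            list_del_init(&t->trc_blkd_node);
            list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
            raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
            rcu_tasks_trace_pertask(t, hop);
            raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
    }
    raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
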
1772 static int trc_check_slow_task(struct task_struct *t, void *arg) in trc_check_slow_task() argument
1776 if (task_curr(t) && cpu_online(task_cpu(t))) in trc_check_slow_task()
1778 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting); in trc_check_slow_task()
1779 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu); in trc_check_slow_task()
1780 trc_rdrp->needqs = rcu_ld_need_qs(t); in trc_check_slow_task()
1785 static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) in show_stalled_task_trace() argument
1789 bool is_idle_tsk = is_idle_task(t); in show_stalled_task_trace()
1795 cpu = task_cpu(t); in show_stalled_task_trace()
1796 if (!task_call_func(t, trc_check_slow_task, &trc_rdr)) in show_stalled_task_trace()
1798 t->pid, in show_stalled_task_trace()
1799 ".I"[t->trc_ipi_to_cpu >= 0], in show_stalled_task_trace()
1803 t->pid, in show_stalled_task_trace()
1807 ".B"[!!data_race(t->trc_reader_special.b.blocked)], in show_stalled_task_trace()
1812 sched_show_task(t); in show_stalled_task_trace()
1829 struct task_struct *g, *t; in check_all_holdout_tasks_trace() local
1834 list_for_each_entry_safe(t, g, hop, trc_holdout_list) { in check_all_holdout_tasks_trace()
1836 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && in check_all_holdout_tasks_trace()
1837 !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED)) in check_all_holdout_tasks_trace()
1838 trc_wait_for_one_reader(t, hop); in check_all_holdout_tasks_trace()
1841 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 && in check_all_holdout_tasks_trace()
1842 rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED) in check_all_holdout_tasks_trace()
1843 trc_del_holdout(t); in check_all_holdout_tasks_trace()
1845 show_stalled_task_trace(t, firstreport); in check_all_holdout_tasks_trace()
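
Each pass over the trace holdout list makes one of three moves per task: retry the inspection when no IPI is in flight and the task is still unchecked, drop the task once it has been checked and owes no quiescent state, or (once needreport is set) print a stall report. In condensed form:

    list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
            /* Unchecked and no IPI in flight: try the inspection again. */
            if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
                !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
                    trc_wait_for_one_reader(t, hop);

            /* Checked and no QS owed: this task is done.  Note the separate
             * if: a retry that just succeeded is removed in the same pass. */
            if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
                rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
                    trc_del_holdout(t);
            else if (needreport)
                    show_stalled_task_trace(t, firstreport);
    }
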
1882 static void exit_tasks_rcu_finish_trace(struct task_struct *t) in exit_tasks_rcu_finish_trace() argument
1884 union rcu_special trs = READ_ONCE(t->trc_reader_special); in exit_tasks_rcu_finish_trace()
1886 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); in exit_tasks_rcu_finish_trace()
1887 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); in exit_tasks_rcu_finish_trace()
1888 if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked)) in exit_tasks_rcu_finish_trace()
1889 rcu_read_unlock_trace_special(t); in exit_tasks_rcu_finish_trace()
1891 WRITE_ONCE(t->trc_reader_nesting, 0); in exit_tasks_rcu_finish_trace()
1999 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { } in exit_tasks_rcu_finish_trace() argument