Cross-reference listing for the scheduler core (kernel/sched/core.c): lines that reference p (a struct task_struct pointer), each annotated with its file line number, the enclosing function, and whether p is that function's argument or a local variable.
159 static inline int __task_prio(const struct task_struct *p) in __task_prio() argument
161 if (p->sched_class == &stop_sched_class) /* trumps deadline */ in __task_prio()
164 if (rt_prio(p->prio)) /* includes deadline */ in __task_prio()
165 return p->prio; /* [-1, 99] */ in __task_prio()
167 if (p->sched_class == &idle_sched_class) in __task_prio()
227 const struct task_struct *p = __node_2_sc(node); in rb_sched_core_cmp() local
230 if (cookie < p->core_cookie) in rb_sched_core_cmp()
233 if (cookie > p->core_cookie) in rb_sched_core_cmp()
239 void sched_core_enqueue(struct rq *rq, struct task_struct *p) in sched_core_enqueue() argument
243 if (!p->core_cookie) in sched_core_enqueue()
246 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
249 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) in sched_core_dequeue() argument
253 if (sched_core_enqueued(p)) { in sched_core_dequeue()
254 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
255 RB_CLEAR_NODE(&p->core_node); in sched_core_dequeue()
268 static int sched_task_is_throttled(struct task_struct *p, int cpu) in sched_task_is_throttled() argument
270 if (p->sched_class->task_is_throttled) in sched_task_is_throttled()
271 return p->sched_class->task_is_throttled(p, cpu); in sched_task_is_throttled()
276 static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie) in sched_core_next() argument
278 struct rb_node *node = &p->core_node; in sched_core_next()
279 int cpu = task_cpu(p); in sched_core_next()
286 p = __node_2_sc(node); in sched_core_next()
287 if (p->core_cookie != cookie) in sched_core_next()
290 } while (sched_task_is_throttled(p, cpu)); in sched_core_next()
292 return p; in sched_core_next()
301 struct task_struct *p; in sched_core_find() local
308 p = __node_2_sc(node); in sched_core_find()
309 if (!sched_task_is_throttled(p, rq->cpu)) in sched_core_find()
310 return p; in sched_core_find()
312 return sched_core_next(p, cookie); in sched_core_find()
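The cookie tree above follows the generic rb_add()/rb_erase() idiom from linux/rbtree.h; a minimal sketch of that idiom with a hypothetical demo_item type (not code from this file):

#include <linux/rbtree.h>

struct demo_item {
	struct rb_node node;
	unsigned long key;		/* plays the role of core_cookie */
};

static bool demo_less(struct rb_node *a, const struct rb_node *b)
{
	return rb_entry(a, struct demo_item, node)->key <
	       rb_entry(b, struct demo_item, node)->key;
}

static void demo_insert(struct rb_root *tree, struct demo_item *it)
{
	rb_add(&it->node, tree, demo_less);	/* ordered insert, as in sched_core_enqueue() */
}

static void demo_remove(struct rb_root *tree, struct demo_item *it)
{
	rb_erase(&it->node, tree);
	RB_CLEAR_NODE(&it->node);	/* so a later "is it enqueued?" check sees an empty node */
}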
453 static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { } in sched_core_enqueue() argument
455 sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { } in sched_core_dequeue() argument
627 struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) in __task_rq_lock() argument
632 lockdep_assert_held(&p->pi_lock); in __task_rq_lock()
635 rq = task_rq(p); in __task_rq_lock()
637 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in __task_rq_lock()
643 while (unlikely(task_on_rq_migrating(p))) in __task_rq_lock()
651 struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) in task_rq_lock() argument
652 __acquires(p->pi_lock) in task_rq_lock()
658 raw_spin_lock_irqsave(&p->pi_lock, rf->flags); in task_rq_lock()
659 rq = task_rq(p); in task_rq_lock()
678 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { in task_rq_lock()
683 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); in task_rq_lock()
685 while (unlikely(task_on_rq_migrating(p))) in task_rq_lock()
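The two lock helpers above are used throughout this file with the same shape; a minimal sketch of that pattern (inspect_task() is a hypothetical caller, assuming the scheduler-internal declarations from kernel/sched/sched.h):

static void inspect_task(struct task_struct *p)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);	/* takes p->pi_lock and the runqueue lock, IRQs off */
	/*
	 * Here task_rq(p) == rq is stable and p cannot finish a migration,
	 * which is exactly what the retry loops above wait for.
	 */
	task_rq_unlock(rq, p, &rf);
}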
904 static inline bool set_nr_and_not_polling(struct task_struct *p) in set_nr_and_not_polling() argument
906 struct thread_info *ti = task_thread_info(p); in set_nr_and_not_polling()
916 static bool set_nr_if_polling(struct task_struct *p) in set_nr_if_polling() argument
918 struct thread_info *ti = task_thread_info(p); in set_nr_if_polling()
933 static inline bool set_nr_and_not_polling(struct task_struct *p) in set_nr_and_not_polling() argument
935 set_tsk_need_resched(p); in set_nr_and_not_polling()
940 static inline bool set_nr_if_polling(struct task_struct *p) in set_nr_if_polling() argument
1193 static inline bool __need_bw_check(struct rq *rq, struct task_struct *p) in __need_bw_check() argument
1198 if (p->sched_class != &fair_sched_class) in __need_bw_check()
1201 if (!task_on_rq_queued(p)) in __need_bw_check()
1304 static void set_load_weight(struct task_struct *p, bool update_load) in set_load_weight() argument
1306 int prio = p->static_prio - MAX_RT_PRIO; in set_load_weight()
1309 if (task_has_idle_policy(p)) { in set_load_weight()
1321 if (update_load && p->sched_class == &fair_sched_class) in set_load_weight()
1322 reweight_task(p, &lw); in set_load_weight()
1324 p->se.load = lw; in set_load_weight()
1460 static void __uclamp_update_util_min_rt_default(struct task_struct *p) in __uclamp_update_util_min_rt_default() argument
1465 lockdep_assert_held(&p->pi_lock); in __uclamp_update_util_min_rt_default()
1467 uc_se = &p->uclamp_req[UCLAMP_MIN]; in __uclamp_update_util_min_rt_default()
1477 static void uclamp_update_util_min_rt_default(struct task_struct *p) in uclamp_update_util_min_rt_default() argument
1482 if (!rt_task(p)) in uclamp_update_util_min_rt_default()
1486 rq = task_rq_lock(p, &rf); in uclamp_update_util_min_rt_default()
1487 __uclamp_update_util_min_rt_default(p); in uclamp_update_util_min_rt_default()
1488 task_rq_unlock(rq, p, &rf); in uclamp_update_util_min_rt_default()
1492 uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id) in uclamp_tg_restrict() argument
1495 struct uclamp_se uc_req = p->uclamp_req[clamp_id]; in uclamp_tg_restrict()
1503 if (task_group_is_autogroup(task_group(p))) in uclamp_tg_restrict()
1505 if (task_group(p) == &root_task_group) in uclamp_tg_restrict()
1508 tg_min = task_group(p)->uclamp[UCLAMP_MIN].value; in uclamp_tg_restrict()
1509 tg_max = task_group(p)->uclamp[UCLAMP_MAX].value; in uclamp_tg_restrict()
1527 uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id) in uclamp_eff_get() argument
1529 struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id); in uclamp_eff_get()
1539 unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id) in uclamp_eff_value() argument
1544 if (p->uclamp[clamp_id].active) in uclamp_eff_value()
1545 return (unsigned long)p->uclamp[clamp_id].value; in uclamp_eff_value()
1547 uc_eff = uclamp_eff_get(p, clamp_id); in uclamp_eff_value()
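uclamp_eff_value() above is the read side most consumers use; a sketch of clamping a utilization number with it (clamp_task_util() is a hypothetical helper, assumes CONFIG_UCLAMP_TASK and the scheduler-internal headers):

static unsigned long clamp_task_util(struct task_struct *p, unsigned long util)
{
	unsigned long lo = uclamp_eff_value(p, UCLAMP_MIN);
	unsigned long hi = uclamp_eff_value(p, UCLAMP_MAX);

	return clamp(util, lo, hi);	/* keep util within the task's effective clamps */
}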
1562 static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, in uclamp_rq_inc_id() argument
1566 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_inc_id()
1572 p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); in uclamp_rq_inc_id()
1600 static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, in uclamp_rq_dec_id() argument
1604 struct uclamp_se *uc_se = &p->uclamp[clamp_id]; in uclamp_rq_dec_id()
1666 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) in uclamp_rq_inc() argument
1679 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_inc()
1683 uclamp_rq_inc_id(rq, p, clamp_id); in uclamp_rq_inc()
1690 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) in uclamp_rq_dec() argument
1703 if (unlikely(!p->sched_class->uclamp_enabled)) in uclamp_rq_dec()
1707 uclamp_rq_dec_id(rq, p, clamp_id); in uclamp_rq_dec()
1710 static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p, in uclamp_rq_reinc_id() argument
1713 if (!p->uclamp[clamp_id].active) in uclamp_rq_reinc_id()
1716 uclamp_rq_dec_id(rq, p, clamp_id); in uclamp_rq_reinc_id()
1717 uclamp_rq_inc_id(rq, p, clamp_id); in uclamp_rq_reinc_id()
1728 uclamp_update_active(struct task_struct *p) in uclamp_update_active() argument
1742 rq = task_rq_lock(p, &rf); in uclamp_update_active()
1751 uclamp_rq_reinc_id(rq, p, clamp_id); in uclamp_update_active()
1753 task_rq_unlock(rq, p, &rf); in uclamp_update_active()
1761 struct task_struct *p; in uclamp_update_active_tasks() local
1764 while ((p = css_task_iter_next(&it))) in uclamp_update_active_tasks()
1765 uclamp_update_active(p); in uclamp_update_active_tasks()
1794 struct task_struct *g, *p; in uclamp_sync_util_min_rt_default() local
1814 for_each_process_thread(g, p) in uclamp_sync_util_min_rt_default()
1815 uclamp_update_util_min_rt_default(p); in uclamp_sync_util_min_rt_default()
1883 static int uclamp_validate(struct task_struct *p, in uclamp_validate() argument
1886 int util_min = p->uclamp_req[UCLAMP_MIN].value; in uclamp_validate()
1887 int util_max = p->uclamp_req[UCLAMP_MAX].value; in uclamp_validate()
1943 static void __setscheduler_uclamp(struct task_struct *p, in __setscheduler_uclamp() argument
1949 struct uclamp_se *uc_se = &p->uclamp_req[clamp_id]; in __setscheduler_uclamp()
1959 if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN)) in __setscheduler_uclamp()
1973 uclamp_se_set(&p->uclamp_req[UCLAMP_MIN], in __setscheduler_uclamp()
1979 uclamp_se_set(&p->uclamp_req[UCLAMP_MAX], in __setscheduler_uclamp()
1984 static void uclamp_fork(struct task_struct *p) in uclamp_fork() argument
1993 p->uclamp[clamp_id].active = false; in uclamp_fork()
1995 if (likely(!p->sched_reset_on_fork)) in uclamp_fork()
1999 uclamp_se_set(&p->uclamp_req[clamp_id], in uclamp_fork()
2004 static void uclamp_post_fork(struct task_struct *p) in uclamp_post_fork() argument
2006 uclamp_update_util_min_rt_default(p); in uclamp_post_fork()
2049 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } in uclamp_rq_inc() argument
2050 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } in uclamp_rq_dec() argument
2051 static inline int uclamp_validate(struct task_struct *p, in uclamp_validate() argument
2056 static void __setscheduler_uclamp(struct task_struct *p, in __setscheduler_uclamp() argument
2058 static inline void uclamp_fork(struct task_struct *p) { } in uclamp_fork() argument
2059 static inline void uclamp_post_fork(struct task_struct *p) { } in uclamp_post_fork() argument
2063 bool sched_task_on_rq(struct task_struct *p) in sched_task_on_rq() argument
2065 return task_on_rq_queued(p); in sched_task_on_rq()
2068 unsigned long get_wchan(struct task_struct *p) in get_wchan() argument
2073 if (!p || p == current) in get_wchan()
2077 raw_spin_lock_irq(&p->pi_lock); in get_wchan()
2078 state = READ_ONCE(p->__state); in get_wchan()
2080 if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) in get_wchan()
2081 ip = __get_wchan(p); in get_wchan()
2082 raw_spin_unlock_irq(&p->pi_lock); in get_wchan()
2087 static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) in enqueue_task() argument
2093 sched_info_enqueue(rq, p); in enqueue_task()
2094 psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED)); in enqueue_task()
2097 uclamp_rq_inc(rq, p); in enqueue_task()
2098 p->sched_class->enqueue_task(rq, p, flags); in enqueue_task()
2101 sched_core_enqueue(rq, p); in enqueue_task()
2104 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) in dequeue_task() argument
2107 sched_core_dequeue(rq, p, flags); in dequeue_task()
2113 sched_info_dequeue(rq, p); in dequeue_task()
2114 psi_dequeue(p, flags & DEQUEUE_SLEEP); in dequeue_task()
2117 uclamp_rq_dec(rq, p); in dequeue_task()
2118 p->sched_class->dequeue_task(rq, p, flags); in dequeue_task()
2121 void activate_task(struct rq *rq, struct task_struct *p, int flags) in activate_task() argument
2123 if (task_on_rq_migrating(p)) in activate_task()
2126 sched_mm_cid_migrate_to(rq, p); in activate_task()
2128 enqueue_task(rq, p, flags); in activate_task()
2130 p->on_rq = TASK_ON_RQ_QUEUED; in activate_task()
2133 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) in deactivate_task() argument
2135 p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING; in deactivate_task()
2137 dequeue_task(rq, p, flags); in deactivate_task()
2161 static inline int normal_prio(struct task_struct *p) in normal_prio() argument
2163 return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio)); in normal_prio()
2173 static int effective_prio(struct task_struct *p) in effective_prio() argument
2175 p->normal_prio = normal_prio(p); in effective_prio()
2181 if (!rt_prio(p->prio)) in effective_prio()
2182 return p->normal_prio; in effective_prio()
2183 return p->prio; in effective_prio()
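For orientation, the priority arithmetic these helpers work on can be sketched in one line (prio_of_nice() is purely illustrative):

#include <linux/sched/prio.h>

static int prio_of_nice(int nice)
{
	/*
	 * SCHED_NORMAL/SCHED_BATCH: prio = DEFAULT_PRIO + nice = 120 + nice,
	 * i.e. 100..139 for nice -20..19. RT priorities occupy 0..99 and
	 * deadline sits below 0, which is why rt_prio(p->prio) above also
	 * covers deadline tasks.
	 */
	return NICE_TO_PRIO(nice);
}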
2192 inline int task_curr(const struct task_struct *p) in task_curr() argument
2194 return cpu_curr(task_cpu(p)) == p; in task_curr()
2204 static inline void check_class_changed(struct rq *rq, struct task_struct *p, in check_class_changed() argument
2208 if (prev_class != p->sched_class) { in check_class_changed()
2210 prev_class->switched_from(rq, p); in check_class_changed()
2212 p->sched_class->switched_to(rq, p); in check_class_changed()
2213 } else if (oldprio != p->prio || dl_task(p)) in check_class_changed()
2214 p->sched_class->prio_changed(rq, p, oldprio); in check_class_changed()
2217 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr() argument
2219 if (p->sched_class == rq->curr->sched_class) in check_preempt_curr()
2220 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
2221 else if (sched_class_above(p->sched_class, rq->curr->sched_class)) in check_preempt_curr()
2233 int __task_state_match(struct task_struct *p, unsigned int state) in __task_state_match() argument
2235 if (READ_ONCE(p->__state) & state) in __task_state_match()
2239 if (READ_ONCE(p->saved_state) & state) in __task_state_match()
2246 int task_state_match(struct task_struct *p, unsigned int state) in task_state_match() argument
2255 raw_spin_lock_irq(&p->pi_lock); in task_state_match()
2256 match = __task_state_match(p, state); in task_state_match()
2257 raw_spin_unlock_irq(&p->pi_lock); in task_state_match()
2261 return __task_state_match(p, state); in task_state_match()
2281 unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) in wait_task_inactive() argument
2295 rq = task_rq(p); in wait_task_inactive()
2308 while (task_on_cpu(rq, p)) { in wait_task_inactive()
2309 if (!task_state_match(p, match_state)) in wait_task_inactive()
2319 rq = task_rq_lock(p, &rf); in wait_task_inactive()
2320 trace_sched_wait_task(p); in wait_task_inactive()
2321 running = task_on_cpu(rq, p); in wait_task_inactive()
2322 queued = task_on_rq_queued(p); in wait_task_inactive()
2324 if ((match = __task_state_match(p, match_state))) { in wait_task_inactive()
2331 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ in wait_task_inactive()
2333 task_rq_unlock(rq, p, &rf); in wait_task_inactive()
2383 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);
2385 static int __set_cpus_allowed_ptr(struct task_struct *p,
2388 static void migrate_disable_switch(struct rq *rq, struct task_struct *p) in migrate_disable_switch() argument
2395 if (likely(!p->migration_disabled)) in migrate_disable_switch()
2398 if (p->cpus_ptr != &p->cpus_mask) in migrate_disable_switch()
2404 __do_set_cpus_allowed(p, &ac); in migrate_disable_switch()
2409 struct task_struct *p = current; in migrate_disable() local
2411 if (p->migration_disabled) { in migrate_disable()
2412 p->migration_disabled++; in migrate_disable()
2418 p->migration_disabled = 1; in migrate_disable()
2425 struct task_struct *p = current; in migrate_enable() local
2427 .new_mask = &p->cpus_mask, in migrate_enable()
2431 if (p->migration_disabled > 1) { in migrate_enable()
2432 p->migration_disabled--; in migrate_enable()
2436 if (WARN_ON_ONCE(!p->migration_disabled)) in migrate_enable()
2444 if (p->cpus_ptr != &p->cpus_mask) in migrate_enable()
2445 __set_cpus_allowed_ptr(p, &ac); in migrate_enable()
2452 p->migration_disabled = 0; in migrate_enable()
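migrate_disable()/migrate_enable() above nest through the per-task counter; a minimal usage sketch (touch_this_cpu_state() is a hypothetical placeholder):

static void touch_this_cpu_state(void)
{
	migrate_disable();	/* still preemptible, but pinned to the current CPU */
	/* ... work on per-CPU data that tolerates preemption but not migration ... */
	migrate_enable();
}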
2467 static inline bool is_cpu_allowed(struct task_struct *p, int cpu) in is_cpu_allowed() argument
2470 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in is_cpu_allowed()
2474 if (is_migration_disabled(p)) in is_cpu_allowed()
2478 if (!(p->flags & PF_KTHREAD)) in is_cpu_allowed()
2479 return cpu_active(cpu) && task_cpu_possible(cpu, p); in is_cpu_allowed()
2482 if (kthread_is_per_cpu(p)) in is_cpu_allowed()
2513 struct task_struct *p, int new_cpu) in move_queued_task() argument
2517 deactivate_task(rq, p, DEQUEUE_NOCLOCK); in move_queued_task()
2518 set_task_cpu(p, new_cpu); in move_queued_task()
2524 WARN_ON_ONCE(task_cpu(p) != new_cpu); in move_queued_task()
2525 activate_task(rq, p, 0); in move_queued_task()
2526 check_preempt_curr(rq, p, 0); in move_queued_task()
2559 struct task_struct *p, int dest_cpu) in __migrate_task() argument
2562 if (!is_cpu_allowed(p, dest_cpu)) in __migrate_task()
2565 rq = move_queued_task(rq, rf, p, dest_cpu); in __migrate_task()
2579 struct task_struct *p = arg->task; in migration_cpu_stop() local
2596 raw_spin_lock(&p->pi_lock); in migration_cpu_stop()
2603 WARN_ON_ONCE(pending && pending != p->migration_pending); in migration_cpu_stop()
2610 if (task_rq(p) == rq) { in migration_cpu_stop()
2611 if (is_migration_disabled(p)) in migration_cpu_stop()
2615 p->migration_pending = NULL; in migration_cpu_stop()
2618 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) in migration_cpu_stop()
2622 if (task_on_rq_queued(p)) { in migration_cpu_stop()
2624 rq = __migrate_task(rq, &rf, p, arg->dest_cpu); in migration_cpu_stop()
2626 p->wake_cpu = arg->dest_cpu; in migration_cpu_stop()
2651 if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { in migration_cpu_stop()
2652 p->migration_pending = NULL; in migration_cpu_stop()
2664 task_rq_unlock(rq, p, &rf); in migration_cpu_stop()
2665 stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, in migration_cpu_stop()
2673 task_rq_unlock(rq, p, &rf); in migration_cpu_stop()
2684 struct task_struct *p = arg; in push_cpu_stop() local
2686 raw_spin_lock_irq(&p->pi_lock); in push_cpu_stop()
2689 if (task_rq(p) != rq) in push_cpu_stop()
2692 if (is_migration_disabled(p)) { in push_cpu_stop()
2693 p->migration_flags |= MDF_PUSH; in push_cpu_stop()
2697 p->migration_flags &= ~MDF_PUSH; in push_cpu_stop()
2699 if (p->sched_class->find_lock_rq) in push_cpu_stop()
2700 lowest_rq = p->sched_class->find_lock_rq(p, rq); in push_cpu_stop()
2706 if (task_rq(p) == rq) { in push_cpu_stop()
2707 deactivate_task(rq, p, 0); in push_cpu_stop()
2708 set_task_cpu(p, lowest_rq->cpu); in push_cpu_stop()
2709 activate_task(lowest_rq, p, 0); in push_cpu_stop()
2718 raw_spin_unlock_irq(&p->pi_lock); in push_cpu_stop()
2720 put_task_struct(p); in push_cpu_stop()
2728 void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx) in set_cpus_allowed_common() argument
2731 p->cpus_ptr = ctx->new_mask; in set_cpus_allowed_common()
2735 cpumask_copy(&p->cpus_mask, ctx->new_mask); in set_cpus_allowed_common()
2736 p->nr_cpus_allowed = cpumask_weight(ctx->new_mask); in set_cpus_allowed_common()
2742 swap(p->user_cpus_ptr, ctx->user_mask); in set_cpus_allowed_common()
2746 __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx) in __do_set_cpus_allowed() argument
2748 struct rq *rq = task_rq(p); in __do_set_cpus_allowed()
2764 SCHED_WARN_ON(!p->on_cpu); in __do_set_cpus_allowed()
2766 lockdep_assert_held(&p->pi_lock); in __do_set_cpus_allowed()
2768 queued = task_on_rq_queued(p); in __do_set_cpus_allowed()
2769 running = task_current(rq, p); in __do_set_cpus_allowed()
2777 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in __do_set_cpus_allowed()
2780 put_prev_task(rq, p); in __do_set_cpus_allowed()
2782 p->sched_class->set_cpus_allowed(p, ctx); in __do_set_cpus_allowed()
2785 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in __do_set_cpus_allowed()
2787 set_next_task(rq, p); in __do_set_cpus_allowed()
2794 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) in do_set_cpus_allowed() argument
2806 __do_set_cpus_allowed(p, &ac); in do_set_cpus_allowed()
2869 static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p) in clear_user_cpus_ptr() argument
2873 swap(p->user_cpus_ptr, user_mask); in clear_user_cpus_ptr()
2878 void release_user_cpus_ptr(struct task_struct *p) in release_user_cpus_ptr() argument
2880 kfree(clear_user_cpus_ptr(p)); in release_user_cpus_ptr()
2959 static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, in affine_move_task() argument
2962 __releases(p->pi_lock) in affine_move_task()
2968 if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { in affine_move_task()
2972 (p->migration_flags & MDF_PUSH) && !rq->push_busy) { in affine_move_task()
2974 push_task = get_task_struct(p); in affine_move_task()
2981 pending = p->migration_pending; in affine_move_task()
2983 p->migration_pending = NULL; in affine_move_task()
2988 task_rq_unlock(rq, p, rf); in affine_move_task()
2991 p, &rq->push_work); in affine_move_task()
3003 if (!p->migration_pending) { in affine_move_task()
3008 .task = p, in affine_move_task()
3013 p->migration_pending = &my_pending; in affine_move_task()
3015 pending = p->migration_pending; in affine_move_task()
3028 pending = p->migration_pending; in affine_move_task()
3042 task_rq_unlock(rq, p, rf); in affine_move_task()
3046 if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { in affine_move_task()
3057 p->migration_flags &= ~MDF_PUSH; in affine_move_task()
3060 task_rq_unlock(rq, p, rf); in affine_move_task()
3071 if (!is_migration_disabled(p)) { in affine_move_task()
3072 if (task_on_rq_queued(p)) in affine_move_task()
3073 rq = move_queued_task(rq, rf, p, dest_cpu); in affine_move_task()
3076 p->migration_pending = NULL; in affine_move_task()
3080 task_rq_unlock(rq, p, rf); in affine_move_task()
3106 static int __set_cpus_allowed_ptr_locked(struct task_struct *p, in __set_cpus_allowed_ptr_locked() argument
3111 __releases(p->pi_lock) in __set_cpus_allowed_ptr_locked()
3113 const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p); in __set_cpus_allowed_ptr_locked()
3115 bool kthread = p->flags & PF_KTHREAD; in __set_cpus_allowed_ptr_locked()
3121 if (kthread || is_migration_disabled(p)) { in __set_cpus_allowed_ptr_locked()
3144 if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { in __set_cpus_allowed_ptr_locked()
3150 if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) { in __set_cpus_allowed_ptr_locked()
3152 swap(p->user_cpus_ptr, ctx->user_mask); in __set_cpus_allowed_ptr_locked()
3156 if (WARN_ON_ONCE(p == current && in __set_cpus_allowed_ptr_locked()
3157 is_migration_disabled(p) && in __set_cpus_allowed_ptr_locked()
3158 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { in __set_cpus_allowed_ptr_locked()
3175 __do_set_cpus_allowed(p, ctx); in __set_cpus_allowed_ptr_locked()
3177 return affine_move_task(rq, p, rf, dest_cpu, ctx->flags); in __set_cpus_allowed_ptr_locked()
3180 task_rq_unlock(rq, p, rf); in __set_cpus_allowed_ptr_locked()
3194 static int __set_cpus_allowed_ptr(struct task_struct *p, in __set_cpus_allowed_ptr() argument
3200 rq = task_rq_lock(p, &rf); in __set_cpus_allowed_ptr()
3205 if (p->user_cpus_ptr && in __set_cpus_allowed_ptr()
3207 cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr)) in __set_cpus_allowed_ptr()
3210 return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf); in __set_cpus_allowed_ptr()
3213 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) in set_cpus_allowed_ptr() argument
3220 return __set_cpus_allowed_ptr(p, &ac); in set_cpus_allowed_ptr()
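set_cpus_allowed_ptr() is the exported front end to the locked path above; a short usage sketch (pin_worker() is a hypothetical caller):

#include <linux/cpumask.h>
#include <linux/sched.h>

static int pin_worker(struct task_struct *worker, int cpu)
{
	/* Restrict the task to one CPU; returns 0 on success or a -errno. */
	return set_cpus_allowed_ptr(worker, cpumask_of(cpu));
}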
3233 static int restrict_cpus_allowed_ptr(struct task_struct *p, in restrict_cpus_allowed_ptr() argument
3245 rq = task_rq_lock(p, &rf); in restrict_cpus_allowed_ptr()
3252 if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { in restrict_cpus_allowed_ptr()
3257 if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) { in restrict_cpus_allowed_ptr()
3262 return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf); in restrict_cpus_allowed_ptr()
3265 task_rq_unlock(rq, p, &rf); in restrict_cpus_allowed_ptr()
3275 void force_compatible_cpus_allowed_ptr(struct task_struct *p) in force_compatible_cpus_allowed_ptr() argument
3278 const struct cpumask *override_mask = task_cpu_possible_mask(p); in force_compatible_cpus_allowed_ptr()
3291 if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask)) in force_compatible_cpus_allowed_ptr()
3298 cpuset_cpus_allowed(p, new_mask); in force_compatible_cpus_allowed_ptr()
3304 task_pid_nr(p), p->comm, in force_compatible_cpus_allowed_ptr()
3308 WARN_ON(set_cpus_allowed_ptr(p, override_mask)); in force_compatible_cpus_allowed_ptr()
3315 __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
3324 void relax_compatible_cpus_allowed_ptr(struct task_struct *p) in relax_compatible_cpus_allowed_ptr() argument
3327 .new_mask = task_user_cpus(p), in relax_compatible_cpus_allowed_ptr()
3336 ret = __sched_setaffinity(p, &ac); in relax_compatible_cpus_allowed_ptr()
3340 void set_task_cpu(struct task_struct *p, unsigned int new_cpu) in set_task_cpu() argument
3343 unsigned int state = READ_ONCE(p->__state); in set_task_cpu()
3349 WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); in set_task_cpu()
3357 p->sched_class == &fair_sched_class && in set_task_cpu()
3358 (p->on_rq && !task_on_rq_migrating(p))); in set_task_cpu()
3371 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || in set_task_cpu()
3372 lockdep_is_held(__rq_lockp(task_rq(p))))); in set_task_cpu()
3379 WARN_ON_ONCE(is_migration_disabled(p)); in set_task_cpu()
3382 trace_sched_migrate_task(p, new_cpu); in set_task_cpu()
3384 if (task_cpu(p) != new_cpu) { in set_task_cpu()
3385 if (p->sched_class->migrate_task_rq) in set_task_cpu()
3386 p->sched_class->migrate_task_rq(p, new_cpu); in set_task_cpu()
3387 p->se.nr_migrations++; in set_task_cpu()
3388 rseq_migrate(p); in set_task_cpu()
3389 sched_mm_cid_migrate_from(p); in set_task_cpu()
3390 perf_event_task_migrate(p); in set_task_cpu()
3393 __set_task_cpu(p, new_cpu); in set_task_cpu()
3397 static void __migrate_swap_task(struct task_struct *p, int cpu) in __migrate_swap_task() argument
3399 if (task_on_rq_queued(p)) { in __migrate_swap_task()
3403 src_rq = task_rq(p); in __migrate_swap_task()
3409 deactivate_task(src_rq, p, 0); in __migrate_swap_task()
3410 set_task_cpu(p, cpu); in __migrate_swap_task()
3411 activate_task(dst_rq, p, 0); in __migrate_swap_task()
3412 check_preempt_curr(dst_rq, p, 0); in __migrate_swap_task()
3423 p->wake_cpu = cpu; in __migrate_swap_task()
3467 int migrate_swap(struct task_struct *cur, struct task_struct *p, in migrate_swap() argument
3476 .dst_task = p, in migrate_swap()
3496 trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); in migrate_swap()
3517 void kick_process(struct task_struct *p) in kick_process() argument
3522 cpu = task_cpu(p); in kick_process()
3523 if ((cpu != smp_processor_id()) && task_curr(p)) in kick_process()
3551 static int select_fallback_rq(int cpu, struct task_struct *p) in select_fallback_rq() argument
3568 if (is_cpu_allowed(p, dest_cpu)) in select_fallback_rq()
3575 for_each_cpu(dest_cpu, p->cpus_ptr) { in select_fallback_rq()
3576 if (!is_cpu_allowed(p, dest_cpu)) in select_fallback_rq()
3585 if (cpuset_cpus_allowed_fallback(p)) { in select_fallback_rq()
3597 do_set_cpus_allowed(p, task_cpu_possible_mask(p)); in select_fallback_rq()
3613 if (p->mm && printk_ratelimit()) { in select_fallback_rq()
3615 task_pid_nr(p), p->comm, cpu); in select_fallback_rq()
3626 int select_task_rq(struct task_struct *p, int cpu, int wake_flags) in select_task_rq() argument
3628 lockdep_assert_held(&p->pi_lock); in select_task_rq()
3630 if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) in select_task_rq()
3631 cpu = p->sched_class->select_task_rq(p, cpu, wake_flags); in select_task_rq()
3633 cpu = cpumask_any(p->cpus_ptr); in select_task_rq()
3645 if (unlikely(!is_cpu_allowed(p, cpu))) in select_task_rq()
3646 cpu = select_fallback_rq(task_cpu(p), p); in select_task_rq()
3698 static inline int __set_cpus_allowed_ptr(struct task_struct *p, in __set_cpus_allowed_ptr() argument
3701 return set_cpus_allowed_ptr(p, ctx->new_mask); in __set_cpus_allowed_ptr()
3704 static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } in migrate_disable_switch() argument
3719 ttwu_stat(struct task_struct *p, int cpu, int wake_flags) in ttwu_stat() argument
3731 __schedstat_inc(p->stats.nr_wakeups_local); in ttwu_stat()
3735 __schedstat_inc(p->stats.nr_wakeups_remote); in ttwu_stat()
3747 __schedstat_inc(p->stats.nr_wakeups_migrate); in ttwu_stat()
3751 __schedstat_inc(p->stats.nr_wakeups); in ttwu_stat()
3754 __schedstat_inc(p->stats.nr_wakeups_sync); in ttwu_stat()
3760 static inline void ttwu_do_wakeup(struct task_struct *p) in ttwu_do_wakeup() argument
3762 WRITE_ONCE(p->__state, TASK_RUNNING); in ttwu_do_wakeup()
3763 trace_sched_wakeup(p); in ttwu_do_wakeup()
3767 ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, in ttwu_do_activate() argument
3774 if (p->sched_contributes_to_load) in ttwu_do_activate()
3782 if (p->in_iowait) { in ttwu_do_activate()
3783 delayacct_blkio_end(p); in ttwu_do_activate()
3784 atomic_dec(&task_rq(p)->nr_iowait); in ttwu_do_activate()
3787 activate_task(rq, p, en_flags); in ttwu_do_activate()
3788 check_preempt_curr(rq, p, wake_flags); in ttwu_do_activate()
3790 ttwu_do_wakeup(p); in ttwu_do_activate()
3793 if (p->sched_class->task_woken) { in ttwu_do_activate()
3799 p->sched_class->task_woken(rq, p); in ttwu_do_activate()
3845 static int ttwu_runnable(struct task_struct *p, int wake_flags) in ttwu_runnable() argument
3851 rq = __task_rq_lock(p, &rf); in ttwu_runnable()
3852 if (task_on_rq_queued(p)) { in ttwu_runnable()
3853 if (!task_on_cpu(rq, p)) { in ttwu_runnable()
3859 check_preempt_curr(rq, p, wake_flags); in ttwu_runnable()
3861 ttwu_do_wakeup(p); in ttwu_runnable()
3874 struct task_struct *p, *t; in sched_ttwu_pending() local
3883 llist_for_each_entry_safe(p, t, llist, wake_entry.llist) { in sched_ttwu_pending()
3884 if (WARN_ON_ONCE(p->on_cpu)) in sched_ttwu_pending()
3885 smp_cond_load_acquire(&p->on_cpu, !VAL); in sched_ttwu_pending()
3887 if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) in sched_ttwu_pending()
3888 set_task_cpu(p, cpu_of(rq)); in sched_ttwu_pending()
3890 ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); in sched_ttwu_pending()
3929 static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) in __ttwu_queue_wakelist() argument
3933 p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); in __ttwu_queue_wakelist()
3936 __smp_call_single_queue(cpu, &p->wake_entry.llist); in __ttwu_queue_wakelist()
3959 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) in ttwu_queue_cond() argument
3969 if (!cpumask_test_cpu(cpu, p->cpus_ptr)) in ttwu_queue_cond()
3999 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) in ttwu_queue_wakelist() argument
4001 if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) { in ttwu_queue_wakelist()
4003 __ttwu_queue_wakelist(p, cpu, wake_flags); in ttwu_queue_wakelist()
4012 static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) in ttwu_queue_wakelist() argument
4019 static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) in ttwu_queue() argument
4024 if (ttwu_queue_wakelist(p, cpu, wake_flags)) in ttwu_queue()
4029 ttwu_do_activate(rq, p, wake_flags, &rf); in ttwu_queue()
4048 bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success) in ttwu_state_match() argument
4057 *success = !!(match = __task_state_match(p, state)); in ttwu_state_match()
4074 p->saved_state = TASK_RUNNING; in ttwu_state_match()
4199 int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) in try_to_wake_up() argument
4204 if (p == current) { in try_to_wake_up()
4216 if (!ttwu_state_match(p, state, &success)) in try_to_wake_up()
4219 trace_sched_waking(p); in try_to_wake_up()
4220 ttwu_do_wakeup(p); in try_to_wake_up()
4230 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in try_to_wake_up()
4232 if (!ttwu_state_match(p, state, &success)) in try_to_wake_up()
4235 trace_sched_waking(p); in try_to_wake_up()
4260 if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) in try_to_wake_up()
4295 WRITE_ONCE(p->__state, TASK_WAKING); in try_to_wake_up()
4316 if (smp_load_acquire(&p->on_cpu) && in try_to_wake_up()
4317 ttwu_queue_wakelist(p, task_cpu(p), wake_flags)) in try_to_wake_up()
4329 smp_cond_load_acquire(&p->on_cpu, !VAL); in try_to_wake_up()
4331 cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU); in try_to_wake_up()
4332 if (task_cpu(p) != cpu) { in try_to_wake_up()
4333 if (p->in_iowait) { in try_to_wake_up()
4334 delayacct_blkio_end(p); in try_to_wake_up()
4335 atomic_dec(&task_rq(p)->nr_iowait); in try_to_wake_up()
4339 psi_ttwu_dequeue(p); in try_to_wake_up()
4340 set_task_cpu(p, cpu); in try_to_wake_up()
4343 cpu = task_cpu(p); in try_to_wake_up()
4346 ttwu_queue(p, cpu, wake_flags); in try_to_wake_up()
4350 ttwu_stat(p, task_cpu(p), wake_flags); in try_to_wake_up()
4355 static bool __task_needs_rq_lock(struct task_struct *p) in __task_needs_rq_lock() argument
4357 unsigned int state = READ_ONCE(p->__state); in __task_needs_rq_lock()
4374 if (p->on_rq) in __task_needs_rq_lock()
4383 smp_cond_load_acquire(&p->on_cpu, !VAL); in __task_needs_rq_lock()
4403 int task_call_func(struct task_struct *p, task_call_f func, void *arg) in task_call_func() argument
4409 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in task_call_func()
4411 if (__task_needs_rq_lock(p)) in task_call_func()
4412 rq = __task_rq_lock(p, &rf); in task_call_func()
4424 ret = func(p, arg); in task_call_func()
4429 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); in task_call_func()
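task_call_func() above runs its callback with the task stabilized against concurrent wakeup and migration; a sketch of a matching callback (read_task_state() is hypothetical):

static int read_task_state(struct task_struct *p, void *arg)	/* matches task_call_f */
{
	*(unsigned int *)arg = READ_ONCE(p->__state);
	return 0;
}

/* usage: unsigned int state; task_call_func(p, read_task_state, &state); */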
4476 int wake_up_process(struct task_struct *p) in wake_up_process() argument
4478 return try_to_wake_up(p, TASK_NORMAL, 0); in wake_up_process()
4482 int wake_up_state(struct task_struct *p, unsigned int state) in wake_up_state() argument
4484 return try_to_wake_up(p, state, 0); in wake_up_state()
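A sketch of the usual pairing for the exported wakeup wrappers above (the consumer kthread and its creation are assumed, not shown):

static struct task_struct *consumer;	/* assumed: created elsewhere, e.g. via kthread_run() */

static void publish_work(void)
{
	/*
	 * Queue the new work under the appropriate lock first, then wake the
	 * sleeper: wake_up_process() targets TASK_NORMAL (interruptible and
	 * uninterruptible) sleepers and returns 1 if the task was woken.
	 */
	wake_up_process(consumer);
}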
4493 static void __sched_fork(unsigned long clone_flags, struct task_struct *p) in __sched_fork() argument
4495 p->on_rq = 0; in __sched_fork()
4497 p->se.on_rq = 0; in __sched_fork()
4498 p->se.exec_start = 0; in __sched_fork()
4499 p->se.sum_exec_runtime = 0; in __sched_fork()
4500 p->se.prev_sum_exec_runtime = 0; in __sched_fork()
4501 p->se.nr_migrations = 0; in __sched_fork()
4502 p->se.vruntime = 0; in __sched_fork()
4503 p->se.vlag = 0; in __sched_fork()
4504 p->se.slice = sysctl_sched_base_slice; in __sched_fork()
4505 INIT_LIST_HEAD(&p->se.group_node); in __sched_fork()
4508 p->se.cfs_rq = NULL; in __sched_fork()
4513 memset(&p->stats, 0, sizeof(p->stats)); in __sched_fork()
4516 RB_CLEAR_NODE(&p->dl.rb_node); in __sched_fork()
4517 init_dl_task_timer(&p->dl); in __sched_fork()
4518 init_dl_inactive_task_timer(&p->dl); in __sched_fork()
4519 __dl_clear_params(p); in __sched_fork()
4521 INIT_LIST_HEAD(&p->rt.run_list); in __sched_fork()
4522 p->rt.timeout = 0; in __sched_fork()
4523 p->rt.time_slice = sched_rr_timeslice; in __sched_fork()
4524 p->rt.on_rq = 0; in __sched_fork()
4525 p->rt.on_list = 0; in __sched_fork()
4528 INIT_HLIST_HEAD(&p->preempt_notifiers); in __sched_fork()
4532 p->capture_control = NULL; in __sched_fork()
4534 init_numa_balancing(clone_flags, p); in __sched_fork()
4536 p->wake_entry.u_flags = CSD_TYPE_TTWU; in __sched_fork()
4537 p->migration_pending = NULL; in __sched_fork()
4539 init_sched_mm_cid(p); in __sched_fork()
4728 int sched_fork(unsigned long clone_flags, struct task_struct *p) in sched_fork() argument
4730 __sched_fork(clone_flags, p); in sched_fork()
4736 p->__state = TASK_NEW; in sched_fork()
4741 p->prio = current->normal_prio; in sched_fork()
4743 uclamp_fork(p); in sched_fork()
4748 if (unlikely(p->sched_reset_on_fork)) { in sched_fork()
4749 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { in sched_fork()
4750 p->policy = SCHED_NORMAL; in sched_fork()
4751 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4752 p->rt_priority = 0; in sched_fork()
4753 } else if (PRIO_TO_NICE(p->static_prio) < 0) in sched_fork()
4754 p->static_prio = NICE_TO_PRIO(0); in sched_fork()
4756 p->prio = p->normal_prio = p->static_prio; in sched_fork()
4757 set_load_weight(p, false); in sched_fork()
4763 p->sched_reset_on_fork = 0; in sched_fork()
4766 if (dl_prio(p->prio)) in sched_fork()
4768 else if (rt_prio(p->prio)) in sched_fork()
4769 p->sched_class = &rt_sched_class; in sched_fork()
4771 p->sched_class = &fair_sched_class; in sched_fork()
4773 init_entity_runnable_average(&p->se); in sched_fork()
4778 memset(&p->sched_info, 0, sizeof(p->sched_info)); in sched_fork()
4781 p->on_cpu = 0; in sched_fork()
4783 init_task_preempt_count(p); in sched_fork()
4785 plist_node_init(&p->pushable_tasks, MAX_PRIO); in sched_fork()
4786 RB_CLEAR_NODE(&p->pushable_dl_tasks); in sched_fork()
4791 void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) in sched_cgroup_fork() argument
4799 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_cgroup_fork()
4805 tg = autogroup_task_group(p, tg); in sched_cgroup_fork()
4806 p->sched_task_group = tg; in sched_cgroup_fork()
4809 rseq_migrate(p); in sched_cgroup_fork()
4814 __set_task_cpu(p, smp_processor_id()); in sched_cgroup_fork()
4815 if (p->sched_class->task_fork) in sched_cgroup_fork()
4816 p->sched_class->task_fork(p); in sched_cgroup_fork()
4817 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_cgroup_fork()
4820 void sched_post_fork(struct task_struct *p) in sched_post_fork() argument
4822 uclamp_post_fork(p); in sched_post_fork()
4848 void wake_up_new_task(struct task_struct *p) in wake_up_new_task() argument
4853 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in wake_up_new_task()
4854 WRITE_ONCE(p->__state, TASK_RUNNING); in wake_up_new_task()
4864 p->recent_used_cpu = task_cpu(p); in wake_up_new_task()
4865 rseq_migrate(p); in wake_up_new_task()
4866 __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK)); in wake_up_new_task()
4868 rq = __task_rq_lock(p, &rf); in wake_up_new_task()
4870 post_init_entity_util_avg(p); in wake_up_new_task()
4872 activate_task(rq, p, ENQUEUE_NOCLOCK); in wake_up_new_task()
4873 trace_sched_wakeup_new(p); in wake_up_new_task()
4874 check_preempt_curr(rq, p, WF_FORK); in wake_up_new_task()
4876 if (p->sched_class->task_woken) { in wake_up_new_task()
4882 p->sched_class->task_woken(rq, p); in wake_up_new_task()
4886 task_rq_unlock(rq, p, &rf); in wake_up_new_task()
5497 struct task_struct *p = current; in sched_exec() local
5501 scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { in sched_exec()
5502 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); in sched_exec()
5509 arg = (struct migration_arg){ p, dest_cpu }; in sched_exec()
5511 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); in sched_exec()
5528 static inline void prefetch_curr_exec_start(struct task_struct *p) in prefetch_curr_exec_start() argument
5531 struct sched_entity *curr = (&p->se)->cfs_rq->curr; in prefetch_curr_exec_start()
5533 struct sched_entity *curr = (&task_rq(p)->cfs)->curr; in prefetch_curr_exec_start()
5544 unsigned long long task_sched_runtime(struct task_struct *p) in task_sched_runtime() argument
5562 if (!p->on_cpu || !task_on_rq_queued(p)) in task_sched_runtime()
5563 return p->se.sum_exec_runtime; in task_sched_runtime()
5566 rq = task_rq_lock(p, &rf); in task_sched_runtime()
5572 if (task_current(rq, p) && task_on_rq_queued(p)) { in task_sched_runtime()
5573 prefetch_curr_exec_start(p); in task_sched_runtime()
5575 p->sched_class->update_curr(rq); in task_sched_runtime()
5577 ns = p->se.sum_exec_runtime; in task_sched_runtime()
5578 task_rq_unlock(rq, p, &rf); in task_sched_runtime()
5893 static inline unsigned long get_preempt_disable_ip(struct task_struct *p) in get_preempt_disable_ip() argument
5896 return p->preempt_disable_ip; in get_preempt_disable_ip()
5994 struct task_struct *p; in __pick_next_task() local
6005 p = pick_next_task_fair(rq, prev, rf); in __pick_next_task()
6006 if (unlikely(p == RETRY_TASK)) in __pick_next_task()
6010 if (!p) { in __pick_next_task()
6012 p = pick_next_task_idle(rq); in __pick_next_task()
6015 return p; in __pick_next_task()
6022 p = class->pick_next_task(rq); in __pick_next_task()
6023 if (p) in __pick_next_task()
6024 return p; in __pick_next_task()
6052 struct task_struct *p; in pick_task() local
6055 p = class->pick_task(rq); in pick_task()
6056 if (p) in pick_task()
6057 return p; in pick_task()
6063 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
6070 struct task_struct *next, *p, *max = NULL; in pick_next_task() local
6187 p = rq_i->core_pick = pick_task(rq_i); in pick_next_task()
6188 if (!max || prio_less(max, p, fi_before)) in pick_next_task()
6189 max = p; in pick_next_task()
6200 p = rq_i->core_pick; in pick_next_task()
6202 if (!cookie_equals(p, cookie)) { in pick_next_task()
6203 p = NULL; in pick_next_task()
6205 p = sched_core_find(rq_i, cookie); in pick_next_task()
6206 if (!p) in pick_next_task()
6207 p = idle_sched_class.pick_task(rq_i); in pick_next_task()
6210 rq_i->core_pick = p; in pick_next_task()
6212 if (p == rq_i->idle) { in pick_next_task()
6297 struct task_struct *p; in try_steal_cookie() local
6311 p = sched_core_find(src, cookie); in try_steal_cookie()
6312 if (!p) in try_steal_cookie()
6316 if (p == src->core_pick || p == src->curr) in try_steal_cookie()
6319 if (!is_cpu_allowed(p, this)) in try_steal_cookie()
6322 if (p->core_occupation > dst->idle->core_occupation) in try_steal_cookie()
6330 if (sched_task_is_throttled(p, this)) in try_steal_cookie()
6333 deactivate_task(src, p, 0); in try_steal_cookie()
6334 set_task_cpu(p, this); in try_steal_cookie()
6335 activate_task(dst, p, 0); in try_steal_cookie()
6343 p = sched_core_next(p, cookie); in try_steal_cookie()
6344 } while (p); in try_steal_cookie()
7025 static void __setscheduler_prio(struct task_struct *p, int prio) in __setscheduler_prio() argument
7028 p->sched_class = &dl_sched_class; in __setscheduler_prio()
7030 p->sched_class = &rt_sched_class; in __setscheduler_prio()
7032 p->sched_class = &fair_sched_class; in __setscheduler_prio()
7034 p->prio = prio; in __setscheduler_prio()
7047 static inline int rt_effective_prio(struct task_struct *p, int prio) in rt_effective_prio() argument
7049 struct task_struct *pi_task = rt_mutex_get_top_task(p); in rt_effective_prio()
7065 void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) in rt_mutex_setprio() argument
7074 prio = __rt_effective_prio(pi_task, p->normal_prio); in rt_mutex_setprio()
7079 if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7082 rq = __task_rq_lock(p, &rf); in rt_mutex_setprio()
7094 p->pi_top_task = pi_task; in rt_mutex_setprio()
7099 if (prio == p->prio && !dl_prio(prio)) in rt_mutex_setprio()
7114 if (unlikely(p == rq->idle)) { in rt_mutex_setprio()
7115 WARN_ON(p != rq->curr); in rt_mutex_setprio()
7116 WARN_ON(p->pi_blocked_on); in rt_mutex_setprio()
7120 trace_sched_pi_setprio(p, pi_task); in rt_mutex_setprio()
7121 oldprio = p->prio; in rt_mutex_setprio()
7126 prev_class = p->sched_class; in rt_mutex_setprio()
7127 queued = task_on_rq_queued(p); in rt_mutex_setprio()
7128 running = task_current(rq, p); in rt_mutex_setprio()
7130 dequeue_task(rq, p, queue_flag); in rt_mutex_setprio()
7132 put_prev_task(rq, p); in rt_mutex_setprio()
7144 if (!dl_prio(p->normal_prio) || in rt_mutex_setprio()
7146 dl_entity_preempt(&pi_task->dl, &p->dl))) { in rt_mutex_setprio()
7147 p->dl.pi_se = pi_task->dl.pi_se; in rt_mutex_setprio()
7150 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7154 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7159 p->dl.pi_se = &p->dl; in rt_mutex_setprio()
7161 p->rt.timeout = 0; in rt_mutex_setprio()
7164 __setscheduler_prio(p, prio); in rt_mutex_setprio()
7167 enqueue_task(rq, p, queue_flag); in rt_mutex_setprio()
7169 set_next_task(rq, p); in rt_mutex_setprio()
7171 check_class_changed(rq, p, prev_class, oldprio); in rt_mutex_setprio()
7183 static inline int rt_effective_prio(struct task_struct *p, int prio) in rt_effective_prio() argument
7189 void set_user_nice(struct task_struct *p, long nice) in set_user_nice() argument
7196 if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) in set_user_nice()
7202 rq = task_rq_lock(p, &rf); in set_user_nice()
7211 if (task_has_dl_policy(p) || task_has_rt_policy(p)) { in set_user_nice()
7212 p->static_prio = NICE_TO_PRIO(nice); in set_user_nice()
7215 queued = task_on_rq_queued(p); in set_user_nice()
7216 running = task_current(rq, p); in set_user_nice()
7218 dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); in set_user_nice()
7220 put_prev_task(rq, p); in set_user_nice()
7222 p->static_prio = NICE_TO_PRIO(nice); in set_user_nice()
7223 set_load_weight(p, true); in set_user_nice()
7224 old_prio = p->prio; in set_user_nice()
7225 p->prio = effective_prio(p); in set_user_nice()
7228 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in set_user_nice()
7230 set_next_task(rq, p); in set_user_nice()
7236 p->sched_class->prio_changed(rq, p, old_prio); in set_user_nice()
7239 task_rq_unlock(rq, p, &rf); in set_user_nice()
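set_user_nice() above is the in-kernel way to renice a fair-class task (it requeues the task as shown); a one-line usage sketch with a hypothetical task pointer:

static void deprioritize(struct task_struct *background_worker)
{
	set_user_nice(background_worker, MAX_NICE);	/* nice 19, lowest CFS weight */
}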
7251 static bool is_nice_reduction(const struct task_struct *p, const int nice) in is_nice_reduction() argument
7256 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE)); in is_nice_reduction()
7264 int can_nice(const struct task_struct *p, const int nice) in can_nice() argument
7266 return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE); in can_nice()
7316 int task_prio(const struct task_struct *p) in task_prio() argument
7318 return p->prio - MAX_RT_PRIO; in task_prio()
7409 struct task_struct *p) in effective_cpu_util() argument
7444 util = uclamp_rq_util_with(rq, util, p); in effective_cpu_util()
7518 static void __setscheduler_params(struct task_struct *p, in __setscheduler_params() argument
7524 policy = p->policy; in __setscheduler_params()
7526 p->policy = policy; in __setscheduler_params()
7529 __setparam_dl(p, attr); in __setscheduler_params()
7531 p->static_prio = NICE_TO_PRIO(attr->sched_nice); in __setscheduler_params()
7538 p->rt_priority = attr->sched_priority; in __setscheduler_params()
7539 p->normal_prio = normal_prio(p); in __setscheduler_params()
7540 set_load_weight(p, true); in __setscheduler_params()
7546 static bool check_same_owner(struct task_struct *p) in check_same_owner() argument
7552 pcred = __task_cred(p); in check_same_owner()
7564 static int user_check_sched_setscheduler(struct task_struct *p, in user_check_sched_setscheduler() argument
7569 if (attr->sched_nice < task_nice(p) && in user_check_sched_setscheduler()
7570 !is_nice_reduction(p, attr->sched_nice)) in user_check_sched_setscheduler()
7575 unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); in user_check_sched_setscheduler()
7578 if (policy != p->policy && !rlim_rtprio) in user_check_sched_setscheduler()
7582 if (attr->sched_priority > p->rt_priority && in user_check_sched_setscheduler()
7600 if (task_has_idle_policy(p) && !idle_policy(policy)) { in user_check_sched_setscheduler()
7601 if (!is_nice_reduction(p, task_nice(p))) in user_check_sched_setscheduler()
7606 if (!check_same_owner(p)) in user_check_sched_setscheduler()
7610 if (p->sched_reset_on_fork && !reset_on_fork) in user_check_sched_setscheduler()
7622 static int __sched_setscheduler(struct task_struct *p, in __sched_setscheduler() argument
7641 reset_on_fork = p->sched_reset_on_fork; in __sched_setscheduler()
7642 policy = oldpolicy = p->policy; in __sched_setscheduler()
7665 retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork); in __sched_setscheduler()
7672 retval = security_task_setscheduler(p); in __sched_setscheduler()
7679 retval = uclamp_validate(p, attr); in __sched_setscheduler()
7688 if (dl_policy(policy) || dl_policy(p->policy)) { in __sched_setscheduler()
7700 rq = task_rq_lock(p, &rf); in __sched_setscheduler()
7706 if (p == rq->stop) { in __sched_setscheduler()
7715 if (unlikely(policy == p->policy)) { in __sched_setscheduler()
7716 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) in __sched_setscheduler()
7718 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) in __sched_setscheduler()
7720 if (dl_policy(policy) && dl_param_changed(p, attr)) in __sched_setscheduler()
7725 p->sched_reset_on_fork = reset_on_fork; in __sched_setscheduler()
7738 task_group(p)->rt_bandwidth.rt_runtime == 0 && in __sched_setscheduler()
7739 !task_group_is_autogroup(task_group(p))) { in __sched_setscheduler()
7754 if (!cpumask_subset(span, p->cpus_ptr) || in __sched_setscheduler()
7764 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { in __sched_setscheduler()
7766 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
7777 if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { in __sched_setscheduler()
7782 p->sched_reset_on_fork = reset_on_fork; in __sched_setscheduler()
7783 oldprio = p->prio; in __sched_setscheduler()
7794 newprio = rt_effective_prio(p, newprio); in __sched_setscheduler()
7799 queued = task_on_rq_queued(p); in __sched_setscheduler()
7800 running = task_current(rq, p); in __sched_setscheduler()
7802 dequeue_task(rq, p, queue_flags); in __sched_setscheduler()
7804 put_prev_task(rq, p); in __sched_setscheduler()
7806 prev_class = p->sched_class; in __sched_setscheduler()
7809 __setscheduler_params(p, attr); in __sched_setscheduler()
7810 __setscheduler_prio(p, newprio); in __sched_setscheduler()
7812 __setscheduler_uclamp(p, attr); in __sched_setscheduler()
7819 if (oldprio < p->prio) in __sched_setscheduler()
7822 enqueue_task(rq, p, queue_flags); in __sched_setscheduler()
7825 set_next_task(rq, p); in __sched_setscheduler()
7827 check_class_changed(rq, p, prev_class, oldprio); in __sched_setscheduler()
7832 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
7837 rt_mutex_adjust_pi(p); in __sched_setscheduler()
7847 task_rq_unlock(rq, p, &rf); in __sched_setscheduler()
7853 static int _sched_setscheduler(struct task_struct *p, int policy, in _sched_setscheduler() argument
7859 .sched_nice = PRIO_TO_NICE(p->static_prio), in _sched_setscheduler()
7869 return __sched_setscheduler(p, &attr, check, true); in _sched_setscheduler()
7883 int sched_setscheduler(struct task_struct *p, int policy, in sched_setscheduler() argument
7886 return _sched_setscheduler(p, policy, param, true); in sched_setscheduler()
7889 int sched_setattr(struct task_struct *p, const struct sched_attr *attr) in sched_setattr() argument
7891 return __sched_setscheduler(p, attr, true, true); in sched_setattr()
7894 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) in sched_setattr_nocheck() argument
7896 return __sched_setscheduler(p, attr, false, true); in sched_setattr_nocheck()
7913 int sched_setscheduler_nocheck(struct task_struct *p, int policy, in sched_setscheduler_nocheck() argument
7916 return _sched_setscheduler(p, policy, param, false); in sched_setscheduler_nocheck()
7937 void sched_set_fifo(struct task_struct *p) in sched_set_fifo() argument
7940 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); in sched_set_fifo()
7947 void sched_set_fifo_low(struct task_struct *p) in sched_set_fifo_low() argument
7950 WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); in sched_set_fifo_low()
7954 void sched_set_normal(struct task_struct *p, int nice) in sched_set_normal() argument
7960 WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); in sched_set_normal()
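sched_set_fifo(), sched_set_fifo_low() and sched_set_normal() above are the wrappers drivers are expected to use instead of picking raw RT priorities themselves; a usage sketch with a hypothetical irq_thread pointer:

static void make_latency_critical(struct task_struct *irq_thread)
{
	sched_set_fifo(irq_thread);		/* SCHED_FIFO at a mid-range priority */
}

static void make_normal_again(struct task_struct *irq_thread)
{
	sched_set_normal(irq_thread, 0);	/* back to SCHED_NORMAL, nice 0 */
}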
7968 struct task_struct *p; in do_sched_setscheduler() local
7978 p = find_process_by_pid(pid); in do_sched_setscheduler()
7979 if (likely(p)) in do_sched_setscheduler()
7980 get_task_struct(p); in do_sched_setscheduler()
7983 if (likely(p)) { in do_sched_setscheduler()
7984 retval = sched_setscheduler(p, policy, &lparam); in do_sched_setscheduler()
7985 put_task_struct(p); in do_sched_setscheduler()
8036 static void get_params(struct task_struct *p, struct sched_attr *attr) in get_params() argument
8038 if (task_has_dl_policy(p)) in get_params()
8039 __getparam_dl(p, attr); in get_params()
8040 else if (task_has_rt_policy(p)) in get_params()
8041 attr->sched_priority = p->rt_priority; in get_params()
8043 attr->sched_nice = task_nice(p); in get_params()
8084 struct task_struct *p; in SYSCALL_DEFINE3() local
8101 p = find_process_by_pid(pid); in SYSCALL_DEFINE3()
8102 if (likely(p)) in SYSCALL_DEFINE3()
8103 get_task_struct(p); in SYSCALL_DEFINE3()
8106 if (likely(p)) { in SYSCALL_DEFINE3()
8108 get_params(p, &attr); in SYSCALL_DEFINE3()
8109 retval = sched_setattr(p, &attr); in SYSCALL_DEFINE3()
8110 put_task_struct(p); in SYSCALL_DEFINE3()
8125 struct task_struct *p; in SYSCALL_DEFINE1() local
8133 p = find_process_by_pid(pid); in SYSCALL_DEFINE1()
8134 if (p) { in SYSCALL_DEFINE1()
8135 retval = security_task_getscheduler(p); in SYSCALL_DEFINE1()
8137 retval = p->policy in SYSCALL_DEFINE1()
8138 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); in SYSCALL_DEFINE1()
8155 struct task_struct *p; in SYSCALL_DEFINE2() local
8162 p = find_process_by_pid(pid); in SYSCALL_DEFINE2()
8164 if (!p) in SYSCALL_DEFINE2()
8167 retval = security_task_getscheduler(p); in SYSCALL_DEFINE2()
8171 if (task_has_rt_policy(p)) in SYSCALL_DEFINE2()
8172 lp.sched_priority = p->rt_priority; in SYSCALL_DEFINE2()
8237 struct task_struct *p; in SYSCALL_DEFINE4() local
8245 p = find_process_by_pid(pid); in SYSCALL_DEFINE4()
8247 if (!p) in SYSCALL_DEFINE4()
8250 retval = security_task_getscheduler(p); in SYSCALL_DEFINE4()
8254 kattr.sched_policy = p->policy; in SYSCALL_DEFINE4()
8255 if (p->sched_reset_on_fork) in SYSCALL_DEFINE4()
8257 get_params(p, &kattr); in SYSCALL_DEFINE4()
8266 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; in SYSCALL_DEFINE4()
8267 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; in SYSCALL_DEFINE4()
8280 int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) in dl_task_check_affinity() argument
8288 if (!task_has_dl_policy(p) || !dl_bandwidth_enabled()) in dl_task_check_affinity()
8298 if (!cpumask_subset(task_rq(p)->rd->span, mask)) in dl_task_check_affinity()
8306 __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx) in __sched_setaffinity() argument
8319 cpuset_cpus_allowed(p, cpus_allowed); in __sched_setaffinity()
8325 retval = dl_task_check_affinity(p, new_mask); in __sched_setaffinity()
8329 retval = __set_cpus_allowed_ptr(p, ctx); in __sched_setaffinity()
8333 cpuset_cpus_allowed(p, cpus_allowed); in __sched_setaffinity()
8356 __set_cpus_allowed_ptr(p, ctx); in __sched_setaffinity()
8371 struct task_struct *p; in sched_setaffinity() local
8376 p = find_process_by_pid(pid); in sched_setaffinity()
8377 if (!p) { in sched_setaffinity()
8383 get_task_struct(p); in sched_setaffinity()
8386 if (p->flags & PF_NO_SETAFFINITY) { in sched_setaffinity()
8391 if (!check_same_owner(p)) { in sched_setaffinity()
8393 if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { in sched_setaffinity()
8401 retval = security_task_setscheduler(p); in sched_setaffinity()
8423 retval = __sched_setaffinity(p, &ac); in sched_setaffinity()
8427 put_task_struct(p); in sched_setaffinity()
8468 struct task_struct *p; in sched_getaffinity() local
8475 p = find_process_by_pid(pid); in sched_getaffinity()
8476 if (!p) in sched_getaffinity()
8479 retval = security_task_getscheduler(p); in sched_getaffinity()
8483 raw_spin_lock_irqsave(&p->pi_lock, flags); in sched_getaffinity()
8484 cpumask_and(mask, &p->cpus_mask, cpu_active_mask); in sched_getaffinity()
8485 raw_spin_unlock_irqrestore(&p->pi_lock, flags); in sched_getaffinity()
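For reference, the affinity syscalls above are what the userspace wrappers land in; a sketch of the glibc-level counterpart (pin_self_to_cpu0() is illustrative):

#define _GNU_SOURCE
#include <sched.h>

static int pin_self_to_cpu0(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	/* pid 0 means the calling thread; serviced by sched_setaffinity() above */
	return sched_setaffinity(0, sizeof(set), &set);
}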
8933 int __sched yield_to(struct task_struct *p, bool preempt) in yield_to() argument
8944 p_rq = task_rq(p); in yield_to()
8955 if (task_rq(p) != p_rq) { in yield_to()
8963 if (curr->sched_class != p->sched_class) in yield_to()
8966 if (task_on_cpu(p_rq, p) || !task_is_running(p)) in yield_to()
8969 yielded = curr->sched_class->yield_to_task(rq, p); in yield_to()
9088 struct task_struct *p; in sched_rr_get_interval() local
9099 p = find_process_by_pid(pid); in sched_rr_get_interval()
9100 if (!p) in sched_rr_get_interval()
9103 retval = security_task_getscheduler(p); in sched_rr_get_interval()
9107 rq = task_rq_lock(p, &rf); in sched_rr_get_interval()
9109 if (p->sched_class->get_rr_interval) in sched_rr_get_interval()
9110 time_slice = p->sched_class->get_rr_interval(rq, p); in sched_rr_get_interval()
9111 task_rq_unlock(rq, p, &rf); in sched_rr_get_interval()
9158 void sched_show_task(struct task_struct *p) in sched_show_task() argument
9163 if (!try_get_task_stack(p)) in sched_show_task()
9166 pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); in sched_show_task()
9168 if (task_is_running(p)) in sched_show_task()
9171 free = stack_not_used(p); in sched_show_task()
9175 if (pid_alive(p)) in sched_show_task()
9176 ppid = task_pid_nr(rcu_dereference(p->real_parent)); in sched_show_task()
9179 free, task_pid_nr(p), ppid, in sched_show_task()
9180 read_task_thread_flags(p)); in sched_show_task()
9182 print_worker_info(KERN_INFO, p); in sched_show_task()
9183 print_stop_info(KERN_INFO, p); in sched_show_task()
9184 show_stack(p, NULL, KERN_INFO); in sched_show_task()
9185 put_task_stack(p); in sched_show_task()
9190 state_filter_match(unsigned long state_filter, struct task_struct *p) in state_filter_match() argument
9192 unsigned int state = READ_ONCE(p->__state); in state_filter_match()
9215 struct task_struct *g, *p; in show_state_filter() local
9218 for_each_process_thread(g, p) { in show_state_filter()
9228 if (state_filter_match(state_filter, p)) in show_state_filter()
9229 sched_show_task(p); in show_state_filter()
9338 int task_can_attach(struct task_struct *p) in task_can_attach() argument
9351 if (p->flags & PF_NO_SETAFFINITY) in task_can_attach()
9361 int migrate_task_to(struct task_struct *p, int target_cpu) in migrate_task_to() argument
9363 struct migration_arg arg = { p, target_cpu }; in migrate_task_to()
9364 int curr_cpu = task_cpu(p); in migrate_task_to()
9369 if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) in migrate_task_to()
9374 trace_sched_move_numa(p, curr_cpu, target_cpu); in migrate_task_to()
9382 void sched_setnuma(struct task_struct *p, int nid) in sched_setnuma() argument
9388 rq = task_rq_lock(p, &rf); in sched_setnuma()
9389 queued = task_on_rq_queued(p); in sched_setnuma()
9390 running = task_current(rq, p); in sched_setnuma()
9393 dequeue_task(rq, p, DEQUEUE_SAVE); in sched_setnuma()
9395 put_prev_task(rq, p); in sched_setnuma()
9397 p->numa_preferred_nid = nid; in sched_setnuma()
9400 enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); in sched_setnuma()
9402 set_next_task(rq, p); in sched_setnuma()
9403 task_rq_unlock(rq, p, &rf); in sched_setnuma()
9429 struct task_struct *p = arg; in __balance_push_cpu_stop() local
9434 raw_spin_lock_irq(&p->pi_lock); in __balance_push_cpu_stop()
9439 if (task_rq(p) == rq && task_on_rq_queued(p)) { in __balance_push_cpu_stop()
9440 cpu = select_fallback_rq(rq->cpu, p); in __balance_push_cpu_stop()
9441 rq = __migrate_task(rq, &rf, p, cpu); in __balance_push_cpu_stop()
9445 raw_spin_unlock_irq(&p->pi_lock); in __balance_push_cpu_stop()
9447 put_task_struct(p); in __balance_push_cpu_stop()
9841 struct task_struct *g, *p; in dump_rq_tasks() local
9847 for_each_process_thread(g, p) { in dump_rq_tasks()
9848 if (task_cpu(p) != cpu) in dump_rq_tasks()
9851 if (!task_on_rq_queued(p)) in dump_rq_tasks()
9854 printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); in dump_rq_tasks()
10287 struct task_struct *g, *p; in normalize_rt_tasks() local
10293 for_each_process_thread(g, p) { in normalize_rt_tasks()
10297 if (p->flags & PF_KTHREAD) in normalize_rt_tasks()
10300 p->se.exec_start = 0; in normalize_rt_tasks()
10301 schedstat_set(p->stats.wait_start, 0); in normalize_rt_tasks()
10302 schedstat_set(p->stats.sleep_start, 0); in normalize_rt_tasks()
10303 schedstat_set(p->stats.block_start, 0); in normalize_rt_tasks()
10305 if (!dl_task(p) && !rt_task(p)) { in normalize_rt_tasks()
10310 if (task_nice(p) < 0) in normalize_rt_tasks()
10311 set_user_nice(p, 0); in normalize_rt_tasks()
10315 __sched_setscheduler(p, &attr, false, false); in normalize_rt_tasks()
10364 void ia64_set_curr_task(int cpu, struct task_struct *p) in ia64_set_curr_task() argument
10366 cpu_curr(cpu) = p; in ia64_set_curr_task()