Lines Matching refs:p (references to the task_struct pointer p in kernel/sched/deadline.c; the numbers below are source-file line numbers, each entry annotated with its enclosing function and whether p is an argument or a local there)

69 struct task_struct *p = dl_task_of(dl_se); in dl_rq_of_se() local
70 struct rq *rq = task_rq(p); in dl_rq_of_se()
311 static void dl_change_utilization(struct task_struct *p, u64 new_bw) in dl_change_utilization() argument
315 WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV); in dl_change_utilization()
317 if (task_on_rq_queued(p)) in dl_change_utilization()
320 rq = task_rq(p); in dl_change_utilization()
321 if (p->dl.dl_non_contending) { in dl_change_utilization()
322 sub_running_bw(&p->dl, &rq->dl); in dl_change_utilization()
323 p->dl.dl_non_contending = 0; in dl_change_utilization()
331 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in dl_change_utilization()
332 put_task_struct(p); in dl_change_utilization()
334 __sub_rq_bw(p->dl.dl_bw, &rq->dl); in dl_change_utilization()
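The references in dl_change_utilization() above (source lines 311-334) cover almost the whole function, so its flow can be reconstructed. The sketch below is assembled from the listed lines, not copied from the kernel tree; the early return after task_on_rq_queued() and the closing __add_rq_bw(new_bw, ...) step are inferred rather than shown.

static void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
	struct rq *rq;

	/* Special SCHED_FLAG_SUGOV entities must not get here. */
	WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);

	if (task_on_rq_queued(p))
		return;				/* inferred: nothing to fix up */

	rq = task_rq(p);
	if (p->dl.dl_non_contending) {
		sub_running_bw(&p->dl, &rq->dl);
		p->dl.dl_non_contending = 0;
		/*
		 * Cancelling a still-armed inactive timer drops the task
		 * reference the timer was holding.
		 */
		if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
			put_task_struct(p);
	}
	__sub_rq_bw(p->dl.dl_bw, &rq->dl);
	__add_rq_bw(new_bw, &rq->dl);		/* inferred closing step */
}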
392 static void task_non_contending(struct task_struct *p) in task_non_contending() argument
394 struct sched_dl_entity *dl_se = &p->dl; in task_non_contending()
427 if (dl_task(p)) in task_non_contending()
429 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) { in task_non_contending()
430 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in task_non_contending()
432 if (READ_ONCE(p->__state) == TASK_DEAD) in task_non_contending()
433 sub_rq_bw(&p->dl, &rq->dl); in task_non_contending()
435 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in task_non_contending()
437 __dl_clear_params(p); in task_non_contending()
444 get_task_struct(p); in task_non_contending()
485 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq) in is_leftmost() argument
487 struct sched_dl_entity *dl_se = &p->dl; in is_leftmost()
571 struct task_struct *p = dl_task_of(dl_se); in inc_dl_migration() local
573 if (p->nr_cpus_allowed > 1) in inc_dl_migration()
581 struct task_struct *p = dl_task_of(dl_se); in dec_dl_migration() local
583 if (p->nr_cpus_allowed > 1) in dec_dl_migration()
601 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_dl_task() argument
605 WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks)); in enqueue_pushable_dl_task()
607 leftmost = rb_add_cached(&p->pushable_dl_tasks, in enqueue_pushable_dl_task()
611 rq->dl.earliest_dl.next = p->dl.deadline; in enqueue_pushable_dl_task()
614 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_dl_task() argument
620 if (RB_EMPTY_NODE(&p->pushable_dl_tasks)) in dequeue_pushable_dl_task()
623 leftmost = rb_erase_cached(&p->pushable_dl_tasks, root); in dequeue_pushable_dl_task()
627 RB_CLEAR_NODE(&p->pushable_dl_tasks); in dequeue_pushable_dl_task()
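enqueue_pushable_dl_task() and dequeue_pushable_dl_task() (source lines 601-627) maintain the per-rq cached rbtree of tasks eligible for pushing, ordered by deadline. The pair below is a hedged reconstruction from the listed lines; the comparator __pushable_less() is written here as a plain deadline comparison and its name and details are assumptions.

static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
{
	/* Assumed comparator: earlier deadline sorts to the left. */
	return dl_time_before(__node_2_pdl(a)->dl.deadline,
			      __node_2_pdl(b)->dl.deadline);
}

static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct rb_node *leftmost;

	WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	leftmost = rb_add_cached(&p->pushable_dl_tasks,
				 &rq->dl.pushable_dl_tasks_root,
				 __pushable_less);
	/* New leftmost: cache the earliest pushable deadline. */
	if (leftmost)
		rq->dl.earliest_dl.next = p->dl.deadline;
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct rb_root_cached *root = &rq->dl.pushable_dl_tasks_root;
	struct rb_node *leftmost;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
	if (leftmost)
		rq->dl.earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;

	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}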
663 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p) in dl_task_offline_migration() argument
668 later_rq = find_lock_later_rq(p, rq); in dl_task_offline_migration()
676 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr); in dl_task_offline_migration()
695 if (p->dl.dl_non_contending || p->dl.dl_throttled) { in dl_task_offline_migration()
702 sub_running_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
703 sub_rq_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
705 add_rq_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
706 add_running_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
708 sub_rq_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
709 add_rq_bw(&p->dl, &later_rq->dl); in dl_task_offline_migration()
719 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_task_offline_migration()
724 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span)); in dl_task_offline_migration()
727 set_task_cpu(p, later_rq->cpu); in dl_task_offline_migration()
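dl_task_offline_migration() (source lines 663-727) shows how a deadline task's bandwidth follows it away from an offlined CPU: the per-rq shares move from rq to later_rq, then the root-domain admission-control totals are adjusted before the task is re-homed with set_task_cpu(). The sketch below condenses just that hand-over; dl_migrate_bw() is a hypothetical wrapper used only to group the steps (in deadline.c this code sits inline), and the locking around the dl_bw structures is an assumption.

/* Hypothetical helper: the bandwidth hand-over extracted from
 * dl_task_offline_migration(); error paths, find_lock_later_rq() and the
 * double_lock_balance() handling are omitted. */
static void dl_migrate_bw(struct rq *rq, struct rq *later_rq,
			  struct task_struct *p)
{
	struct dl_bw *dl_b;

	if (p->dl.dl_non_contending || p->dl.dl_throttled) {
		/* Both the running and the rq share still sit on rq. */
		sub_running_bw(&p->dl, &rq->dl);
		sub_rq_bw(&p->dl, &rq->dl);
		add_rq_bw(&p->dl, &later_rq->dl);
		add_running_bw(&p->dl, &later_rq->dl);
	} else {
		sub_rq_bw(&p->dl, &rq->dl);
		add_rq_bw(&p->dl, &later_rq->dl);
	}

	/* Root-domain totals follow the task as well (locking assumed). */
	dl_b = &rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);
	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
	raw_spin_unlock(&dl_b->lock);

	dl_b = &later_rq->rd->dl_bw;
	raw_spin_lock(&dl_b->lock);
	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
	raw_spin_unlock(&dl_b->lock);

	set_task_cpu(p, later_rq->cpu);
}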
736 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_dl_task() argument
741 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_dl_task() argument
764 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
765 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
766 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
1045 static int start_dl_timer(struct task_struct *p) in start_dl_timer() argument
1047 struct sched_dl_entity *dl_se = &p->dl; in start_dl_timer()
1049 struct rq *rq = task_rq(p); in start_dl_timer()
1083 get_task_struct(p); in start_dl_timer()
1108 struct task_struct *p = dl_task_of(dl_se); in dl_task_timer() local
1112 rq = task_rq_lock(p, &rf); in dl_task_timer()
1118 if (!dl_task(p)) in dl_task_timer()
1152 if (!task_on_rq_queued(p)) { in dl_task_timer()
1164 rq = dl_task_offline_migration(rq, p); in dl_task_timer()
1176 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); in dl_task_timer()
1178 check_preempt_curr_dl(rq, p, 0); in dl_task_timer()
1199 task_rq_unlock(rq, p, &rf); in dl_task_timer()
1205 put_task_struct(p); in dl_task_timer()
1238 struct task_struct *p = dl_task_of(dl_se); in dl_check_constrained_dl() local
1243 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p))) in dl_check_constrained_dl()
1404 struct task_struct *p = dl_task_of(dl_se); in inactive_task_timer() local
1408 rq = task_rq_lock(p, &rf); in inactive_task_timer()
1413 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) { in inactive_task_timer()
1414 struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); in inactive_task_timer()
1416 if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) { in inactive_task_timer()
1417 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl)); in inactive_task_timer()
1418 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl)); in inactive_task_timer()
1423 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in inactive_task_timer()
1425 __dl_clear_params(p); in inactive_task_timer()
1435 task_rq_unlock(rq, p, &rf); in inactive_task_timer()
1436 put_task_struct(p); in inactive_task_timer()
1586 struct task_struct *p = dl_task_of(dl_se); in update_stats_dequeue_dl() local
1594 state = READ_ONCE(p->__state); in update_stats_dequeue_dl()
1596 __schedstat_set(p->stats.sleep_start, in update_stats_dequeue_dl()
1600 __schedstat_set(p->stats.block_start, in update_stats_dequeue_dl()
1661 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_dl() argument
1663 if (is_dl_boosted(&p->dl)) { in enqueue_task_dl()
1676 if (p->dl.dl_throttled) { in enqueue_task_dl()
1682 hrtimer_try_to_cancel(&p->dl.dl_timer); in enqueue_task_dl()
1683 p->dl.dl_throttled = 0; in enqueue_task_dl()
1685 } else if (!dl_prio(p->normal_prio)) { in enqueue_task_dl()
1695 p->dl.dl_throttled = 0; in enqueue_task_dl()
1698 task_pid_nr(p)); in enqueue_task_dl()
1709 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl)) in enqueue_task_dl()
1710 dl_check_constrained_dl(&p->dl); in enqueue_task_dl()
1712 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) { in enqueue_task_dl()
1713 add_rq_bw(&p->dl, &rq->dl); in enqueue_task_dl()
1714 add_running_bw(&p->dl, &rq->dl); in enqueue_task_dl()
1729 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) { in enqueue_task_dl()
1731 task_contending(&p->dl, flags); in enqueue_task_dl()
1737 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl); in enqueue_task_dl()
1739 enqueue_dl_entity(&p->dl, flags); in enqueue_task_dl()
1741 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) in enqueue_task_dl()
1742 enqueue_pushable_dl_task(rq, p); in enqueue_task_dl()
1745 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) in __dequeue_task_dl() argument
1747 update_stats_dequeue_dl(&rq->dl, &p->dl, flags); in __dequeue_task_dl()
1748 dequeue_dl_entity(&p->dl); in __dequeue_task_dl()
1749 dequeue_pushable_dl_task(rq, p); in __dequeue_task_dl()
1752 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_dl() argument
1755 __dequeue_task_dl(rq, p, flags); in dequeue_task_dl()
1757 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) { in dequeue_task_dl()
1758 sub_running_bw(&p->dl, &rq->dl); in dequeue_task_dl()
1759 sub_rq_bw(&p->dl, &rq->dl); in dequeue_task_dl()
1772 task_non_contending(p); in dequeue_task_dl()
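The dequeue path (source lines 1745-1772) mirrors the bandwidth handling visible on the enqueue side at lines 1712-1714. A hedged reconstruction follows; the update_curr_dl() call and the DEQUEUE_SLEEP condition guarding task_non_contending() are inferred from context rather than listed.

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_stats_dequeue_dl(&rq->dl, &p->dl, flags);
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);			/* inferred */
	__dequeue_task_dl(rq, p, flags);

	/* Migrating or class-change save: the bandwidth leaves this rq. */
	if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
		sub_running_bw(&p->dl, &rq->dl);
		sub_rq_bw(&p->dl, &rq->dl);
	}

	/* A blocking task becomes non-contending (0-lag handling). */
	if (flags & DEQUEUE_SLEEP)		/* condition inferred */
		task_non_contending(p);
}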
1807 static inline bool dl_task_is_earliest_deadline(struct task_struct *p, in dl_task_is_earliest_deadline() argument
1811 dl_time_before(p->dl.deadline, in dl_task_is_earliest_deadline()
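dl_task_is_earliest_deadline() (source line 1807) is only partly visible; the full helper is short, reconstructed here with the empty-runqueue short-circuit filled in as an assumption.

static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
						struct rq *rq)
{
	/* True if rq has no DL tasks, or p beats its earliest deadline. */
	return (!rq->dl.dl_nr_running ||
		dl_time_before(p->dl.deadline,
			       rq->dl.earliest_dl.curr));
}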
1818 select_task_rq_dl(struct task_struct *p, int cpu, int flags) in select_task_rq_dl() argument
1843 !dl_entity_preempt(&p->dl, &curr->dl)) && in select_task_rq_dl()
1844 p->nr_cpus_allowed > 1; in select_task_rq_dl()
1851 select_rq |= !dl_task_fits_capacity(p, cpu); in select_task_rq_dl()
1854 int target = find_later_rq(p); in select_task_rq_dl()
1857 dl_task_is_earliest_deadline(p, cpu_rq(target))) in select_task_rq_dl()
1866 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused) in migrate_task_rq_dl() argument
1871 if (READ_ONCE(p->__state) != TASK_WAKING) in migrate_task_rq_dl()
1874 rq = task_rq(p); in migrate_task_rq_dl()
1881 if (p->dl.dl_non_contending) { in migrate_task_rq_dl()
1883 sub_running_bw(&p->dl, &rq->dl); in migrate_task_rq_dl()
1884 p->dl.dl_non_contending = 0; in migrate_task_rq_dl()
1892 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in migrate_task_rq_dl()
1893 put_task_struct(p); in migrate_task_rq_dl()
1895 sub_rq_bw(&p->dl, &rq->dl); in migrate_task_rq_dl()
1899 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) in check_preempt_equal_dl() argument
1913 if (p->nr_cpus_allowed != 1 && in check_preempt_equal_dl()
1914 cpudl_find(&rq->rd->cpudl, p, NULL)) in check_preempt_equal_dl()
1920 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in balance_dl() argument
1922 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) { in balance_dl()
1942 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, in check_preempt_curr_dl() argument
1945 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { in check_preempt_curr_dl()
1955 if ((p->dl.deadline == rq->curr->dl.deadline) && in check_preempt_curr_dl()
1957 check_preempt_equal_dl(rq, p); in check_preempt_curr_dl()
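check_preempt_curr_dl() (source lines 1942-1957) decides whether the waking task p should preempt the currently running DL task. A hedged sketch; the resched_curr() call, the CONFIG_SMP guard and the test_tsk_need_resched() check are inferred from context.

static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_curr(rq);		/* inferred */
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * On an exact deadline tie, see whether one of the two tasks can
	 * run elsewhere before forcing a preemption here.
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))	/* inferred */
		check_preempt_equal_dl(rq, p);
#endif
}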
1962 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) in start_hrtick_dl() argument
1964 hrtick_start(rq, p->dl.runtime); in start_hrtick_dl()
1967 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) in start_hrtick_dl() argument
1972 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first) in set_next_task_dl() argument
1974 struct sched_dl_entity *dl_se = &p->dl; in set_next_task_dl()
1977 p->se.exec_start = rq_clock_task(rq); in set_next_task_dl()
1978 if (on_dl_rq(&p->dl)) in set_next_task_dl()
1982 dequeue_pushable_dl_task(rq, p); in set_next_task_dl()
1988 start_hrtick_dl(rq, p); in set_next_task_dl()
2010 struct task_struct *p; in pick_task_dl() local
2017 p = dl_task_of(dl_se); in pick_task_dl()
2019 return p; in pick_task_dl()
2024 struct task_struct *p; in pick_next_task_dl() local
2026 p = pick_task_dl(rq); in pick_next_task_dl()
2027 if (p) in pick_next_task_dl()
2028 set_next_task_dl(rq, p, true); in pick_next_task_dl()
2030 return p; in pick_next_task_dl()
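pick_task_dl() and pick_next_task_dl() (source lines 2010-2030) are nearly fully covered by the references above. Reconstructed sketch; the sched_dl_runnable() guard and the pick_next_dl_entity() call are inferred, and those helper names may differ between kernel versions.

static struct task_struct *pick_task_dl(struct rq *rq)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;

	if (!sched_dl_runnable(rq))		/* inferred guard */
		return NULL;

	dl_se = pick_next_dl_entity(&rq->dl);	/* inferred */
	WARN_ON_ONCE(!dl_se);
	p = dl_task_of(dl_se);

	return p;
}

static struct task_struct *pick_next_task_dl(struct rq *rq)
{
	struct task_struct *p;

	p = pick_task_dl(rq);
	if (p)
		set_next_task_dl(rq, p, true);

	return p;
}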
2033 static void put_prev_task_dl(struct rq *rq, struct task_struct *p) in put_prev_task_dl() argument
2035 struct sched_dl_entity *dl_se = &p->dl; in put_prev_task_dl()
2038 if (on_dl_rq(&p->dl)) in put_prev_task_dl()
2044 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) in put_prev_task_dl()
2045 enqueue_pushable_dl_task(rq, p); in put_prev_task_dl()
2056 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) in task_tick_dl() argument
2066 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 && in task_tick_dl()
2067 is_leftmost(p, &rq->dl)) in task_tick_dl()
2068 start_hrtick_dl(rq, p); in task_tick_dl()
2071 static void task_fork_dl(struct task_struct *p) in task_fork_dl() argument
2084 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) in pick_dl_task() argument
2086 if (!task_on_cpu(rq, p) && in pick_dl_task()
2087 cpumask_test_cpu(cpu, &p->cpus_mask)) in pick_dl_task()
2098 struct task_struct *p = NULL; in pick_earliest_pushable_dl_task() local
2108 p = __node_2_pdl(next_node); in pick_earliest_pushable_dl_task()
2110 if (pick_dl_task(rq, p, cpu)) in pick_earliest_pushable_dl_task()
2111 return p; in pick_earliest_pushable_dl_task()
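pick_dl_task() (source lines 2084-2087) and pick_earliest_pushable_dl_task() (2098-2111) implement the candidate test and the tree walk used by the pull path below. Hedged reconstruction; the has_pushable_dl_tasks() guard and the rb_next()-based iteration are assumptions filled in around the listed lines.

/* Candidate test: p is pullable to @cpu if it is not running right now
 * and @cpu is in its affinity mask. */
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_on_cpu(rq, p) &&
	    cpumask_test_cpu(cpu, &p->cpus_mask))
		return 1;
	return 0;
}

/* Earliest-deadline pushable task on @rq that may run on @cpu. */
static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
{
	struct task_struct *p = NULL;
	struct rb_node *next_node;

	if (!has_pushable_dl_tasks(rq))		/* inferred guard */
		return NULL;

	next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);

	while (next_node) {			/* iteration inferred */
		p = __node_2_pdl(next_node);

		if (pick_dl_task(rq, p, cpu))
			return p;

		next_node = rb_next(next_node);
	}

	return NULL;
}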
2266 struct task_struct *p; in pick_next_pushable_dl_task() local
2271 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root)); in pick_next_pushable_dl_task()
2273 WARN_ON_ONCE(rq->cpu != task_cpu(p)); in pick_next_pushable_dl_task()
2274 WARN_ON_ONCE(task_current(rq, p)); in pick_next_pushable_dl_task()
2275 WARN_ON_ONCE(p->nr_cpus_allowed <= 1); in pick_next_pushable_dl_task()
2277 WARN_ON_ONCE(!task_on_rq_queued(p)); in pick_next_pushable_dl_task()
2278 WARN_ON_ONCE(!dl_task(p)); in pick_next_pushable_dl_task()
2280 return p; in pick_next_pushable_dl_task()
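pick_next_pushable_dl_task() (source lines 2266-2280) appears nearly in full; its WARN_ON_ONCE() checks spell out the invariants of a task sitting on the pushable tree. Reconstructed below; only the has_pushable_dl_tasks() early return is an assumption.

static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))		/* inferred guard */
		return NULL;

	p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));

	/* A pushable task belongs to this rq, is queued, is a DL task,
	 * is not currently running and can run on more than one CPU. */
	WARN_ON_ONCE(rq->cpu != task_cpu(p));
	WARN_ON_ONCE(task_current(rq, p));
	WARN_ON_ONCE(p->nr_cpus_allowed <= 1);

	WARN_ON_ONCE(!task_on_rq_queued(p));
	WARN_ON_ONCE(!dl_task(p));

	return p;
}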
2376 struct task_struct *p, *push_task; in pull_dl_task() local
2416 p = pick_earliest_pushable_dl_task(src_rq, this_cpu); in pull_dl_task()
2423 if (p && dl_time_before(p->dl.deadline, dmin) && in pull_dl_task()
2424 dl_task_is_earliest_deadline(p, this_rq)) { in pull_dl_task()
2425 WARN_ON(p == src_rq->curr); in pull_dl_task()
2426 WARN_ON(!task_on_rq_queued(p)); in pull_dl_task()
2432 if (dl_time_before(p->dl.deadline, in pull_dl_task()
2436 if (is_migration_disabled(p)) { in pull_dl_task()
2439 deactivate_task(src_rq, p, 0); in pull_dl_task()
2440 set_task_cpu(p, this_cpu); in pull_dl_task()
2441 activate_task(this_rq, p, 0); in pull_dl_task()
2442 dmin = p->dl.deadline; in pull_dl_task()
2469 static void task_woken_dl(struct rq *rq, struct task_struct *p) in task_woken_dl() argument
2471 if (!task_on_cpu(rq, p) && in task_woken_dl()
2473 p->nr_cpus_allowed > 1 && in task_woken_dl()
2476 !dl_entity_preempt(&p->dl, &rq->curr->dl))) { in task_woken_dl()
2481 static void set_cpus_allowed_dl(struct task_struct *p, in set_cpus_allowed_dl() argument
2487 WARN_ON_ONCE(!dl_task(p)); in set_cpus_allowed_dl()
2489 rq = task_rq(p); in set_cpus_allowed_dl()
2507 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p))); in set_cpus_allowed_dl()
2511 set_cpus_allowed_common(p, ctx); in set_cpus_allowed_dl()
2544 void dl_add_task_root_domain(struct task_struct *p) in dl_add_task_root_domain() argument
2550 raw_spin_lock_irqsave(&p->pi_lock, rf.flags); in dl_add_task_root_domain()
2551 if (!dl_task(p)) { in dl_add_task_root_domain()
2552 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); in dl_add_task_root_domain()
2556 rq = __task_rq_lock(p, &rf); in dl_add_task_root_domain()
2561 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_add_task_root_domain()
2565 task_rq_unlock(rq, p, &rf); in dl_add_task_root_domain()
2579 static void switched_from_dl(struct rq *rq, struct task_struct *p) in switched_from_dl() argument
2589 if (task_on_rq_queued(p) && p->dl.dl_runtime) in switched_from_dl()
2590 task_non_contending(p); in switched_from_dl()
2596 dec_dl_tasks_cs(p); in switched_from_dl()
2598 if (!task_on_rq_queued(p)) { in switched_from_dl()
2605 if (p->dl.dl_non_contending) in switched_from_dl()
2606 sub_running_bw(&p->dl, &rq->dl); in switched_from_dl()
2607 sub_rq_bw(&p->dl, &rq->dl); in switched_from_dl()
2615 if (p->dl.dl_non_contending) in switched_from_dl()
2616 p->dl.dl_non_contending = 0; in switched_from_dl()
2623 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) in switched_from_dl()
2633 static void switched_to_dl(struct rq *rq, struct task_struct *p) in switched_to_dl() argument
2635 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) in switched_to_dl()
2636 put_task_struct(p); in switched_to_dl()
2642 inc_dl_tasks_cs(p); in switched_to_dl()
2645 if (!task_on_rq_queued(p)) { in switched_to_dl()
2646 add_rq_bw(&p->dl, &rq->dl); in switched_to_dl()
2651 if (rq->curr != p) { in switched_to_dl()
2653 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) in switched_to_dl()
2657 check_preempt_curr_dl(rq, p, 0); in switched_to_dl()
2669 static void prio_changed_dl(struct rq *rq, struct task_struct *p, in prio_changed_dl() argument
2672 if (!task_on_rq_queued(p)) in prio_changed_dl()
2685 if (task_current(rq, p)) { in prio_changed_dl()
2691 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline)) in prio_changed_dl()
2701 dl_time_before(p->dl.deadline, rq->curr->dl.deadline)) in prio_changed_dl()
2714 static int task_is_throttled_dl(struct task_struct *p, int cpu) in task_is_throttled_dl() argument
2716 return p->dl.dl_throttled; in task_is_throttled_dl()
2850 int sched_dl_overflow(struct task_struct *p, int policy, in sched_dl_overflow() argument
2856 int cpus, err = -1, cpu = task_cpu(p); in sched_dl_overflow()
2864 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p)) in sched_dl_overflow()
2876 if (dl_policy(policy) && !task_has_dl_policy(p) && in sched_dl_overflow()
2878 if (hrtimer_active(&p->dl.inactive_timer)) in sched_dl_overflow()
2879 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
2882 } else if (dl_policy(policy) && task_has_dl_policy(p) && in sched_dl_overflow()
2883 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) { in sched_dl_overflow()
2891 __dl_sub(dl_b, p->dl.dl_bw, cpus); in sched_dl_overflow()
2893 dl_change_utilization(p, new_bw); in sched_dl_overflow()
2895 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { in sched_dl_overflow()
2916 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr) in __setparam_dl() argument
2918 struct sched_dl_entity *dl_se = &p->dl; in __setparam_dl()
2928 void __getparam_dl(struct task_struct *p, struct sched_attr *attr) in __getparam_dl() argument
2930 struct sched_dl_entity *dl_se = &p->dl; in __getparam_dl()
2932 attr->sched_priority = p->rt_priority; in __getparam_dl()
2998 void __dl_clear_params(struct task_struct *p) in __dl_clear_params() argument
3000 struct sched_dl_entity *dl_se = &p->dl; in __dl_clear_params()
3019 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr) in dl_param_changed() argument
3021 struct sched_dl_entity *dl_se = &p->dl; in dl_param_changed()
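Finally, dl_param_changed() (source lines 3019-3021) shows only its first two lines above; it reports whether a new sched_attr differs from the task's current deadline parameters. The comparison body below is a hedged reconstruction, and the SCHED_DL_FLAGS masking of sched_flags in particular is an assumption.

bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
{
	struct sched_dl_entity *dl_se = &p->dl;

	/* Any difference in the CBS parameters or the DL flags counts. */
	if (dl_se->dl_runtime != attr->sched_runtime ||
	    dl_se->dl_deadline != attr->sched_deadline ||
	    dl_se->dl_period != attr->sched_period ||
	    dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
		return true;

	return false;
}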