Lines Matching refs:rq (cross-reference of struct rq, the per-CPU runqueue, as used by the deadline scheduling class in kernel/sched/deadline.c)

62 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)  in rq_of_dl_rq()
64 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
70 struct rq *rq = task_rq(p); in dl_rq_of_se() local
72 return &rq->dl; in dl_rq_of_se()
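The two helpers above convert between views of the same object: struct dl_rq is embedded inside struct rq, so container_of() can walk from the deadline runqueue back out to the enclosing CPU runqueue, while dl_rq_of_se() goes through task_rq(). A minimal stand-alone sketch of that container_of() pattern, using drastically reduced stand-in types rather than the kernel's real definitions:

#include <stddef.h>
#include <stdio.h>

/* Reduced stand-ins for the kernel types. */
struct dl_rq { unsigned int dl_nr_running; };
struct rq    { int cpu; struct dl_rq dl; };

/* Same idea as the kernel macro: subtract the member offset. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

int main(void)
{
	struct rq rq = { .cpu = 3 };
	struct dl_rq *dl_rq = &rq.dl;

	/* From the embedded dl_rq alone we recover the enclosing rq. */
	printf("cpu = %d\n", rq_of_dl_rq(dl_rq)->cpu);
	return 0;
}
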
177 struct rq *rq = cpu_rq(i); in __dl_update() local
179 rq->dl.extra_bw += bw; in __dl_update()
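extra_bw, adjusted here for every runqueue in the root domain, tracks bandwidth not claimed by any reservation; grub_reclaim() (further down) hands it back to running tasks. A hedged sketch of the bookkeeping driven through __dl_update(), assuming the callers split a reservation's bandwidth evenly across the root domain's CPUs, with plain doubles and a fixed CPU count in place of the kernel's fixed-point math and rd->span walk:

#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for each runqueue's rq->dl.extra_bw. */
static double extra_bw[NR_CPUS];

/* Spread a bandwidth change over all CPUs, as __dl_update() does. */
static void dl_update(double bw)
{
	for (int i = 0; i < NR_CPUS; i++)
		extra_bw[i] += bw;
}

/* Admitting a reservation removes its per-CPU share from the
 * reclaimable bandwidth; releasing it gives the share back. */
static void dl_add(double task_bw) { dl_update(-task_bw / NR_CPUS); }
static void dl_sub(double task_bw) { dl_update( task_bw / NR_CPUS); }

int main(void)
{
	for (int i = 0; i < NR_CPUS; i++)
		extra_bw[i] = 0.95;	/* assume it starts at the 95% cap */

	dl_add(0.40);			/* admit e.g. a 10ms/25ms reservation */
	printf("after admission: %.3f per CPU\n", extra_bw[0]);	/* 0.850 */

	dl_sub(0.40);			/* reservation released again */
	printf("after release:   %.3f per CPU\n", extra_bw[0]);	/* 0.950 */
	return 0;
}
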
313 struct rq *rq; in dl_change_utilization() local
320 rq = task_rq(p); in dl_change_utilization()
322 sub_running_bw(&p->dl, &rq->dl); in dl_change_utilization()
334 __sub_rq_bw(p->dl.dl_bw, &rq->dl); in dl_change_utilization()
335 __add_rq_bw(new_bw, &rq->dl); in dl_change_utilization()
397 struct rq *rq = rq_of_dl_rq(dl_rq); in task_non_contending() local
420 zerolag_time -= rq_clock(rq); in task_non_contending()
433 sub_rq_bw(&p->dl, &rq->dl); in task_non_contending()
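task_non_contending() implements the GRUB "0-lag" rule: when a deadline task blocks, its bandwidth stays in running_bw until the zero-lag time, the instant at which its remaining runtime, consumed at exactly the reserved rate, would hit zero. Line 420 above compares that instant with rq_clock(); if it is already in the past the bandwidth is dropped immediately, otherwise the inactive timer is armed. A sketch of the arithmetic, assuming nanosecond values and ignoring the kernel's fixed-point bandwidth encoding:

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "a before b", same idea as dl_time_before(). */
static int dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

/*
 * Zero-lag time: deadline - remaining_runtime / bandwidth, with
 * bandwidth = dl_runtime / dl_period, rearranged to avoid dividing
 * by a fraction.
 */
static uint64_t zerolag_time(uint64_t deadline, uint64_t runtime,
			     uint64_t dl_runtime, uint64_t dl_period)
{
	return deadline - (runtime * dl_period) / dl_runtime;
}

int main(void)
{
	/* 2ms of runtime left, reservation of 10ms every 100ms. */
	uint64_t t0 = zerolag_time(500000000ULL, 2000000ULL,
				   10000000ULL, 100000000ULL);
	uint64_t now = 490000000ULL;	/* hypothetical rq_clock() value */

	/* 500ms - 2ms/0.1 = 480ms: already past, drop the bandwidth now. */
	printf("zero-lag at %llu ns: %s\n", (unsigned long long)t0,
	       dl_time_before(t0, now) ? "already past" : "timer armed");
	return 0;
}
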
526 static inline int dl_overloaded(struct rq *rq) in dl_overloaded() argument
528 return atomic_read(&rq->rd->dlo_count); in dl_overloaded()
531 static inline void dl_set_overload(struct rq *rq) in dl_set_overload() argument
533 if (!rq->online) in dl_set_overload()
536 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask); in dl_set_overload()
544 atomic_inc(&rq->rd->dlo_count); in dl_set_overload()
547 static inline void dl_clear_overload(struct rq *rq) in dl_clear_overload() argument
549 if (!rq->online) in dl_clear_overload()
552 atomic_dec(&rq->rd->dlo_count); in dl_clear_overload()
553 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask); in dl_clear_overload()
601 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_dl_task() argument
608 &rq->dl.pushable_dl_tasks_root, in enqueue_pushable_dl_task()
611 rq->dl.earliest_dl.next = p->dl.deadline; in enqueue_pushable_dl_task()
614 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_dl_task() argument
616 struct dl_rq *dl_rq = &rq->dl; in dequeue_pushable_dl_task()
630 static inline int has_pushable_dl_tasks(struct rq *rq) in has_pushable_dl_tasks() argument
632 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root); in has_pushable_dl_tasks()
635 static int push_dl_task(struct rq *rq);
637 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev) in need_pull_dl_task() argument
639 return rq->online && dl_task(prev); in need_pull_dl_task()
645 static void push_dl_tasks(struct rq *);
646 static void pull_dl_task(struct rq *);
648 static inline void deadline_queue_push_tasks(struct rq *rq) in deadline_queue_push_tasks() argument
650 if (!has_pushable_dl_tasks(rq)) in deadline_queue_push_tasks()
653 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks); in deadline_queue_push_tasks()
656 static inline void deadline_queue_pull_task(struct rq *rq) in deadline_queue_pull_task() argument
658 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task); in deadline_queue_pull_task()
661 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
663 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p) in dl_task_offline_migration() argument
665 struct rq *later_rq = NULL; in dl_task_offline_migration()
668 later_rq = find_lock_later_rq(p, rq); in dl_task_offline_migration()
692 double_lock_balance(rq, later_rq); in dl_task_offline_migration()
702 sub_running_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
703 sub_rq_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
708 sub_rq_bw(&p->dl, &rq->dl); in dl_task_offline_migration()
717 dl_b = &rq->rd->dl_bw; in dl_task_offline_migration()
719 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_task_offline_migration()
728 double_unlock_balance(later_rq, rq); in dl_task_offline_migration()
736 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) in enqueue_pushable_dl_task() argument
741 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) in dequeue_pushable_dl_task() argument
755 static inline void deadline_queue_push_tasks(struct rq *rq) in deadline_queue_push_tasks() argument
759 static inline void deadline_queue_pull_task(struct rq *rq) in deadline_queue_pull_task() argument
764 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
765 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
766 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
769 struct rq *rq) in replenish_dl_new_period() argument
772 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline; in replenish_dl_new_period()
791 struct rq *rq = rq_of_dl_rq(dl_rq); in setup_new_dl_entity() local
794 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline)); in setup_new_dl_entity()
809 replenish_dl_new_period(dl_se, rq); in setup_new_dl_entity()
833 struct rq *rq = rq_of_dl_rq(dl_rq); in replenish_dl_entity() local
842 replenish_dl_new_period(dl_se, rq); in replenish_dl_entity()
867 if (dl_time_before(dl_se->deadline, rq_clock(rq))) { in replenish_dl_entity()
869 replenish_dl_new_period(dl_se, rq); in replenish_dl_entity()
950 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq) in update_dl_revised_wakeup() argument
952 u64 laxity = dl_se->deadline - rq_clock(rq); in update_dl_revised_wakeup()
960 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq))); in update_dl_revised_wakeup()
1014 struct rq *rq = rq_of_dl_rq(dl_rq); in update_dl_entity() local
1016 if (dl_time_before(dl_se->deadline, rq_clock(rq)) || in update_dl_entity()
1017 dl_entity_overflow(dl_se, rq_clock(rq))) { in update_dl_entity()
1020 !dl_time_before(dl_se->deadline, rq_clock(rq)) && in update_dl_entity()
1022 update_dl_revised_wakeup(dl_se, rq); in update_dl_entity()
1026 replenish_dl_new_period(dl_se, rq); in update_dl_entity()
1049 struct rq *rq = task_rq(p); in start_dl_timer() local
1053 lockdep_assert_rq_held(rq); in start_dl_timer()
1062 delta = ktime_to_ns(now) - rq_clock(rq); in start_dl_timer()
1110 struct rq *rq; in dl_task_timer() local
1112 rq = task_rq_lock(p, &rf); in dl_task_timer()
1136 update_rq_clock(rq); in dl_task_timer()
1158 if (unlikely(!rq->online)) { in dl_task_timer()
1163 lockdep_unpin_lock(__rq_lockp(rq), rf.cookie); in dl_task_timer()
1164 rq = dl_task_offline_migration(rq, p); in dl_task_timer()
1165 rf.cookie = lockdep_pin_lock(__rq_lockp(rq)); in dl_task_timer()
1166 update_rq_clock(rq); in dl_task_timer()
1176 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); in dl_task_timer()
1177 if (dl_task(rq->curr)) in dl_task_timer()
1178 check_preempt_curr_dl(rq, p, 0); in dl_task_timer()
1180 resched_curr(rq); in dl_task_timer()
1187 if (has_pushable_dl_tasks(rq)) { in dl_task_timer()
1192 rq_unpin_lock(rq, &rf); in dl_task_timer()
1193 push_dl_task(rq); in dl_task_timer()
1194 rq_repin_lock(rq, &rf); in dl_task_timer()
1199 task_rq_unlock(rq, p, &rf); in dl_task_timer()
1239 struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se)); in dl_check_constrained_dl() local
1241 if (dl_time_before(dl_se->deadline, rq_clock(rq)) && in dl_check_constrained_dl()
1242 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) { in dl_check_constrained_dl()
1274 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se) in grub_reclaim() argument
1277 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */ in grub_reclaim()
1285 if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw) in grub_reclaim()
1288 u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw; in grub_reclaim()
1290 u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT; in grub_reclaim()
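grub_reclaim() scales the execution delta before update_curr_dl() (below) charges it to the task's runtime, so reclaiming tasks deplete their budget at rate max(u, max_bw - u_inact - u_extra) rather than 1 and can soak up bandwidth left unused by inactive reservations. A hedged floating-point sketch of that rule; the kernel's BW_SHIFT fixed point and the final bw_ratio rescaling (line 1290) are omitted:

#include <stdio.h>

/*
 * delta      : wall-clock execution time to charge
 * this_bw    : total bandwidth of tasks on this runqueue (Utot)
 * running_bw : bandwidth of the currently contending tasks (Uact)
 * extra_bw   : unreserved, reclaimable bandwidth
 * max_bw     : per-CPU cap (0.95 with the default limits)
 * u          : this task's own bandwidth, dl_runtime / dl_period
 */
static double grub_reclaim(double delta, double this_bw, double running_bw,
			   double extra_bw, double max_bw, double u)
{
	double u_inact = this_bw - running_bw;	/* Utot - Uact */
	double u_act;

	/* Clamp as the kernel does, so u_act never drops below u. */
	if (u_inact + extra_bw > max_bw - u)
		u_act = u;
	else
		u_act = max_bw - u_inact - extra_bw;

	return delta * u_act;
}

int main(void)
{
	/*
	 * Two 0.25 reservations on one CPU, both active: each ms of
	 * execution costs only 0.5ms of runtime, so the two tasks can
	 * reclaim the otherwise idle bandwidth between them.
	 */
	printf("charged: %.2f ms\n",
	       grub_reclaim(1.0, 0.50, 0.50, 0.45, 0.95, 0.25));
	return 0;
}
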
1298 static void update_curr_dl(struct rq *rq) in update_curr_dl() argument
1300 struct task_struct *curr = rq->curr; in update_curr_dl()
1303 int cpu = cpu_of(rq); in update_curr_dl()
1317 now = rq_clock_task(rq); in update_curr_dl()
1344 rq, in update_curr_dl()
1365 __dequeue_task_dl(rq, curr, 0); in update_curr_dl()
1367 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH); in update_curr_dl()
1369 if (!is_leftmost(curr, &rq->dl)) in update_curr_dl()
1370 resched_curr(rq); in update_curr_dl()
1385 struct rt_rq *rt_rq = &rq->rt; in update_curr_dl()
1406 struct rq *rq; in inactive_task_timer() local
1408 rq = task_rq_lock(p, &rf); in inactive_task_timer()
1411 update_rq_clock(rq); in inactive_task_timer()
1432 sub_running_bw(dl_se, &rq->dl); in inactive_task_timer()
1435 task_rq_unlock(rq, p, &rf); in inactive_task_timer()
1456 struct rq *rq = rq_of_dl_rq(dl_rq); in inc_dl_deadline() local
1461 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER); in inc_dl_deadline()
1463 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline); in inc_dl_deadline()
1469 struct rq *rq = rq_of_dl_rq(dl_rq); in dec_dl_deadline() local
1478 cpudl_clear(&rq->rd->cpudl, rq->cpu); in dec_dl_deadline()
1479 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); in dec_dl_deadline()
1485 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline); in dec_dl_deadline()
1661 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_dl() argument
1713 add_rq_bw(&p->dl, &rq->dl); in enqueue_task_dl()
1714 add_running_bw(&p->dl, &rq->dl); in enqueue_task_dl()
1741 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) in enqueue_task_dl()
1742 enqueue_pushable_dl_task(rq, p); in enqueue_task_dl()
1745 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) in __dequeue_task_dl() argument
1747 update_stats_dequeue_dl(&rq->dl, &p->dl, flags); in __dequeue_task_dl()
1749 dequeue_pushable_dl_task(rq, p); in __dequeue_task_dl()
1752 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_dl() argument
1754 update_curr_dl(rq); in dequeue_task_dl()
1755 __dequeue_task_dl(rq, p, flags); in dequeue_task_dl()
1758 sub_running_bw(&p->dl, &rq->dl); in dequeue_task_dl()
1759 sub_rq_bw(&p->dl, &rq->dl); in dequeue_task_dl()
1785 static void yield_task_dl(struct rq *rq) in yield_task_dl() argument
1793 rq->curr->dl.dl_yielded = 1; in yield_task_dl()
1795 update_rq_clock(rq); in yield_task_dl()
1796 update_curr_dl(rq); in yield_task_dl()
1802 rq_clock_skip_update(rq); in yield_task_dl()
1808 struct rq *rq) in dl_task_is_earliest_deadline() argument
1810 return (!rq->dl.dl_nr_running || in dl_task_is_earliest_deadline()
1812 rq->dl.earliest_dl.curr)); in dl_task_is_earliest_deadline()
1822 struct rq *rq; in select_task_rq_dl() local
1827 rq = cpu_rq(cpu); in select_task_rq_dl()
1830 curr = READ_ONCE(rq->curr); /* unlocked access */ in select_task_rq_dl()
1869 struct rq *rq; in migrate_task_rq_dl() local
1874 rq = task_rq(p); in migrate_task_rq_dl()
1880 rq_lock(rq, &rf); in migrate_task_rq_dl()
1882 update_rq_clock(rq); in migrate_task_rq_dl()
1883 sub_running_bw(&p->dl, &rq->dl); in migrate_task_rq_dl()
1895 sub_rq_bw(&p->dl, &rq->dl); in migrate_task_rq_dl()
1896 rq_unlock(rq, &rf); in migrate_task_rq_dl()
1899 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) in check_preempt_equal_dl() argument
1905 if (rq->curr->nr_cpus_allowed == 1 || in check_preempt_equal_dl()
1906 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL)) in check_preempt_equal_dl()
1914 cpudl_find(&rq->rd->cpudl, p, NULL)) in check_preempt_equal_dl()
1917 resched_curr(rq); in check_preempt_equal_dl()
1920 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf) in balance_dl() argument
1922 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) { in balance_dl()
1929 rq_unpin_lock(rq, rf); in balance_dl()
1930 pull_dl_task(rq); in balance_dl()
1931 rq_repin_lock(rq, rf); in balance_dl()
1934 return sched_stop_runnable(rq) || sched_dl_runnable(rq); in balance_dl()
1942 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, in check_preempt_curr_dl() argument
1945 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { in check_preempt_curr_dl()
1946 resched_curr(rq); in check_preempt_curr_dl()
1955 if ((p->dl.deadline == rq->curr->dl.deadline) && in check_preempt_curr_dl()
1956 !test_tsk_need_resched(rq->curr)) in check_preempt_curr_dl()
1957 check_preempt_equal_dl(rq, p); in check_preempt_curr_dl()
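check_preempt_curr_dl() is the per-runqueue EDF rule: the waking task preempts the current one only if its absolute deadline is earlier; on equal deadlines, check_preempt_equal_dl() above instead tries to push one of the contenders to another CPU. A minimal sketch of the comparison, with a stand-in entity type and the wraparound-safe signed comparison used by dl_time_before():

#include <stdint.h>
#include <stdio.h>

struct dl_entity { uint64_t deadline; };	/* stand-in for sched_dl_entity */

/* EDF: the waking task preempts curr iff its absolute deadline is
 * earlier (signed difference handles clock wraparound). */
static int dl_entity_preempt(const struct dl_entity *p,
			     const struct dl_entity *curr)
{
	return (int64_t)(p->deadline - curr->deadline) < 0;
}

int main(void)
{
	struct dl_entity curr = { .deadline = 300000000ULL };	/* 300 ms */
	struct dl_entity p    = { .deadline = 250000000ULL };	/* 250 ms */

	if (dl_entity_preempt(&p, &curr))
		printf("resched_curr(): p preempts curr\n");
	else
		printf("curr keeps running\n");
	return 0;
}
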
1962 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) in start_hrtick_dl() argument
1964 hrtick_start(rq, p->dl.runtime); in start_hrtick_dl()
1967 static void start_hrtick_dl(struct rq *rq, struct task_struct *p) in start_hrtick_dl() argument
1972 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first) in set_next_task_dl() argument
1975 struct dl_rq *dl_rq = &rq->dl; in set_next_task_dl()
1977 p->se.exec_start = rq_clock_task(rq); in set_next_task_dl()
1982 dequeue_pushable_dl_task(rq, p); in set_next_task_dl()
1987 if (hrtick_enabled_dl(rq)) in set_next_task_dl()
1988 start_hrtick_dl(rq, p); in set_next_task_dl()
1990 if (rq->curr->sched_class != &dl_sched_class) in set_next_task_dl()
1991 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); in set_next_task_dl()
1993 deadline_queue_push_tasks(rq); in set_next_task_dl()
2006 static struct task_struct *pick_task_dl(struct rq *rq) in pick_task_dl() argument
2009 struct dl_rq *dl_rq = &rq->dl; in pick_task_dl()
2012 if (!sched_dl_runnable(rq)) in pick_task_dl()
2022 static struct task_struct *pick_next_task_dl(struct rq *rq) in pick_next_task_dl() argument
2026 p = pick_task_dl(rq); in pick_next_task_dl()
2028 set_next_task_dl(rq, p, true); in pick_next_task_dl()
2033 static void put_prev_task_dl(struct rq *rq, struct task_struct *p) in put_prev_task_dl() argument
2036 struct dl_rq *dl_rq = &rq->dl; in put_prev_task_dl()
2041 update_curr_dl(rq); in put_prev_task_dl()
2043 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); in put_prev_task_dl()
2045 enqueue_pushable_dl_task(rq, p); in put_prev_task_dl()
2056 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) in task_tick_dl() argument
2058 update_curr_dl(rq); in task_tick_dl()
2060 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1); in task_tick_dl()
2066 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 && in task_tick_dl()
2067 is_leftmost(p, &rq->dl)) in task_tick_dl()
2068 start_hrtick_dl(rq, p); in task_tick_dl()
2084 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) in pick_dl_task() argument
2086 if (!task_on_cpu(rq, p) && in pick_dl_task()
2096 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu) in pick_earliest_pushable_dl_task() argument
2101 if (!has_pushable_dl_tasks(rq)) in pick_earliest_pushable_dl_task()
2104 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root); in pick_earliest_pushable_dl_task()
2110 if (pick_dl_task(rq, p, cpu)) in pick_earliest_pushable_dl_task()
2210 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) in find_lock_later_rq() argument
2212 struct rq *later_rq = NULL; in find_lock_later_rq()
2219 if ((cpu == -1) || (cpu == rq->cpu)) in find_lock_later_rq()
2235 if (double_lock_balance(rq, later_rq)) { in find_lock_later_rq()
2236 if (unlikely(task_rq(task) != rq || in find_lock_later_rq()
2238 task_on_cpu(rq, task) || in find_lock_later_rq()
2242 double_unlock_balance(rq, later_rq); in find_lock_later_rq()
2257 double_unlock_balance(rq, later_rq); in find_lock_later_rq()
2264 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) in pick_next_pushable_dl_task() argument
2268 if (!has_pushable_dl_tasks(rq)) in pick_next_pushable_dl_task()
2271 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root)); in pick_next_pushable_dl_task()
2273 WARN_ON_ONCE(rq->cpu != task_cpu(p)); in pick_next_pushable_dl_task()
2274 WARN_ON_ONCE(task_current(rq, p)); in pick_next_pushable_dl_task()
2288 static int push_dl_task(struct rq *rq) in push_dl_task() argument
2291 struct rq *later_rq; in push_dl_task()
2294 if (!rq->dl.overloaded) in push_dl_task()
2297 next_task = pick_next_pushable_dl_task(rq); in push_dl_task()
2307 if (dl_task(rq->curr) && in push_dl_task()
2308 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && in push_dl_task()
2309 rq->curr->nr_cpus_allowed > 1) { in push_dl_task()
2310 resched_curr(rq); in push_dl_task()
2317 if (WARN_ON(next_task == rq->curr)) in push_dl_task()
2324 later_rq = find_lock_later_rq(next_task, rq); in push_dl_task()
2333 task = pick_next_pushable_dl_task(rq); in push_dl_task()
2351 deactivate_task(rq, next_task, 0); in push_dl_task()
2358 double_unlock_balance(rq, later_rq); in push_dl_task()
2366 static void push_dl_tasks(struct rq *rq) in push_dl_tasks() argument
2369 while (push_dl_task(rq)) in push_dl_tasks()
2373 static void pull_dl_task(struct rq *this_rq) in pull_dl_task()
2378 struct rq *src_rq; in pull_dl_task()
2469 static void task_woken_dl(struct rq *rq, struct task_struct *p) in task_woken_dl() argument
2471 if (!task_on_cpu(rq, p) && in task_woken_dl()
2472 !test_tsk_need_resched(rq->curr) && in task_woken_dl()
2474 dl_task(rq->curr) && in task_woken_dl()
2475 (rq->curr->nr_cpus_allowed < 2 || in task_woken_dl()
2476 !dl_entity_preempt(&p->dl, &rq->curr->dl))) { in task_woken_dl()
2477 push_dl_tasks(rq); in task_woken_dl()
2485 struct rq *rq; in set_cpus_allowed_dl() local
2489 rq = task_rq(p); in set_cpus_allowed_dl()
2490 src_rd = rq->rd; in set_cpus_allowed_dl()
2500 src_dl_b = dl_bw_of(cpu_of(rq)); in set_cpus_allowed_dl()
2515 static void rq_online_dl(struct rq *rq) in rq_online_dl() argument
2517 if (rq->dl.overloaded) in rq_online_dl()
2518 dl_set_overload(rq); in rq_online_dl()
2520 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu); in rq_online_dl()
2521 if (rq->dl.dl_nr_running > 0) in rq_online_dl()
2522 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr); in rq_online_dl()
2526 static void rq_offline_dl(struct rq *rq) in rq_offline_dl() argument
2528 if (rq->dl.overloaded) in rq_offline_dl()
2529 dl_clear_overload(rq); in rq_offline_dl()
2531 cpudl_clear(&rq->rd->cpudl, rq->cpu); in rq_offline_dl()
2532 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu); in rq_offline_dl()
2547 struct rq *rq; in dl_add_task_root_domain() local
2556 rq = __task_rq_lock(p, &rf); in dl_add_task_root_domain()
2558 dl_b = &rq->rd->dl_bw; in dl_add_task_root_domain()
2561 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span)); in dl_add_task_root_domain()
2565 task_rq_unlock(rq, p, &rf); in dl_add_task_root_domain()
2579 static void switched_from_dl(struct rq *rq, struct task_struct *p) in switched_from_dl() argument
2606 sub_running_bw(&p->dl, &rq->dl); in switched_from_dl()
2607 sub_rq_bw(&p->dl, &rq->dl); in switched_from_dl()
2623 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running) in switched_from_dl()
2626 deadline_queue_pull_task(rq); in switched_from_dl()
2633 static void switched_to_dl(struct rq *rq, struct task_struct *p) in switched_to_dl() argument
2646 add_rq_bw(&p->dl, &rq->dl); in switched_to_dl()
2651 if (rq->curr != p) { in switched_to_dl()
2653 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) in switched_to_dl()
2654 deadline_queue_push_tasks(rq); in switched_to_dl()
2656 if (dl_task(rq->curr)) in switched_to_dl()
2657 check_preempt_curr_dl(rq, p, 0); in switched_to_dl()
2659 resched_curr(rq); in switched_to_dl()
2661 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); in switched_to_dl()
2669 static void prio_changed_dl(struct rq *rq, struct task_struct *p, in prio_changed_dl() argument
2682 if (!rq->dl.overloaded) in prio_changed_dl()
2683 deadline_queue_pull_task(rq); in prio_changed_dl()
2685 if (task_current(rq, p)) { in prio_changed_dl()
2691 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline)) in prio_changed_dl()
2692 resched_curr(rq); in prio_changed_dl()
2700 if (!dl_task(rq->curr) || in prio_changed_dl()
2701 dl_time_before(p->dl.deadline, rq->curr->dl.deadline)) in prio_changed_dl()
2702 resched_curr(rq); in prio_changed_dl()
2709 resched_curr(rq); in prio_changed_dl()