deadline.c: b97d6790d03b763eca08847a9a5869a4291b9f9a → b2f7d7507982ed56d301ba0f75f040fd1e930214
// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically executes their instances for less than their
 * runtime won't miss any of their deadlines.

--- 749 unchanged lines hidden ---
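For context on the comment above: a SCHED_DEADLINE task declares a runtime budget, a relative deadline and a period, and the kernel admits it only if the requested bandwidth fits. A minimal user-space sketch of how such a task is configured (assumes Linux 3.14+ with SCHED_DEADLINE; struct sched_attr and the sched_setattr() wrapper are declared locally because glibc exports neither; the 10 ms / 100 ms values are purely illustrative):

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/types.h>

#define SCHED_DEADLINE  6

/* Mirrors the UAPI layout of struct sched_attr; declared here because
 * glibc does not export it. */
struct sched_attr {
        __u32 size;
        __u32 sched_policy;
        __u64 sched_flags;
        __s32 sched_nice;
        __u32 sched_priority;
        __u64 sched_runtime;    /* ns */
        __u64 sched_deadline;   /* ns */
        __u64 sched_period;     /* ns */
};

static int sched_setattr(pid_t pid, const struct sched_attr *attr,
                         unsigned int flags)
{
        return syscall(SYS_sched_setattr, pid, attr, flags);
}

int main(void)
{
        struct sched_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.sched_policy   = SCHED_DEADLINE;
        attr.sched_runtime  =  10 * 1000 * 1000;        /*  10 ms budget  */
        attr.sched_deadline = 100 * 1000 * 1000;        /* 100 ms         */
        attr.sched_period   = 100 * 1000 * 1000;        /* 100 ms         */

        if (sched_setattr(0, &attr, 0))
                perror("sched_setattr");

        /* ... periodic work, at most ~10 ms per 100 ms period ... */
        return 0;
}

Setting the policy typically requires CAP_SYS_NICE, and the call fails (EBUSY) when the requested parameters do not pass admission control.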


static inline void deadline_queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
-static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
+static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);

static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
                                           struct rq *rq)
{
        /* for non-boosted task, pi_of(dl_se) == dl_se */
        dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
        dl_se->runtime = pi_of(dl_se)->dl_runtime;
}
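replenish_dl_new_period() starts a fresh CBS period from "now": the absolute deadline becomes rq_clock() plus the relative deadline (taken from the priority-inheritance donor via pi_of() for a boosted task), and the runtime budget is refilled. A self-contained sketch of that arithmetic with made-up values (plain C, illustrative names, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the few sched_dl_entity fields used above (ns). */
struct dl_params {
        uint64_t dl_runtime;    /* budget granted per period */
        uint64_t dl_deadline;   /* relative deadline         */
        uint64_t deadline;      /* absolute deadline         */
        int64_t  runtime;       /* remaining budget          */
};

/* Same arithmetic as replenish_dl_new_period(): new absolute deadline is
 * now + relative deadline, and the runtime is refilled to the full budget. */
static void replenish_new_period(struct dl_params *d, uint64_t now_ns)
{
        d->deadline = now_ns + d->dl_deadline;
        d->runtime  = (int64_t)d->dl_runtime;
}

int main(void)
{
        /* 10 ms of budget, 30 ms relative deadline, "now" = 1 s. */
        struct dl_params d = { .dl_runtime = 10000000ULL,
                               .dl_deadline = 30000000ULL };

        replenish_new_period(&d, 1000000000ULL);
        printf("deadline=%llu ns, runtime=%lld ns\n",
               (unsigned long long)d.deadline, (long long)d.runtime);
        return 0;
}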

--- 395 unchanged lines hidden ---

                 * have that locked, proceed as normal and enqueue the task
                 * there.
                 */
        }
#endif

        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
        if (dl_task(rq->curr))
-                check_preempt_curr_dl(rq, p, 0);
+                wakeup_preempt_dl(rq, p, 0);
        else
                resched_curr(rq);

#ifdef CONFIG_SMP
        /*
         * Queueing this task back might have overloaded rq, check if we need
         * to kick someone away.
         */

--- 747 unchanged lines hidden ---

        return sched_stop_runnable(rq) || sched_dl_runnable(rq);
}
#endif /* CONFIG_SMP */

/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
-static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
+static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p,
                              int flags)
{
        if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
                resched_curr(rq);
                return;
        }

#ifdef CONFIG_SMP

--- 698 unchanged lines hidden ---
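In wakeup_preempt_dl() above, dl_entity_preempt() decides whether the waking task's absolute deadline beats the running task's, i.e. the EDF rule. The core of that test is a wraparound-safe time comparison, the same signed-difference trick as the kernel's dl_time_before(). A standalone sketch (illustrative names, ignoring the kernel's special-entity case):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "a is earlier than b" for 64-bit nanosecond clocks: the
 * signed difference is negative when a precedes b, even across overflow. */
static bool deadline_before(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

/* EDF wakeup preemption: the waking task wins only if its absolute
 * deadline is strictly earlier than the running task's. */
static bool should_preempt(uint64_t waking_deadline, uint64_t running_deadline)
{
        return deadline_before(waking_deadline, running_deadline);
}

int main(void)
{
        printf("%d\n", should_preempt(100, 200));       /* 1: earlier deadline   */
        printf("%d\n", should_preempt(200, 100));       /* 0: later deadline     */
        printf("%d\n", should_preempt(100, 100));       /* 0: ties don't preempt */
        return 0;
}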

        }

        if (rq->curr != p) {
#ifdef CONFIG_SMP
                if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
                        deadline_queue_push_tasks(rq);
#endif
                if (dl_task(rq->curr))
-                        check_preempt_curr_dl(rq, p, 0);
+                        wakeup_preempt_dl(rq, p, 0);
                else
                        resched_curr(rq);
        } else {
                update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
        }
}

/*

--- 52 unchanged lines hidden ---

#endif

DEFINE_SCHED_CLASS(dl) = {

        .enqueue_task           = enqueue_task_dl,
        .dequeue_task           = dequeue_task_dl,
        .yield_task             = yield_task_dl,

-        .check_preempt_curr     = check_preempt_curr_dl,
+        .wakeup_preempt         = wakeup_preempt_dl,

        .pick_next_task         = pick_next_task_dl,
        .put_prev_task          = put_prev_task_dl,
        .set_next_task          = set_next_task_dl,

#ifdef CONFIG_SMP
        .balance                = balance_dl,
        .pick_task              = pick_task_dl,

--- 379 unchanged lines hidden ---
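The DEFINE_SCHED_CLASS(dl) table is why the rename touches both the function itself and the .check_preempt_curr/.wakeup_preempt member: the core scheduler calls into every policy only through this table of callbacks. A generic, self-contained sketch of the pattern (illustrative names and types, not the kernel's):

#include <stdio.h>

/* A policy is a table of callbacks; the core dispatches through it
 * without knowing which policy it is talking to. */
struct my_task;
struct my_rq;

struct my_sched_class {
        void (*enqueue_task)(struct my_rq *rq, struct my_task *p, int flags);
        void (*dequeue_task)(struct my_rq *rq, struct my_task *p, int flags);
        void (*wakeup_preempt)(struct my_rq *rq, struct my_task *p, int flags);
};

static void demo_enqueue(struct my_rq *rq, struct my_task *p, int flags)
{
        (void)rq; (void)p; (void)flags;
        printf("enqueue\n");
}

static void demo_dequeue(struct my_rq *rq, struct my_task *p, int flags)
{
        (void)rq; (void)p; (void)flags;
        printf("dequeue\n");
}

static void demo_wakeup_preempt(struct my_rq *rq, struct my_task *p, int flags)
{
        (void)rq; (void)p; (void)flags;
        printf("wakeup_preempt\n");
}

/* Designated initializers, in the same spirit as DEFINE_SCHED_CLASS(dl). */
static const struct my_sched_class demo_class = {
        .enqueue_task           = demo_enqueue,
        .dequeue_task           = demo_dequeue,
        .wakeup_preempt         = demo_wakeup_preempt,
};

int main(void)
{
        /* The core only ever calls through the table. */
        demo_class.wakeup_preempt(NULL, NULL, 0);
        return 0;
}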