--- core.c (b607a3886e61a1e0e42f8d8b3a38c3b39a6b904d)
+++ core.c (b2f7d7507982ed56d301ba0f75f040fd1e930214)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kernel/sched/core.c
4 *
5 * Core kernel scheduler code and related syscalls
6 *
7 * Copyright (C) 1991-2002 Linus Torvalds
8 */

--- 2200 unchanged lines hidden (view full) ---

2209 if (prev_class->switched_from)
2210 prev_class->switched_from(rq, p);
2211
2212 p->sched_class->switched_to(rq, p);
2213 } else if (oldprio != p->prio || dl_task(p))
2214 p->sched_class->prio_changed(rq, p, oldprio);
2215 }
2216
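The tail visible above (2209-2215) closes the class-change fixup: when a task has moved to a different scheduling class, the old class's ->switched_from() hook runs (when it exists) followed by the new class's ->switched_to(); when only the priority moved, ->prio_changed() is notified instead (the kernel also re-notifies deadline tasks via the dl_task() test, which the sketch below omits). A minimal, self-contained C model of that dispatch; every type and name here is a hypothetical stand-in, not the kernel's:

#include <stdio.h>

struct task;

/* Hypothetical stand-in for struct sched_class: just the three hooks. */
struct class_model {
	void (*switched_from)(struct task *t);	/* optional, may be NULL */
	void (*switched_to)(struct task *t);
	void (*prio_changed)(struct task *t, int oldprio);
};

struct task {
	const struct class_model *cls;
	int prio;
};

/* Same shape as the fragment above: a class change wins over a prio change. */
static void check_class_changed_model(struct task *t,
				      const struct class_model *prev_class,
				      int oldprio)
{
	if (prev_class != t->cls) {
		if (prev_class->switched_from)
			prev_class->switched_from(t);
		t->cls->switched_to(t);
	} else if (oldprio != t->prio) {
		t->cls->prio_changed(t, oldprio);
	}
}

static void rt_switched_to(struct task *t)      { printf("joined rt\n"); }
static void prio_moved(struct task *t, int old) { printf("prio %d -> %d\n", old, t->prio); }

static const struct class_model fair_model = { NULL, NULL, prio_moved };
static const struct class_model rt_model   = { NULL, rt_switched_to, prio_moved };

int main(void)
{
	struct task t = { &fair_model, 120 };

	/* a priority boost moves the task from "fair" to "rt" */
	t.cls = &rt_model;
	t.prio = 50;
	check_class_changed_model(&t, &fair_model, 120);
	return 0;
}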
-2217 void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+2217 void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
2218 {
2219 if (p->sched_class == rq->curr->sched_class)
-2220 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+2220 rq->curr->sched_class->wakeup_preempt(rq, p, flags);
2221 else if (sched_class_above(p->sched_class, rq->curr->sched_class))
2222 resched_curr(rq);
2223
2224 /*
2225 * A queue event has occurred, and we're going to schedule. In
2226 * this case, we can save a useless back to back clock update.
2227 */
2228 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))

--- 289 unchanged lines hidden (view full) ---
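The hunk above is the heart of this diff: check_preempt_curr() becomes wakeup_preempt(), and the per-class ->check_preempt_curr callback it dispatches through is renamed the same way; the logic itself is untouched. That logic is a three-way test: a waker in the same class is judged by the class's own hook, one from a higher class preempts unconditionally via resched_curr(), and one from a lower class never preempts. A toy model of the ranking decision, with illustrative names only:

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical ranks standing in for the fixed sched_class ordering
 * that sched_class_above() compares; higher value == higher class.
 */
enum rank { RANK_IDLE, RANK_FAIR, RANK_RT, RANK_DL, RANK_STOP };

struct rq_model {
	enum rank curr;		/* class of the currently running task */
	bool need_resched;
};

static void resched_curr_model(struct rq_model *rq)
{
	rq->need_resched = true;
}

static void wakeup_preempt_model(struct rq_model *rq, enum rank woken)
{
	if (woken == rq->curr)
		printf("same class: defer to the class's own hook\n");
	else if (woken > rq->curr)	/* cf. sched_class_above() */
		resched_curr_model(rq);
	/* a lower class never preempts the running task */
}

int main(void)
{
	struct rq_model rq = { .curr = RANK_FAIR };

	wakeup_preempt_model(&rq, RANK_RT);
	printf("need_resched = %d\n", rq.need_resched);
	return 0;
}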

2518 set_task_cpu(p, new_cpu);
2519 rq_unlock(rq, rf);
2520
2521 rq = cpu_rq(new_cpu);
2522
2523 rq_lock(rq, rf);
2524 WARN_ON_ONCE(task_cpu(p) != new_cpu);
2525 activate_task(rq, p, 0);
-2526 check_preempt_curr(rq, p, 0);
+2526 wakeup_preempt(rq, p, 0);
2527
2528 return rq;
2529 }
2530
2531 struct migration_arg {
2532 struct task_struct *task;
2533 int dest_cpu;
2534 struct set_affinity_pending *pending;

--- 869 unchanged lines hidden (view full) ---
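The migration tail in the hunk above (2518-2526) is a hand-over-hand move between two locked queues: retarget the task while the source lock is still held, drop it, take the destination lock, re-validate the target (the WARN_ON_ONCE()), enqueue, and only then ask whether the newcomer should preempt. A self-contained pthread sketch of the same shape, offered as an analogy rather than kernel code:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;
	int nr_queued;
};

struct item {
	struct queue *home;	/* cf. task_cpu(): where the item lives */
};

/* Caller holds src->lock and has already dequeued *it from src. */
static void move_queued(struct item *it, struct queue *src, struct queue *dst)
{
	it->home = dst;				/* cf. set_task_cpu() */
	pthread_mutex_unlock(&src->lock);	/* cf. rq_unlock() */

	pthread_mutex_lock(&dst->lock);		/* cf. rq_lock() on the new rq */
	assert(it->home == dst);		/* cf. WARN_ON_ONCE() re-check */
	dst->nr_queued++;			/* cf. activate_task() */
	/* cf. wakeup_preempt(): should the newcomer run immediately? */
	pthread_mutex_unlock(&dst->lock);
}

int main(void)
{
	struct queue a = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct queue b = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct item it = { &a };

	pthread_mutex_lock(&a.lock);
	move_queued(&it, &a, &b);
	printf("b.nr_queued = %d\n", b.nr_queued);
	return 0;
}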

3404 dst_rq = cpu_rq(cpu);
3405
3406 rq_pin_lock(src_rq, &srf);
3407 rq_pin_lock(dst_rq, &drf);
3408
3409 deactivate_task(src_rq, p, 0);
3410 set_task_cpu(p, cpu);
3411 activate_task(dst_rq, p, 0);
-3412 check_preempt_curr(dst_rq, p, 0);
+3412 wakeup_preempt(dst_rq, p, 0);
3413
3414 rq_unpin_lock(dst_rq, &drf);
3415 rq_unpin_lock(src_rq, &srf);
3416
3417 } else {
3418 /*
3419 * Task isn't running anymore; make it appear like we migrated
3420 * it before it went to sleep. This means on wakeup we make the

--- 359 unchanged lines hidden (view full) ---

3780 else
3781 #endif
3782 if (p->in_iowait) {
3783 delayacct_blkio_end(p);
3784 atomic_dec(&task_rq(p)->nr_iowait);
3785 }
3786
3787 activate_task(rq, p, en_flags);
-3788 check_preempt_curr(rq, p, wake_flags);
+3788 wakeup_preempt(rq, p, wake_flags);
3789
3790 ttwu_do_wakeup(p);
3791
3792 #ifdef CONFIG_SMP
3793 if (p->sched_class->task_woken) {
3794 /*
3795 * Our task @p is fully woken up and running; so it's safe to
3796 * drop the rq->lock, hereafter rq is only used for statistics.

--- 54 unchanged lines hidden (view full) ---
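In the wakeup path above (3780-3788), the iowait bookkeeping is unwound before the enqueue: a task that blocked on I/O stops counting toward the queue's nr_iowait the moment it turns runnable, then activate_task() enqueues it and the freshly renamed wakeup_preempt() decides whether it runs at once. A compact model of that ordering; the types are invented, and clearing in_iowait here is a simplification (the kernel clears it on the sleep side, in io_schedule_finish()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct rq_model {
	atomic_int nr_iowait;	/* tasks from this queue blocked on I/O */
	int nr_running;
};

struct task_model {
	bool in_iowait;
};

static void ttwu_activate_model(struct rq_model *rq, struct task_model *p)
{
	if (p->in_iowait) {
		/* cf. delayacct_blkio_end() + atomic_dec(&...->nr_iowait) */
		atomic_fetch_sub(&rq->nr_iowait, 1);
		p->in_iowait = false;	/* simplification, see lead-in */
	}
	rq->nr_running++;		/* cf. activate_task() */
	/* cf. wakeup_preempt(rq, p, wake_flags) right after the enqueue */
}

int main(void)
{
	struct rq_model rq = { .nr_iowait = 1 };
	struct task_model p = { .in_iowait = true };

	ttwu_activate_model(&rq, &p);
	printf("nr_iowait = %d, nr_running = %d\n",
	       atomic_load(&rq.nr_iowait), rq.nr_running);
	return 0;
}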

3851 rq = __task_rq_lock(p, &rf);
3852 if (task_on_rq_queued(p)) {
3853 if (!task_on_cpu(rq, p)) {
3854 /*
3855 * When on_rq && !on_cpu the task is preempted, see if
3856 * it should preempt the task that is current now.
3857 */
3858 update_rq_clock(rq);
-3859 check_preempt_curr(rq, p, wake_flags);
+3859 wakeup_preempt(rq, p, wake_flags);
3860 }
3861 ttwu_do_wakeup(p);
3862 ret = 1;
3863 }
3864 __task_rq_unlock(rq, &rf);
3865
3866 return ret;
3867 }

--- 998 unchanged lines hidden (view full) ---
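The fast-path helper above (3851-3867) leans on the difference between queued and executing: a task that is still on the runqueue (task_on_rq_queued()) but not on a CPU (!task_on_cpu()) was merely preempted, so waking it needs no re-enqueue, just a clock refresh and a fresh preemption test against whoever runs now, before ttwu_do_wakeup() marks it running. A skeletal model of that state test, names invented:

#include <stdbool.h>
#include <stdio.h>

struct task_model {
	bool on_rq;	/* still queued on a runqueue */
	bool on_cpu;	/* actually executing right now */
};

/* Returns 1 when the wakeup completes without re-enqueueing the task. */
static int ttwu_runnable_model(struct task_model *p)
{
	int ret = 0;

	/* cf. __task_rq_lock(): the check runs under the queue lock */
	if (p->on_rq) {
		if (!p->on_cpu) {
			/*
			 * Queued but preempted: refresh the clock and
			 * re-run the preemption test (update_rq_clock()
			 * + wakeup_preempt() in the original).
			 */
		}
		/* cf. ttwu_do_wakeup(): flip the task back to RUNNING */
		ret = 1;
	}
	return ret;
}

int main(void)
{
	struct task_model p = { .on_rq = true, .on_cpu = false };

	printf("fast-path wakeup: %d\n", ttwu_runnable_model(&p));
	return 0;
}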

4866 __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK));
4867 #endif
4868 rq = __task_rq_lock(p, &rf);
4869 update_rq_clock(rq);
4870 post_init_entity_util_avg(p);
4871
4872 activate_task(rq, p, ENQUEUE_NOCLOCK);
4873 trace_sched_wakeup_new(p);
-4874 check_preempt_curr(rq, p, WF_FORK);
+4874 wakeup_preempt(rq, p, WF_FORK);
4875 #ifdef CONFIG_SMP
4876 if (p->sched_class->task_woken) {
4877 /*
4878 * Nothing relies on rq->lock after this, so it's fine to
4879 * drop it.
4880 */
4881 rq_unpin_lock(rq, &rf);
4882 p->sched_class->task_woken(rq, p);

--- 7253 unchanged lines hidden ---
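The final call site (4874) is the new-task wakeup, and it passes WF_FORK rather than 0 or a caller's wake_flags, letting a class's wakeup_preempt hook distinguish a freshly forked child from an ordinary wakeup. A small flag-dispatch sketch; WF_FORK and WF_SYNC are real kernel flag names, but the values and the hook body below are invented:

#include <stdio.h>

#define WF_FORK_MODEL	0x02	/* stand-in value, not the kernel's */
#define WF_SYNC_MODEL	0x01	/* ditto */

/* Hypothetical class hook reacting to the wake-flag bits. */
static void wakeup_preempt_hook_model(int wake_flags)
{
	if (wake_flags & WF_FORK_MODEL)
		printf("fork wakeup: class may apply a child-first policy\n");
	else if (wake_flags & WF_SYNC_MODEL)
		printf("sync wakeup: waker hints it will sleep soon\n");
	else
		printf("plain wakeup: ordinary preemption test\n");
}

int main(void)
{
	wakeup_preempt_hook_model(WF_FORK_MODEL);
	return 0;
}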