rt.c (d121f07691415df824e6b60520f782f6d13b3c81) → rt.c (ad0f1d9d65938aec72a698116cd73a980916895e)

 // SPDX-License-Identifier: GPL-2.0
 /*
  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
  * policies)
  */

 #include "sched.h"

--- 1893 unchanged lines hidden ---

  *
  * All CPUs that are scheduling a lower priority task will increment the
  * rt_loop_next variable. This will make sure that the irq work iterator
  * checks all RT overloaded CPUs whenever a CPU schedules a new lower
  * priority task, even if the iterator is in the middle of a scan. Incrementing
  * the rt_loop_next will cause the iterator to perform another scan.
  *
  */
-static int rto_next_cpu(struct rq *rq)
+static int rto_next_cpu(struct root_domain *rd)
 {
-        struct root_domain *rd = rq->rd;
         int next;
         int cpu;

         /*
          * When starting the IPI RT pushing, the rto_cpu is set to -1,
          * rt_next_cpu() will simply return the first CPU found in
          * the rto_mask.
          *

--- 59 unchanged lines hidden ---
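The block comment above rto_next_cpu() describes the rescan trick: any CPU that schedules a lower-priority task bumps rt_loop_next, so a scan that is already in flight does another pass instead of stopping. The rest of rto_next_cpu() falls inside the hidden region here; the following is only a rough user-space sketch of that pattern under my own reading of the comment (toy_rd, toy_next_cpu and the mask value are invented for illustration, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 8

/* Toy stand-ins for the root_domain fields the iterator uses. */
struct toy_rd {
        unsigned long rto_mask;        /* bit i set: CPU i is RT overloaded */
        int           rto_cpu;         /* scan cursor, -1 = scan not running */
        int           rto_loop;        /* generation the current scan is on */
        atomic_int    rto_loop_next;   /* bumped to request another scan */
};

/* Next overloaded CPU to visit, or -1 once a full pass completes and
 * nobody has bumped rto_loop_next in the meantime. */
static int toy_next_cpu(struct toy_rd *rd)
{
        for (;;) {
                int cpu, next;

                /* with rto_cpu == -1 this starts from CPU 0 */
                for (cpu = rd->rto_cpu + 1; cpu < NR_CPUS; cpu++) {
                        if (rd->rto_mask & (1UL << cpu)) {
                                rd->rto_cpu = cpu;
                                return cpu;
                        }
                }

                rd->rto_cpu = -1;

                /* A lower-priority task was scheduled somewhere while we
                 * were scanning: do another pass instead of stopping. */
                next = atomic_load(&rd->rto_loop_next);
                if (rd->rto_loop == next)
                        return -1;
                rd->rto_loop = next;
        }
}

int main(void)
{
        struct toy_rd rd = { .rto_mask = 0x26, .rto_cpu = -1 }; /* CPUs 1, 2, 5 */

        for (int cpu = toy_next_cpu(&rd); cpu >= 0; cpu = toy_next_cpu(&rd))
                printf("visit CPU %d\n", cpu);
        return 0;
}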

         /*
          * The rto_cpu is updated under the lock, if it has a valid cpu
          * then the IPI is still running and will continue due to the
          * update to loop_next, and nothing needs to be done here.
          * Otherwise it is finishing up and an ipi needs to be sent.
          */
         if (rq->rd->rto_cpu < 0)
-                cpu = rto_next_cpu(rq);
+                cpu = rto_next_cpu(rq->rd);

         raw_spin_unlock(&rq->rd->rto_lock);

         rto_start_unlock(&rq->rd->rto_loop_start);

         if (cpu >= 0)
                 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
 }

 /* Called from hardirq context */
 void rto_push_irq_work_func(struct irq_work *work)
 {
+        struct root_domain *rd =
+                container_of(work, struct root_domain, rto_push_work);
         struct rq *rq;
         int cpu;

         rq = this_rq();

         /*
          * We do not need to grab the lock to check for has_pushable_tasks.
          * When it gets updated, a check is made if a push is possible.
          */
         if (has_pushable_tasks(rq)) {
                 raw_spin_lock(&rq->lock);
                 push_rt_tasks(rq);
                 raw_spin_unlock(&rq->lock);
         }

-        raw_spin_lock(&rq->rd->rto_lock);
+        raw_spin_lock(&rd->rto_lock);

         /* Pass the IPI to the next rt overloaded queue */
-        cpu = rto_next_cpu(rq);
+        cpu = rto_next_cpu(rd);

-        raw_spin_unlock(&rq->rd->rto_lock);
+        raw_spin_unlock(&rd->rto_lock);

         if (cpu < 0)
                 return;

         /* Try the next RT overloaded CPU */
-        irq_work_queue_on(&rq->rd->rto_push_work, cpu);
+        irq_work_queue_on(&rd->rto_push_work, cpu);
 }
 #endif /* HAVE_RT_PUSH_IPI */

 static void pull_rt_task(struct rq *this_rq)
 {
         int this_cpu = this_rq->cpu, cpu;
         bool resched = false;
         struct task_struct *p;

--- 171 unchanged lines hidden ---
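In the rto_push_irq_work_func() hunk above, the handler now derives the root_domain from the irq_work it was handed via container_of(), and the later rto_lock / rto_next_cpu() / irq_work_queue_on() calls use that rd instead of re-reading rq->rd; presumably this keeps a push pass tied to the root domain that owns rto_push_work even if the CPU the work lands on sees a different rq->rd. A minimal stand-alone illustration of the container_of() step follows (the toy_* names are invented; the real macro and types come from the kernel headers):

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's irq_work and root_domain. */
struct toy_irq_work { int pending; };

struct toy_root_domain {
        int                 id;
        struct toy_irq_work rto_push_work;  /* embedded work item */
};

/* Same idea as the kernel's container_of(): step back from a member
 * pointer to the structure that embeds it. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void toy_push_handler(struct toy_irq_work *work)
{
        /* Recover the owning root_domain from the work pointer alone,
         * without going through any per-CPU runqueue state. */
        struct toy_root_domain *rd =
                container_of(work, struct toy_root_domain, rto_push_work);

        printf("handler running for root_domain %d\n", rd->id);
}

int main(void)
{
        struct toy_root_domain rd = { .id = 42 };

        toy_push_handler(&rd.rto_push_work);
        return 0;
}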

          * If that current running task is also an RT task
          * then see if we can move to another run queue.
          */
         if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
                 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
                         queue_push_tasks(rq);
 #endif /* CONFIG_SMP */
-                if (p->prio < rq->curr->prio)
+                if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
                         resched_curr(rq);
         }
 }

 /*
  * Priority of the task has changed. This may cause
  * us to initiate a push or pull.
  */

--- 473 unchanged lines hidden ---
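Taken together, the visible hunks keep the shape of the push-IPI pass: the start path shown above (the `if (rq->rd->rto_cpu < 0)` block) queues the work on one RT-overloaded CPU, the handler pushes that CPU's surplus RT tasks, and it then re-queues the work on whatever rto_next_cpu() returns until the result is negative. A very rough, single-threaded sketch of that hand-off, with irq_work_queue_on() reduced to a direct call and all names invented:

#include <stdio.h>

#define NR_CPUS 4

/* One flag per CPU: does it have surplus RT tasks to push away? */
static int overloaded[NR_CPUS] = { 0, 1, 0, 1 };
static int cursor = -1;                  /* plays the role of rd->rto_cpu */

/* Next overloaded CPU after the cursor, or -1 when the pass is over. */
static int next_overloaded_cpu(void)
{
        for (int cpu = cursor + 1; cpu < NR_CPUS; cpu++) {
                if (overloaded[cpu]) {
                        cursor = cpu;
                        return cpu;
                }
        }
        cursor = -1;
        return -1;
}

/* Stand-in for the irq work handler: push the local surplus, then pass
 * the work along to the next overloaded CPU. */
static void push_work(int cpu)
{
        printf("CPU %d: push_rt_tasks()\n", cpu);
        overloaded[cpu] = 0;

        int next = next_overloaded_cpu();
        if (next >= 0)
                push_work(next);         /* irq_work_queue_on(..., next) */
}

int main(void)
{
        /* start the pass on the first overloaded CPU (the rto_cpu < 0 path) */
        int first = next_overloaded_cpu();

        if (first >= 0)
                push_work(first);
        return 0;
}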