--- rt.c (aa7f67304d1a03180f463258aa6f15a8b434e77d)
+++ rt.c (57d2aa00dcec67afa52478730f2b524521af14fb)
@@ -1,8 +1,8 @@
 /*
  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
  * policies)
  */

 #include "sched.h"

 #include <linux/slab.h>

--- 552 unchanged lines hidden (view full) ---

@@ -561,17 +561,17 @@

 #ifdef CONFIG_SMP
 /*
  * We ran out of runtime, see if we can borrow some from our neighbours.
  */
 static int do_balance_runtime(struct rt_rq *rt_rq)
 {
         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
-        struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
+        struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
         int i, weight, more = 0;
         u64 rt_period;

         weight = cpumask_weight(rd->span);

         raw_spin_lock(&rt_b->rt_runtime_lock);
         rt_period = ktime_to_ns(rt_b->rt_period);
         for_each_cpu(i, rd->span) {
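Note on the hunk above: the only change is which root_domain supplies the donor CPUs. rq_of_rt_rq(rt_rq)->rd is the domain of the runqueue that owns rt_rq, while cpu_rq(smp_processor_id())->rd is the domain of whichever CPU happens to be executing the balance; the two can differ when this runs on behalf of a remote runqueue. The body of the borrowing loop is among the lines hidden below. As a rough user-space model of the scheme the comment describes, where each neighbour with spare budget donates 1/weight of its spare time, capped at one full period, consider this sketch; all names and numbers are illustrative, not the kernel's:

/*
 * Illustrative sketch only: a user-space model of the borrowing idea,
 * not the hidden kernel code. It omits locking and RUNTIME_INF handling.
 */
#include <stdio.h>
#include <stdint.h>

#define NCPU      4
#define RT_PERIOD 1000000000ULL         /* 1 s period, in ns */

struct fake_rt_rq {
        uint64_t rt_runtime;            /* budget this CPU may spend per period */
        uint64_t rt_time;               /* budget already consumed */
};

/* Borrow spare budget for @dst from its neighbours, 1/weight at a time. */
static int balance_runtime(struct fake_rt_rq rq[], int dst, int weight)
{
        int i, more = 0;

        for (i = 0; i < NCPU; i++) {
                int64_t diff;

                if (i == dst)
                        continue;

                /* Spare time = allocated budget minus what was consumed. */
                diff = (int64_t)(rq[i].rt_runtime - rq[i].rt_time);
                if (diff <= 0)
                        continue;

                /* Take 1/n of the spare time, capped at the full period. */
                diff /= weight;
                if (rq[dst].rt_runtime + diff > RT_PERIOD)
                        diff = RT_PERIOD - rq[dst].rt_runtime;

                rq[i].rt_runtime -= diff;
                rq[dst].rt_runtime += diff;
                more = 1;

                if (rq[dst].rt_runtime == RT_PERIOD)
                        break;
        }
        return more;
}

int main(void)
{
        struct fake_rt_rq rq[NCPU] = {
                { 950000000ULL, 950000000ULL }, /* CPU 0: budget exhausted */
                { 950000000ULL, 100000000ULL },
                { 950000000ULL, 200000000ULL },
                { 950000000ULL,         0ULL },
        };

        if (balance_runtime(rq, 0, NCPU))
                printf("CPU 0 runtime grew to %llu ns\n",
                       (unsigned long long)rq[0].rt_runtime);
        return 0;
}

Running it, CPU 0's budget grows from 950 ms to the full 1 s period, topped up from CPU 1's spare time.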

--- 1306 unchanged lines hidden (view full) ---

@@ -1884,18 +1884,21 @@
 {
         /*
          * If there are other RT tasks then we will reschedule
          * and the scheduling of the other RT tasks will handle
          * the balancing. But if we are the last RT task
          * we may need to handle the pulling of RT tasks
          * now.
          */
-        if (p->on_rq && !rq->rt.rt_nr_running)
-                pull_rt_task(rq);
+        if (!p->on_rq || rq->rt.rt_nr_running)
+                return;
+
+        if (pull_rt_task(rq))
+                resched_task(rq->curr);
 }

 void init_sched_rt_class(void)
 {
         unsigned int i;

         for_each_possible_cpu(i) {
                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
--- 78 unchanged lines hidden (view full) ---

@@ -1980,17 +1983,21 @@

         /* max may change after cur was read, this will be fixed next tick */
         soft = task_rlimit(p, RLIMIT_RTTIME);
         hard = task_rlimit_max(p, RLIMIT_RTTIME);

         if (soft != RLIM_INFINITY) {
                 unsigned long next;

-                p->rt.timeout++;
+                if (p->rt.watchdog_stamp != jiffies) {
+                        p->rt.timeout++;
+                        p->rt.watchdog_stamp = jiffies;
+                }
+
                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
                 if (p->rt.timeout > next)
                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
         }
 }

 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 {

--- 98 unchanged lines hidden ---
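Closing note on the last hunk: the first version bumps p->rt.timeout unconditionally on every call, so if the tick path were to fire more than once within a single jiffy the RT watchdog would advance faster than real ticks; the second version stamps the jiffy of the last increment and counts at most one tick per jiffy. For the threshold arithmetic: RLIMIT_RTTIME is in microseconds and one tick lasts USEC_PER_SEC/HZ microseconds, so with HZ = 1000 a soft limit of 2,000,000 us yields next = DIV_ROUND_UP(2000000, 1000) = 2000 ticks. A user-space sketch of the guard, with hypothetical names:

/*
 * Illustrative sketch only (user space); models the once-per-jiffy
 * guard the second version adds. Names are hypothetical.
 */
#include <stdio.h>

#define HZ           1000
#define USEC_PER_SEC 1000000UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct toy_task {
        unsigned long timeout;          /* ticks spent running, per watchdog */
        unsigned long watchdog_stamp;   /* jiffy of the last increment */
};

/* Called from the tick path; may be invoked more than once per jiffy. */
static void toy_watchdog(struct toy_task *p, unsigned long jiffies,
                         unsigned long soft_usec)
{
        unsigned long next;

        /* Count at most one tick per jiffy, even on repeated calls. */
        if (p->watchdog_stamp != jiffies) {
                p->timeout++;
                p->watchdog_stamp = jiffies;
        }

        next = DIV_ROUND_UP(soft_usec, USEC_PER_SEC / HZ);
        if (p->timeout > next)
                printf("jiffy %lu: RLIMIT_RTTIME exceeded (%lu > %lu ticks)\n",
                       jiffies, p->timeout, next);
}

int main(void)
{
        struct toy_task p = { 0, (unsigned long)-1 };
        unsigned long j;

        /* Two calls per jiffy: timeout still advances once per jiffy. */
        for (j = 1; j <= 5; j++) {
                toy_watchdog(&p, j, 3000);      /* soft limit: 3000 us = 3 ticks */
                toy_watchdog(&p, j, 3000);
        }
        printf("timeout after 5 jiffies: %lu\n", p.timeout);
        return 0;
}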