--- rt.c (8046d6806247088de5725eaf8a2580b29e50ac5a)
+++ rt.c (fd7a4bed183523275279c9addbf42fce550c2e90)
@@ -1,8 +1,8 @@
 /*
  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
  * policies)
  */

 #include "sched.h"

 #include <linux/slab.h>

[... 340 unchanged lines hidden ...]

@@ -349,28 +349,35 @@
 	update_rt_migration(rt_rq);
 }

 static inline int has_pushable_tasks(struct rq *rq)
 {
 	return !plist_head_empty(&rq->rt.pushable_tasks);
 }

-static DEFINE_PER_CPU(struct callback_head, rt_balance_head);
+static DEFINE_PER_CPU(struct callback_head, rt_push_head);
+static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

 static void push_rt_tasks(struct rq *);
+static void pull_rt_task(struct rq *);

 static inline void queue_push_tasks(struct rq *rq)
 {
 	if (!has_pushable_tasks(rq))
 		return;

-	queue_balance_callback(rq, &per_cpu(rt_balance_head, rq->cpu), push_rt_tasks);
+	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
 }

+static inline void queue_pull_task(struct rq *rq)
+{
+	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 	plist_node_init(&p->pushable_tasks, p->prio);
 	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

 	/* Update the highest prio pushable task */
 	if (p->prio < rq->rt.highest_prio.next)

[... 1757 unchanged lines hidden ...]
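[ Note on the hunk above: the single rt_balance_head added earlier in this
  series is split into rt_push_head and rt_pull_head so a CPU can have both
  a push and a pull queued at once, and pull_rt_task() gains a forward
  declaration because queue_pull_task() now references it before its
  definition. For reference, queue_balance_callback() lives in sched.h and
  looks roughly like this at this point in the series (paraphrased for
  context, not part of this diff):

	static inline void
	queue_balance_callback(struct rq *rq,
			       struct callback_head *head,
			       void (*func)(struct rq *rq))
	{
		lockdep_assert_held(&rq->lock);

		/* already queued? */
		if (unlikely(head->next))
			return;

		head->func = (void (*)(struct callback_head *))func;
		head->next = rq->balance_callback;
		rq->balance_callback = head;
	}

  The callback is chained onto rq->balance_callback under rq->lock; nothing
  runs here, the work is only recorded for later. ]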

@@ -2134,17 +2141,17 @@
 	 * and the scheduling of the other RT tasks will handle
 	 * the balancing. But if we are the last RT task
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
 		return;

-	pull_rt_task(rq);
+	queue_pull_task(rq);
 }

 void __init init_sched_rt_class(void)
 {
 	unsigned int i;

 	for_each_possible_cpu(i) {
 		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),

[... 4 unchanged lines hidden ...]
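[ Note on the hunk above: this is the tail of switched_from_rt(); the
  function name and its opening lines fall in the hidden region. Roughly,
  for context (paraphrased):

	static void switched_from_rt(struct rq *rq, struct task_struct *p)
	{
		/* (comment as shown in the hunk above) */
		if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
			return;

		queue_pull_task(rq);
	}

  As on the push side, queueing the pull means rq->lock stays held across
  the whole method instead of being dropped inside pull_rt_task(). ]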

@@ -2155,34 +2162,31 @@

 /*
  * When switching a task to RT, we may overload the runqueue
  * with RT tasks. In this case we try to push them off to
  * other runqueues.
  */
 static void switched_to_rt(struct rq *rq, struct task_struct *p)
 {
-	int check_resched = 1;
-
 	/*
 	 * If we are already running, then there's nothing
 	 * that needs to be done. But if we are not running
 	 * we may need to preempt the current running task.
 	 * If that current running task is also an RT task
 	 * then see if we can move to another run queue.
 	 */
 	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
-		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded &&
-		    /* Don't resched if we changed runqueues */
-		    push_rt_task(rq) && rq != task_rq(p))
-			check_resched = 0;
-#endif /* CONFIG_SMP */
-		if (check_resched && p->prio < rq->curr->prio)
+		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
+			queue_push_tasks(rq);
+#else
+		if (p->prio < rq->curr->prio)
 			resched_curr(rq);
+#endif /* CONFIG_SMP */
 	}
 }

 /*
  * Priority of the task has changed. This may cause
  * us to initiate a push or pull.
  */
 static void

[... 4 unchanged lines hidden ...]
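[ Note on switched_to_rt() above: push_rt_task() can drop and retake
  rq->lock, which is what the old check_resched dance guarded against (p
  might have migrated by the time rq->curr is tested). Queueing the push
  keeps rq->lock held throughout; the scheduler core drains the queued
  callbacks later, roughly like this on the core.c side (paraphrased for
  context, not part of this diff):

	static void __balance_callback(struct rq *rq)
	{
		struct callback_head *head, *next;
		void (*func)(struct rq *rq);
		unsigned long flags;

		raw_spin_lock_irqsave(&rq->lock, flags);
		head = rq->balance_callback;
		rq->balance_callback = NULL;
		while (head) {
			func = (void (*)(struct rq *))head->func;
			next = head->next;
			head->next = NULL;
			head = next;

			func(rq);	/* e.g. push_rt_tasks() or pull_rt_task() */
		}
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}
  ]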

@@ -2193,24 +2197,23 @@

 	if (rq->curr == p) {
 #ifdef CONFIG_SMP
 		/*
 		 * If our priority decreases while running, we
 		 * may need to pull tasks to this runqueue.
 		 */
 		if (oldprio < p->prio)
-			pull_rt_task(rq);
+			queue_pull_task(rq);
+
 		/*
 		 * If there's a higher priority task waiting to run
-		 * then reschedule. Note, the above pull_rt_task
-		 * can release the rq lock and p could migrate.
-		 * Only reschedule if p is still on the same runqueue.
+		 * then reschedule.
 		 */
-		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
+		if (p->prio > rq->rt.highest_prio.curr)
 			resched_curr(rq);
 #else
 		/* For UP simply resched on drop of prio */
 		if (oldprio < p->prio)
 			resched_curr(rq);
 #endif /* CONFIG_SMP */
 	} else {
 		/*

[... 131 unchanged lines hidden ...]
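[ Note on prio_changed_rt() above: the deleted comment lines spell out the
  old hazard - pull_rt_task() could release rq->lock and p could migrate,
  hence the extra "rq->curr == p" re-check. With the pull merely queued,
  neither can happen inside this method, so both the comment and the
  re-check go away. The queued work runs once the caller is finished with
  the runqueue; e.g. the rt_mutex_setprio() tail in core.c looks roughly
  like this with this series applied (paraphrased, not part of this diff):

	check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
	preempt_disable(); /* avoid rq from going away on us */
	__task_rq_unlock(rq);

	balance_callback(rq); /* runs any queued push/pull callbacks */
	preempt_enable();
  ]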