rt.c: e644dae645e167d154c0526358940986682a72b0 (old) vs. 29baa7478ba47d746e3625c91d3b2afbf46b4312 (new)
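All of the hunks below make the same substitution: reads of rt_se->nr_cpus_allowed / p->rt.nr_cpus_allowed become p->nr_cpus_allowed, so the allowed-CPU count is taken from the task itself rather than from its embedded RT scheduling entity, which also lets code outside the RT class read it without reaching through p->rt. A minimal sketch of the layout change this implies, using reduced stand-in definitions rather than the real kernel headers:

/*
 * Sketch only: simplified stand-ins for the kernel structures, kept just to
 * show where nr_cpus_allowed is read from on each side of this diff.
 */
struct sched_rt_entity {
        unsigned long timeout;          /* used as rt_se->timeout in the hunks below */
        /* int nr_cpus_allowed; */      /* old side: read as p->rt.nr_cpus_allowed */
};

struct task_struct {
        int prio;
        int nr_cpus_allowed;            /* new side: read as p->nr_cpus_allowed */
        struct sched_rt_entity rt;      /* the entity the RT class schedules */
};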
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

--- 260 unchanged lines hidden (view full) ---

        } else if (rt_rq->overloaded) {
                rt_clear_overload(rq_of_rt_rq(rt_rq));
                rt_rq->overloaded = 0;
        }
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
+       struct task_struct *p;
+
        if (!rt_entity_is_task(rt_se))
                return;

+       p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total++;
-       if (rt_se->nr_cpus_allowed > 1)
+       if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;

        update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
+       struct task_struct *p;
+
        if (!rt_entity_is_task(rt_se))
                return;

+       p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total--;
-       if (rt_se->nr_cpus_allowed > 1)
+       if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;

        update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
        return !plist_head_empty(&rq->rt.pushable_tasks);

--- 850 unchanged lines hidden (view full) ---

{
        struct sched_rt_entity *rt_se = &p->rt;

        if (flags & ENQUEUE_WAKEUP)
                rt_se->timeout = 0;

        enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

-       if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
+       if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);

        inc_nr_running(rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
        struct sched_rt_entity *rt_se = &p->rt;

--- 47 unchanged lines hidden (view full) ---

select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
        struct task_struct *curr;
        struct rq *rq;
        int cpu;

        cpu = task_cpu(p);

-       if (p->rt.nr_cpus_allowed == 1)
+       if (p->nr_cpus_allowed == 1)
                goto out;

        /* For anything but wake ups, just return the task_cpu */
        if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
                goto out;

        rq = cpu_rq(cpu);

--- 18 unchanged lines hidden (view full) ---

         *
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away
         *
         * This test is optimistic, if we get it wrong the load-balancer
         * will have to sort it out.
         */
        if (curr && unlikely(rt_task(curr)) &&
-           (curr->rt.nr_cpus_allowed < 2 ||
+           (curr->nr_cpus_allowed < 2 ||
             curr->prio <= p->prio) &&
-           (p->rt.nr_cpus_allowed > 1)) {
+           (p->nr_cpus_allowed > 1)) {
                int target = find_lowest_rq(p);

                if (target != -1)
                        cpu = target;
        }
        rcu_read_unlock();

out:
        return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
-       if (rq->curr->rt.nr_cpus_allowed == 1)
+       if (rq->curr->nr_cpus_allowed == 1)
                return;

-       if (p->rt.nr_cpus_allowed != 1
+       if (p->nr_cpus_allowed != 1
            && cpupri_find(&rq->rd->cpupri, p, NULL))
                return;

        if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
                return;

        /*
         * There appears to be other cpus that can accept

--- 99 unchanged lines hidden (view full) ---

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);

        /*
         * The previous task needs to be made eligible for pushing
         * if it is still active
         */
-       if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
+       if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
-           (p->rt.nr_cpus_allowed > 1))
+           (p->nr_cpus_allowed > 1))
                return 1;
        return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
        struct task_struct *next = NULL;

--- 39 unchanged lines hidden (view full) ---

        struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu = task_cpu(task);

        /* Make sure the mask is initialized first */
        if (unlikely(!lowest_mask))
                return -1;

-       if (task->rt.nr_cpus_allowed == 1)
+       if (task->nr_cpus_allowed == 1)
                return -1; /* No other targets possible */

        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
                return -1; /* No targets found */

        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system. Now we want to elect

--- 105 unchanged lines hidden (view full) ---

        if (!has_pushable_tasks(rq))
                return NULL;

        p = plist_first_entry(&rq->rt.pushable_tasks,
                              struct task_struct, pushable_tasks);

        BUG_ON(rq->cpu != task_cpu(p));
        BUG_ON(task_current(rq, p));
-       BUG_ON(p->rt.nr_cpus_allowed <= 1);
+       BUG_ON(p->nr_cpus_allowed <= 1);

        BUG_ON(!p->on_rq);
        BUG_ON(!rt_task(p));

        return p;
}

/*

--- 190 unchanged lines hidden (view full) ---

 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
            has_pushable_tasks(rq) &&
-           p->rt.nr_cpus_allowed > 1 &&
+           p->nr_cpus_allowed > 1 &&
            rt_task(rq->curr) &&
-           (rq->curr->rt.nr_cpus_allowed < 2 ||
+           (rq->curr->nr_cpus_allowed < 2 ||
             rq->curr->prio <= p->prio))
                push_rt_tasks(rq);
}

static void set_cpus_allowed_rt(struct task_struct *p,
                                const struct cpumask *new_mask)
{
-       int weight = cpumask_weight(new_mask);
+       struct rq *rq;
+       int weight;

        BUG_ON(!rt_task(p));

+       if (!p->on_rq)
+               return;
+
+       weight = cpumask_weight(new_mask);
+
        /*
-        * Update the migration status of the RQ if we have an RT task
-        * which is running AND changing its weight value.
+        * Only update if the process changes its state from whether it
+        * can migrate or not.
         */
-       if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
-               struct rq *rq = task_rq(p);
-
-               if (!task_current(rq, p)) {
-                       /*
-                        * Make sure we dequeue this task from the pushable list
-                        * before going further. It will either remain off of
-                        * the list because we are no longer pushable, or it
-                        * will be requeued.
-                        */
-                       if (p->rt.nr_cpus_allowed > 1)
-                               dequeue_pushable_task(rq, p);
-
-                       /*
-                        * Requeue if our weight is changing and still > 1
-                        */
-                       if (weight > 1)
-                               enqueue_pushable_task(rq, p);
-
-               }
-
-               if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
-                       rq->rt.rt_nr_migratory++;
-               } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
-                       BUG_ON(!rq->rt.rt_nr_migratory);
-                       rq->rt.rt_nr_migratory--;
-               }
-
-               update_rt_migration(&rq->rt);
-       }
+       if ((p->nr_cpus_allowed > 1) == (weight > 1))
+               return;
+
+       rq = task_rq(p);
+
+       /*
+        * The process used to be able to migrate OR it can now migrate
+        */
+       if (weight <= 1) {
+               if (!task_current(rq, p))
+                       dequeue_pushable_task(rq, p);
+               BUG_ON(!rq->rt.rt_nr_migratory);
+               rq->rt.rt_nr_migratory--;
+       } else {
+               if (!task_current(rq, p))
+                       enqueue_pushable_task(rq, p);
+               rq->rt.rt_nr_migratory++;
+       }
+
+       update_rt_migration(&rq->rt);
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_set_overload(rq);

--- 227 unchanged lines hidden (view full) ---
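The largest hunk above rewrites set_cpus_allowed_rt(): the old code re-did the pushable-list and rt_nr_migratory bookkeeping whenever weight != p->rt.nr_cpus_allowed, while the new code returns early unless the task actually crosses the migratable/pinned boundary. A small userspace sketch of just that test (hypothetical helper name and simplified arguments; the kernel structures and locking are not reproduced):

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch of the early-return check used by the new set_cpus_allowed_rt()
 * above: "old_count" stands in for p->nr_cpus_allowed and "new_weight"
 * for cpumask_weight(new_mask). Only a transition across the "can this
 * task migrate at all?" boundary requires touching rt_nr_migratory and
 * the pushable list.
 */
static bool migratability_changes(int old_count, int new_weight)
{
        return (old_count > 1) != (new_weight > 1);
}

int main(void)
{
        /* 4 -> 2 allowed CPUs: still migratable on both sides, skip the update. */
        printf("4 -> 2 CPUs: %s\n", migratability_changes(4, 2) ? "update" : "skip");
        /* 4 -> 1 allowed CPU: the task becomes pinned, rt_nr_migratory must drop. */
        printf("4 -> 1 CPU:  %s\n", migratability_changes(4, 1) ? "update" : "skip");
        return 0;
}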