--- deadline.c (c5b2803840817115e9b568d5054e5007ae36176b)
+++ deadline.c (6c37067e27867db172b988cc11b9ff921175dee5)
 /*
  * Deadline Scheduling Class (SCHED_DEADLINE)
  *
  * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
  *
  * Tasks that periodically execute their instances for less than their
  * runtime won't miss any of their deadlines.
  * Tasks that are not periodic or sporadic or that try to execute more

--- 1654 unchanged lines hidden ---
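For context on the admission contract the header comment describes (a task that consumes at most sched_runtime of CPU time every sched_period, with each instance due within sched_deadline), a minimal userspace sketch of admitting a SCHED_DEADLINE task follows. It is illustrative only and not part of this diff: sched_setattr() had no glibc wrapper at the time, so the raw syscall is used, and the parameter values are invented.

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#define SCHED_DEADLINE  6       /* from include/uapi/linux/sched.h */

/* Mirrors the kernel's struct sched_attr ABI (see sched_setattr(2)). */
struct sched_attr {
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;
        uint32_t sched_priority;
        uint64_t sched_runtime;   /* CPU budget per period, in ns */
        uint64_t sched_deadline;  /* relative deadline, in ns */
        uint64_t sched_period;    /* activation period, in ns */
};

int main(void)
{
        struct sched_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size           = sizeof(attr);
        attr.sched_policy   = SCHED_DEADLINE;
        attr.sched_runtime  =  10 * 1000 * 1000;  /* 10 ms of CPU time...  */
        attr.sched_deadline =  30 * 1000 * 1000;  /* ...due within 30 ms...  */
        attr.sched_period   = 100 * 1000 * 1000;  /* ...every 100 ms */

        /* pid 0 means the calling task; needs CAP_SYS_NICE and must pass
         * the scheduler's bandwidth admission test. */
        if (syscall(SYS_sched_setattr, 0, &attr, 0) != 0) {
                perror("sched_setattr");
                return 1;
        }

        pause();  /* now scheduled under EDF + CBS */
        return 0;
}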

 	     !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
 		push_dl_tasks(rq);
 	}
 }

 static void set_cpus_allowed_dl(struct task_struct *p,
 				const struct cpumask *new_mask)
 {
-	struct rq *rq;
 	struct root_domain *src_rd;
-	int weight;
+	struct rq *rq;
 
 	BUG_ON(!dl_task(p));
 
 	rq = task_rq(p);
 	src_rd = rq->rd;
 	/*
 	 * Migrating a SCHED_DEADLINE task between exclusive
 	 * cpusets (different root_domains) entails a bandwidth

--- 9 unchanged lines hidden ---

 		 * off. In the worst case, sched_setattr() may temporarily fail
 		 * until we complete the update.
 		 */
 		raw_spin_lock(&src_dl_b->lock);
 		__dl_clear(src_dl_b, p->dl.dl_bw);
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
-	weight = cpumask_weight(new_mask);
-
-	/*
-	 * Only update if the process changes its state from whether it
-	 * can migrate or not.
-	 */
-	if ((p->nr_cpus_allowed > 1) == (weight > 1))
-		goto done;
-
-	/*
-	 * Update only if the task is actually running (i.e.,
-	 * it is on the rq AND it is not throttled).
-	 */
-	if (!on_dl_rq(&p->dl))
-		goto done;
-
-	/*
-	 * The process used to be able to migrate OR it can now migrate
-	 */
-	if (weight <= 1) {
-		if (!task_current(rq, p))
-			dequeue_pushable_dl_task(rq, p);
-		BUG_ON(!rq->dl.dl_nr_migratory);
-		rq->dl.dl_nr_migratory--;
-	} else {
-		if (!task_current(rq, p))
-			enqueue_pushable_dl_task(rq, p);
-		rq->dl.dl_nr_migratory++;
-	}
-
-	update_dl_migration(&rq->dl);
-
-done:
-	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->nr_cpus_allowed = weight;
+	set_cpus_allowed_common(p, new_mask);
 }

 /* Assumes rq->lock is held */
 static void rq_online_dl(struct rq *rq)
 {
 	if (rq->dl.overloaded)
 		dl_set_overload(rq);

--- 143 unchanged lines hidden ---
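The retained hunk above is the root_domain bandwidth hand-off: when the new mask no longer intersects the source root_domain's span, the task's utilization is released from the source domain under src_dl_b->lock (in kernels of this era, the matching reservation on the destination domain was made earlier in the cpuset attach path). A paraphrased sketch of the dl_bw accounting from kernel/sched/sched.h of the same era, not part of this diff:

/* Per-root_domain deadline-bandwidth bookkeeping (paraphrased sketch). */
struct dl_bw {
        raw_spinlock_t lock;
        u64 bw;         /* maximum admittable bandwidth (e.g. 95%, <<20 fixed point) */
        u64 total_bw;   /* sum of admitted tasks' dl_bw utilizations */
};

static inline void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
{
        dl_b->total_bw -= tsk_bw;       /* release p's bandwidth */
}

static inline void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
{
        dl_b->total_bw += tsk_bw;       /* admit p's bandwidth */
}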
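The net effect of this change is that set_cpus_allowed_dl() no longer maintains the pushable-task and dl_nr_migratory bookkeeping itself: it keeps only the DL-specific bandwidth hand-off and delegates the actual mask update to the common helper. For reference, a sketch of set_cpus_allowed_common() as introduced in kernel/sched/core.c by the surrounding series (paraphrased, not part of this file):

void set_cpus_allowed_common(struct task_struct *p,
                             const struct cpumask *new_mask)
{
        /* The generic part every class needs: record the new mask. */
        cpumask_copy(&p->cpus_allowed, new_mask);
        p->nr_cpus_allowed = cpumask_weight(new_mask);
}

This simplification works because the series also changes the calling context of sched_class::set_cpus_allowed(): a queued task is dequeued before its affinity mask changes and re-enqueued afterwards, so the regular enqueue/dequeue paths keep dl_nr_migratory and the pushable-task tree consistent, making the removed weight-transition logic unnecessary.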