--- core.c (d50dde5a10f305253cbc3855307f608f8a3c5f73)
+++ core.c (aab03e05e8f7e26f51dee792beddcb5cca9215a5)
1/*
2 * kernel/sched/core.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and

--- 885 unchanged lines hidden ---

894 * boosted by interactivity modifiers. Changes upon fork,
895 * setprio syscalls, and whenever the interactivity
896 * estimator recalculates.
897 */
898static inline int normal_prio(struct task_struct *p)
899{
900 int prio;
901
- 902 if (task_has_rt_policy(p))
+ 902 if (task_has_dl_policy(p))
+ 903 prio = MAX_DL_PRIO-1;
+ 904 else if (task_has_rt_policy(p))
903 prio = MAX_RT_PRIO-1 - p->rt_priority;
904 else
905 prio = __normal_prio(p);
906 return prio;
907}
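
For reference, normal_prio() collapses the three policy families onto one scale: deadline tasks sit below 0, realtime tasks occupy 0..MAX_RT_PRIO-1 (inverted from rt_priority), and fair tasks map their nice level above that. A standalone sketch of the arithmetic, assuming the stock values MAX_DL_PRIO == 0 and MAX_RT_PRIO == 100:

#include <stdio.h>

#define MAX_DL_PRIO	0		/* so MAX_DL_PRIO-1 == -1 */
#define MAX_RT_PRIO	100		/* rt_priority 1..99 maps to 98..0 */
#define NICE_TO_PRIO(n)	(MAX_RT_PRIO + (n) + 20)	/* nice -20..19 -> 100..139 */

int main(void)
{
	printf("SCHED_DEADLINE: %d\n", MAX_DL_PRIO - 1);	/* -1, beats all RT */
	printf("SCHED_FIFO 99:  %d\n", MAX_RT_PRIO - 1 - 99);	/* 0 */
	printf("SCHED_FIFO 1:   %d\n", MAX_RT_PRIO - 1 - 1);	/* 98 */
	printf("nice 0:         %d\n", NICE_TO_PRIO(0));	/* 120 */
	return 0;
}
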
908
909/*
910 * Calculate the current priority, i.e. the priority

--- 801 unchanged lines hidden ---

1712 p->se.nr_migrations = 0;
1713 p->se.vruntime = 0;
1714 INIT_LIST_HEAD(&p->se.group_node);
1715
1716#ifdef CONFIG_SCHEDSTATS
1717 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
1718#endif
1719
+ 1722 RB_CLEAR_NODE(&p->dl.rb_node);
+ 1723 hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ 1724 p->dl.dl_runtime = p->dl.runtime = 0;
+ 1725 p->dl.dl_deadline = p->dl.deadline = 0;
+ 1726 p->dl.flags = 0;
+ 1727
1720 INIT_LIST_HEAD(&p->rt.run_list);
1721
1722#ifdef CONFIG_PREEMPT_NOTIFIERS
1723 INIT_HLIST_HEAD(&p->preempt_notifiers);
1724#endif
1725
1726#ifdef CONFIG_NUMA_BALANCING
1727 if (p->mm && atomic_read(&p->mm->mm_users) == 1) {

--- 35 unchanged lines hidden ---

1763 numabalancing_enabled = enabled;
1764}
1765#endif /* CONFIG_SCHED_DEBUG */
1766#endif /* CONFIG_NUMA_BALANCING */
1767
1768/*
1769 * fork()/clone()-time setup:
1770 */
- 1771void sched_fork(unsigned long clone_flags, struct task_struct *p)
+ 1779int sched_fork(unsigned long clone_flags, struct task_struct *p)
1772{
1773 unsigned long flags;
1774 int cpu = get_cpu();
1775
1776 __sched_fork(clone_flags, p);
1777 /*
1778 * We mark the process as running here. This guarantees that
1779 * nobody will actually run it, and a signal or other external

--- 5 unchanged lines hidden ---

1785 * Make sure we do not leak PI boosting priority to the child.
1786 */
1787 p->prio = current->normal_prio;
1788
1789 /*
1790 * Revert to default priority/policy on fork if requested.
1791 */
1792 if (unlikely(p->sched_reset_on_fork)) {
- 1793 if (task_has_rt_policy(p)) {
+ 1801 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
1794 p->policy = SCHED_NORMAL;
1795 p->static_prio = NICE_TO_PRIO(0);
1796 p->rt_priority = 0;
1797 } else if (PRIO_TO_NICE(p->static_prio) < 0)
1798 p->static_prio = NICE_TO_PRIO(0);
1799
1800 p->prio = p->normal_prio = __normal_prio(p);
1801 set_load_weight(p);
1802
1803 /*
1804 * We don't need the reset flag anymore after the fork. It has
1805 * fulfilled its duty:
1806 */
1807 p->sched_reset_on_fork = 0;
1808 }
1809
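
The reset_on_fork path above is reachable from userspace by OR-ing SCHED_RESET_ON_FORK into the requested policy; children of the boosted task then start life as SCHED_NORMAL with nice 0. A minimal sketch (needs CAP_SYS_NICE for the SCHED_FIFO step):

#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef SCHED_RESET_ON_FORK
#define SCHED_RESET_ON_FORK	0x40000000
#endif

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp))
		return perror("sched_setscheduler"), 1;

	if (fork() == 0) {
		/* the child was reverted by sched_fork(): prints 0 (SCHED_OTHER) */
		printf("child policy: %d\n", sched_getscheduler(0));
		_exit(0);
	}
	wait(NULL);
	return 0;
}
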
- 1810 if (!rt_prio(p->prio))
- 1811 p->sched_class = &fair_sched_class;
+ 1818 if (dl_prio(p->prio)) {
+ 1819 put_cpu();
+ 1820 return -EAGAIN;
+ 1821 } else if (rt_prio(p->prio)) {
+ 1822 p->sched_class = &rt_sched_class;
+ 1823 } else {
+ 1824 p->sched_class = &fair_sched_class;
+ 1825 }
1812
1813 if (p->sched_class->task_fork)
1814 p->sched_class->task_fork(p);
1815
1816 /*
1817 * The child is not yet in the pid-hash so no cgroup attach races,
1818 * and the cgroup is pinned to this child due to cgroup_fork()
1819 * is ran before sched_fork().

--- 12 unchanged lines hidden ---

1832 p->on_cpu = 0;
1833#endif
1834 init_task_preempt_count(p);
1835#ifdef CONFIG_SMP
1836 plist_node_init(&p->pushable_tasks, MAX_PRIO);
1837#endif
1838
1839 put_cpu();
+ 1854 return 0;
1840}
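
The dl_prio() branch above is also why this version makes sched_fork() return int: a SCHED_DEADLINE parent gets no bandwidth inheritance for the child, so fork() now fails with EAGAIN unless reset-on-fork is in effect. A hedged userspace sketch; the hand-rolled struct sched_attr, SCHED_DEADLINE == 6 and the x86-64 syscall number 314 are assumptions matching this series' uapi headers, since glibc has no wrapper:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define SCHED_DEADLINE	6		/* assumption: value from this series */

struct sched_attr {			/* assumption: uapi layout of this series */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* ns */
	uint64_t sched_deadline;	/* ns */
	uint64_t sched_period;		/* ns, 0 means period == deadline */
};

int main(void)
{
	struct sched_attr a = {
		.size		= sizeof(a),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10 * 1000 * 1000,	/* 10 ms of CPU ... */
		.sched_deadline	= 30 * 1000 * 1000,	/* ... before each 30 ms deadline */
	};

	if (syscall(314 /* __NR_sched_setattr on x86-64 */, 0, &a, 0))
		return perror("sched_setattr (needs root)"), 1;

	if (fork() < 0)
		perror("fork");		/* EAGAIN: -deadline children are refused */
	return 0;
}
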
1841
1842/*
1843 * wake_up_new_task - wake up a newly created task for the first time.
1844 *
1845 * This function will do some initial scheduler statistics housekeeping
1846 * that must be done for every newly created context, then puts the task
1847 * on the runqueue and wakes it.

--- 915 unchanged lines hidden ---

2763 * Used by the rt_mutex code to implement priority inheritance logic.
2764 */
2765void rt_mutex_setprio(struct task_struct *p, int prio)
2766{
2767 int oldprio, on_rq, running;
2768 struct rq *rq;
2769 const struct sched_class *prev_class;
2770
- 2771 BUG_ON(prio < 0 || prio > MAX_PRIO);
+ 2786 BUG_ON(prio > MAX_PRIO);
2772
2773 rq = __task_rq_lock(p);
2774
2775 /*
2776 * Idle task boosting is a nono in general. There is one
2777 * exception, when PREEMPT_RT and NOHZ is active:
2778 *
2779 * The idle task calls get_next_timer_interrupt() and holds

--- 15 unchanged lines hidden ---

2795 prev_class = p->sched_class;
2796 on_rq = p->on_rq;
2797 running = task_current(rq, p);
2798 if (on_rq)
2799 dequeue_task(rq, p, 0);
2800 if (running)
2801 p->sched_class->put_prev_task(rq, p);
2802
- 2803 if (rt_prio(prio))
+ 2818 if (dl_prio(prio))
+ 2819 p->sched_class = &dl_sched_class;
+ 2820 else if (rt_prio(prio))
2804 p->sched_class = &rt_sched_class;
2805 else
2806 p->sched_class = &fair_sched_class;
2807
2808 p->prio = prio;
2809
2810 if (running)
2811 p->sched_class->set_curr_task(rq);
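
rt_mutex_setprio() itself has no syscall; the usual way userspace reaches it is a priority-inheritance futex, e.g. a PTHREAD_PRIO_INHERIT mutex. A hedged sketch (build with -lpthread; needs CAP_SYS_NICE for the SCHED_FIFO waiter): while the waiter blocks, the owner is boosted through the path above.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock;

static void *owner(void *unused)
{
	pthread_mutex_lock(&lock);
	sleep(1);			/* boosted here once the FIFO waiter queues up */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void *waiter(void *unused)
{
	pthread_mutex_lock(&lock);	/* blocks -> kernel boosts the owner */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_mutexattr_t ma;
	pthread_attr_t ta;
	struct sched_param sp = { .sched_priority = 50 };
	pthread_t t1, t2;
	int rc;

	pthread_mutexattr_init(&ma);
	pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_INHERIT);
	pthread_mutex_init(&lock, &ma);

	pthread_create(&t1, NULL, owner, NULL);
	usleep(100 * 1000);		/* let the owner take the lock first */

	pthread_attr_init(&ta);
	pthread_attr_setinheritsched(&ta, PTHREAD_EXPLICIT_SCHED);
	pthread_attr_setschedpolicy(&ta, SCHED_FIFO);
	pthread_attr_setschedparam(&ta, &sp);
	rc = pthread_create(&t2, &ta, waiter, NULL);
	if (rc)
		fprintf(stderr, "pthread_create(SCHED_FIFO) failed: %d\n", rc);

	pthread_join(t1, NULL);
	if (!rc)
		pthread_join(t2, NULL);
	return 0;
}
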

--- 18 unchanged lines hidden ---

2830 * We have to be careful, if called from sys_setpriority(),
2831 * the task might be in the middle of scheduling on another CPU.
2832 */
2833 rq = task_rq_lock(p, &flags);
2834 /*
2835 * The RT priorities are set via sched_setscheduler(), but we still
2836 * allow the 'normal' nice value to be set - but as expected
2837 * it wont have any effect on scheduling until the task is
- 2838 * SCHED_FIFO/SCHED_RR:
+ 2855 * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
 2839 */
- 2840 if (task_has_rt_policy(p)) {
+ 2857 if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
2841 p->static_prio = NICE_TO_PRIO(nice);
2842 goto out_unlock;
2843 }
2844 on_rq = p->on_rq;
2845 if (on_rq)
2846 dequeue_task(rq, p, 0);
2847
2848 p->static_prio = NICE_TO_PRIO(nice);

--- 138 unchanged lines hidden ---

2987 *
2988 * The task of @pid, if found. %NULL otherwise.
2989 */
2990static struct task_struct *find_process_by_pid(pid_t pid)
2991{
2992 return pid ? find_task_by_vpid(pid) : current;
2993}
2994
+ 3012/*
+ 3013 * This function initializes the sched_dl_entity of a newly becoming
+ 3014 * SCHED_DEADLINE task.
+ 3015 *
+ 3016 * Only the static values are considered here, the actual runtime and the
+ 3017 * absolute deadline will be properly calculated when the task is enqueued
+ 3018 * for the first time with its new policy.
+ 3019 */
+ 3020static void
+ 3021__setparam_dl(struct task_struct *p, const struct sched_attr *attr)
+ 3022{
+ 3023 struct sched_dl_entity *dl_se = &p->dl;
+ 3024
+ 3025 init_dl_task_timer(dl_se);
+ 3026 dl_se->dl_runtime = attr->sched_runtime;
+ 3027 dl_se->dl_deadline = attr->sched_deadline;
+ 3028 dl_se->flags = attr->sched_flags;
+ 3029 dl_se->dl_throttled = 0;
+ 3030 dl_se->dl_new = 1;
+ 3031}
+ 3032
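
Correspondingly, only sched_runtime, sched_deadline and sched_flags from the user's sched_attr feed the entity here; the absolute deadline is computed at first enqueue. A minimal "become SCHED_DEADLINE" sketch, reusing the hand-rolled sched_attr and syscall-number assumptions from the fork example above:

#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define SCHED_DEADLINE	6		/* assumption, as in the fork sketch */

struct sched_attr {			/* assumption: uapi layout of this series */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr a = {
		.size		= sizeof(a),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 5 * 1000 * 1000,	/* 5 ms of CPU ... */
		.sched_deadline	= 20 * 1000 * 1000,	/* ... before each 20 ms deadline */
	};

	if (syscall(314 /* __NR_sched_setattr on x86-64 */, 0, &a, 0))
		return perror("sched_setattr (needs root)"), 1;

	printf("policy now: %d\n", sched_getscheduler(0));	/* 6 */
	return 0;
}
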
2995/* Actually do priority change: must hold pi & rq lock. */
2996static void __setscheduler(struct rq *rq, struct task_struct *p,
2997 const struct sched_attr *attr)
2998{
2999 int policy = attr->sched_policy;
3000
3001 p->policy = policy;
3002
- 3003 if (rt_policy(policy))
+ 3041 if (dl_policy(policy))
+ 3042 __setparam_dl(p, attr);
+ 3043 else if (rt_policy(policy))
3004 p->rt_priority = attr->sched_priority;
3005 else
3006 p->static_prio = NICE_TO_PRIO(attr->sched_nice);
3007
3008 p->normal_prio = normal_prio(p);
3009 p->prio = rt_mutex_getprio(p);
3010
- 3011 if (rt_prio(p->prio))
+ 3051 if (dl_prio(p->prio))
+ 3052 p->sched_class = &dl_sched_class;
+ 3053 else if (rt_prio(p->prio))
3012 p->sched_class = &rt_sched_class;
3013 else
3014 p->sched_class = &fair_sched_class;
3015
3016 set_load_weight(p);
3017}
+ 3060
+ 3061static void
+ 3062__getparam_dl(struct task_struct *p, struct sched_attr *attr)
+ 3063{
+ 3064 struct sched_dl_entity *dl_se = &p->dl;
+ 3065
+ 3066 attr->sched_priority = p->rt_priority;
+ 3067 attr->sched_runtime = dl_se->dl_runtime;
+ 3068 attr->sched_deadline = dl_se->dl_deadline;
+ 3069 attr->sched_flags = dl_se->flags;
+ 3070}
+ 3071
+ 3072/*
+ 3073 * This function validates the new parameters of a -deadline task.
+ 3074 * We ask for the deadline not being zero, and greater or equal
+ 3075 * than the runtime.
+ 3076 */
+ 3077static bool
+ 3078__checkparam_dl(const struct sched_attr *attr)
+ 3079{
+ 3080 return attr && attr->sched_deadline != 0 &&
+ 3081 (s64)(attr->sched_deadline - attr->sched_runtime) >= 0;
+ 3082}
+ 3083
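
A userspace mirror of the check, with kernel types swapped for stdint ones, shows which runtime/deadline pairs survive it; note the subtraction is done unsigned and only the sign of the s64 cast is checked:

#include <stdint.h>
#include <stdio.h>

static int checkparam_dl(uint64_t runtime, uint64_t deadline)
{
	/* same test as above: non-zero deadline, and deadline >= runtime */
	return deadline != 0 && (int64_t)(deadline - runtime) >= 0;
}

int main(void)
{
	printf("%d\n", checkparam_dl(10000000, 30000000));	/* 1: 10ms/30ms ok */
	printf("%d\n", checkparam_dl(30000000, 10000000));	/* 0: runtime > deadline */
	printf("%d\n", checkparam_dl(10000000, 0));		/* 0: no deadline */
	return 0;
}
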
3084/*
3019 * check the target process has a UID that matches the current process's
3020 */
3021static bool check_same_owner(struct task_struct *p)
3022{
3023 const struct cred *cred = current_cred(), *pcred;
3024 bool match;
3025
3026 rcu_read_lock();

--- 21 unchanged lines hidden ---

3048 /* double check policy once rq lock held */
3049 if (policy < 0) {
3050 reset_on_fork = p->sched_reset_on_fork;
3051 policy = oldpolicy = p->policy;
3052 } else {
3053 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
3054 policy &= ~SCHED_RESET_ON_FORK;
3055
- 3056 if (policy != SCHED_FIFO && policy != SCHED_RR &&
+ 3122 if (policy != SCHED_DEADLINE &&
+ 3123 policy != SCHED_FIFO && policy != SCHED_RR &&
3057 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
3058 policy != SCHED_IDLE)
3059 return -EINVAL;
3060 }
3061
3062 /*
3063 * Valid priorities for SCHED_FIFO and SCHED_RR are
3064 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3065 * SCHED_BATCH and SCHED_IDLE is 0.
3066 */
3067 if (attr->sched_priority < 0 ||
3068 (p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) ||
3069 (!p->mm && attr->sched_priority > MAX_RT_PRIO-1))
3070 return -EINVAL;
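
These bounds are easy to probe from userspace: FIFO/RR demand 1..MAX_USER_RT_PRIO-1 (1..99 on a stock build), while every other policy, SCHED_DEADLINE included, demands sched_priority == 0. A small sketch:

#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

static void try(int policy, int prio)
{
	struct sched_param sp = { .sched_priority = prio };

	printf("policy %d, prio %2d: %s\n", policy, prio,
	       sched_setscheduler(0, policy, &sp) ? strerror(errno) : "ok");
}

int main(void)
{
	try(SCHED_FIFO, 0);	/* EINVAL: RT policies need prio >= 1 */
	try(SCHED_OTHER, 5);	/* EINVAL: non-RT policies need prio 0 */
	try(SCHED_OTHER, 0);	/* ok, no privilege required */
	try(SCHED_FIFO, 99);	/* ok with CAP_SYS_NICE, EPERM otherwise */
	return 0;
}
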
- 3071 if (rt_policy(policy) != (attr->sched_priority != 0))
+ 3138 if ((dl_policy(policy) && !__checkparam_dl(attr)) ||
+ 3139 (rt_policy(policy) != (attr->sched_priority != 0)))
3072 return -EINVAL;
3073
3074 /*
3075 * Allow unprivileged RT tasks to decrease priority:
3076 */
3077 if (user && !capable(CAP_SYS_NICE)) {
3078 if (fair_policy(policy)) {
3079 if (!can_nice(p, attr->sched_nice))

--- 58 unchanged lines hidden ---

3138 /*
3139 * If not changing anything there's no need to proceed further:
3140 */
3141 if (unlikely(policy == p->policy)) {
3142 if (fair_policy(policy) && attr->sched_nice != TASK_NICE(p))
3143 goto change;
3144 if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
3145 goto change;
+ 3214 if (dl_policy(policy))
+ 3215 goto change;
3146
3147 task_rq_unlock(rq, p, &flags);
3148 return 0;
3149 }
3150change:
3151
3152#ifdef CONFIG_RT_GROUP_SCHED
3153 if (user) {

--- 294 unchanged lines hidden ---

3448 retval = -ESRCH;
3449 if (!p)
3450 goto out_unlock;
3451
3452 retval = security_task_getscheduler(p);
3453 if (retval)
3454 goto out_unlock;
3455
+ 3526 if (task_has_dl_policy(p)) {
+ 3527 retval = -EINVAL;
+ 3528 goto out_unlock;
+ 3529 }
3456 lp.sched_priority = p->rt_priority;
3457 rcu_read_unlock();
3458
3459 /*
3460 * This one might sleep, we cannot do it with a spinlock held ...
3461 */
3462 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
3463

--- 41 unchanged lines hidden ---

3505 return ret;
3506
3507err_size:
3508 ret = -E2BIG;
3509 goto out;
3510}
3511
3512/**
- 3513 * sys_sched_getattr - same as above, but with extended "sched_param"
+ 3587 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
3514 * @pid: the pid in question.
3515 * @attr: structure containing the extended parameters.
3516 * @size: sizeof(attr) for fwd/bwd comp.
3517 */
3518SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
3519 unsigned int, size)
3520{
3521 struct sched_attr attr = {

--- 12 unchanged lines hidden ---

3534 if (!p)
3535 goto out_unlock;
3536
3537 retval = security_task_getscheduler(p);
3538 if (retval)
3539 goto out_unlock;
3540
3541 attr.sched_policy = p->policy;
- 3542 if (task_has_rt_policy(p))
+ 3616 if (task_has_dl_policy(p))
+ 3617 __getparam_dl(p, &attr);
+ 3618 else if (task_has_rt_policy(p))
3543 attr.sched_priority = p->rt_priority;
3544 else
3545 attr.sched_nice = TASK_NICE(p);
3546
3547 rcu_read_unlock();
3548
3549 retval = sched_read_attr(uattr, &attr, size);
3550 return retval;
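
Since the legacy sched_getparam() hunk above now refuses -deadline tasks with -EINVAL, sys_sched_getattr is the only way to read their parameters back. A hedged sketch using the same hand-rolled sched_attr as earlier; 315 as __NR_sched_getattr on x86-64 is likewise an assumption:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

struct sched_attr {			/* assumption: uapi layout of this series */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;		/* filled for fair tasks */
	uint32_t sched_priority;	/* filled for rt tasks */
	uint64_t sched_runtime;		/* filled by __getparam_dl for -dl tasks */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr a = { 0 };

	if (syscall(315 /* __NR_sched_getattr on x86-64 */, 0, &a, sizeof(a), 0))
		return perror("sched_getattr"), 1;

	printf("policy=%u nice=%d rt_prio=%u runtime=%llu deadline=%llu\n",
	       a.sched_policy, a.sched_nice, a.sched_priority,
	       (unsigned long long)a.sched_runtime,
	       (unsigned long long)a.sched_deadline);
	return 0;
}
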

--- 409 unchanged lines hidden ---

3960{
3961 int ret = -EINVAL;
3962
3963 switch (policy) {
3964 case SCHED_FIFO:
3965 case SCHED_RR:
3966 ret = MAX_USER_RT_PRIO-1;
3967 break;
+ 4044 case SCHED_DEADLINE:
3968 case SCHED_NORMAL:
3969 case SCHED_BATCH:
3970 case SCHED_IDLE:
3971 ret = 0;
3972 break;
3973 }
3974 return ret;
3975}

--- 10 unchanged lines hidden ---

3986{
3987 int ret = -EINVAL;
3988
3989 switch (policy) {
3990 case SCHED_FIFO:
3991 case SCHED_RR:
3992 ret = 1;
3993 break;
+ 4071 case SCHED_DEADLINE:
3994 case SCHED_NORMAL:
3995 case SCHED_BATCH:
3996 case SCHED_IDLE:
3997 ret = 0;
3998 }
3999 return ret;
4000}
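
Both helpers now report 0 for SCHED_DEADLINE, the same as the other non-RT policies, because deadline tasks are ordered by time rather than a static priority. Probed via the standard wrappers (SCHED_DEADLINE == 6 is assumed; a pre-deadline kernel would return -1/EINVAL instead):

#include <sched.h>
#include <stdio.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6	/* assumption: value from this series */
#endif

int main(void)
{
	printf("SCHED_FIFO:     %d..%d\n",
	       sched_get_priority_min(SCHED_FIFO),
	       sched_get_priority_max(SCHED_FIFO));	/* 1..99 */
	printf("SCHED_DEADLINE: %d..%d\n",
	       sched_get_priority_min(SCHED_DEADLINE),
	       sched_get_priority_max(SCHED_DEADLINE));	/* 0..0 */
	return 0;
}
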
4001

--- 2465 unchanged lines hidden ---

6467
6468 rq = cpu_rq(i);
6469 raw_spin_lock_init(&rq->lock);
6470 rq->nr_running = 0;
6471 rq->calc_load_active = 0;
6472 rq->calc_load_update = jiffies + LOAD_FREQ;
6473 init_cfs_rq(&rq->cfs);
6474 init_rt_rq(&rq->rt, rq);
+ 6553 init_dl_rq(&rq->dl, rq);
6475#ifdef CONFIG_FAIR_GROUP_SCHED
6476 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6477 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
6478 /*
6479 * How much cpu bandwidth does root_task_group get?
6480 *
6481 * In case of task-groups formed thr' the cgroup filesystem, it
6482 * gets 100% of the cpu resources in the system. This overall

--- 171 unchanged lines hidden ---

6654
6655 p->se.exec_start = 0;
6656#ifdef CONFIG_SCHEDSTATS
6657 p->se.statistics.wait_start = 0;
6658 p->se.statistics.sleep_start = 0;
6659 p->se.statistics.block_start = 0;
6660#endif
6661
- 6662 if (!rt_task(p)) {
+ 6741 if (!dl_task(p) && !rt_task(p)) {
6663 /*
6664 * Renice negative nice level userspace
6665 * tasks back to 0:
6666 */
6667 if (TASK_NICE(p) < 0 && p->mm)
6668 set_user_nice(p, 0);
6669 continue;
6670 }

--- 917 unchanged lines hidden ---