rt.c: 00170fdd0846df7cdb5ad421d3a340440f930b8f (lines marked -) vs. 0b07939cbfdd05bed0c5ec01b8b25493e6ecd34c (lines marked +)
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

--- 876 unchanged lines hidden ---

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
-			rt_rq->rt_throttled = 1;
-			printk_deferred_once("sched: RT throttling activated\n");
+			static bool once = false;
+
+			rt_rq->rt_throttled = 1;
+
+			if (!once) {
+				once = true;
+				printk_sched("sched: RT throttling activated\n");
+			}
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

--- 10 unchanged lines hidden ---

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
-	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

--- 8 unchanged lines hidden ---

938 cpuacct_charge(curr, delta_exec);
939
940 sched_rt_avg_update(rq, delta_exec);
941
942 if (!rt_bandwidth_enabled())
943 return;
944
945 for_each_sched_rt_entity(rt_se) {
-		rt_rq = rt_rq_of_se(rt_se);
+		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}

--- 1190 unchanged lines hidden ---
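
For context, the "sched: RT throttling activated" message that both versions emit fires when a runqueue's accumulated RT time exceeds its budget: update_curr_rt() charges the running task's execution time to rt_rq->rt_time and calls sched_rt_runtime_exceeded(), which throttles the runqueue once the budget is gone. Below is a minimal userspace sketch (not part of rt.c, file name and durations are illustrative) of how that path can be exercised. It assumes the default limits (/proc/sys/kernel/sched_rt_runtime_us = 950000, sched_rt_period_us = 1000000), assumes RT runtime borrowing between CPUs does not mask the overrun, and must run as root or with CAP_SYS_NICE.

/* rt_throttle_demo.c -- illustrative sketch only, not part of rt.c */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 50 };

	/* Become a SCHED_FIFO task; needs root or CAP_SYS_NICE. */
	if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
		perror("sched_setscheduler");
		return 1;
	}

	/*
	 * Spin for a few seconds of wall time.  Each scheduler tick reaches
	 * update_curr_rt(), which adds the elapsed time to rt_rq->rt_time;
	 * once that exceeds the 950 ms per 1 s budget,
	 * sched_rt_runtime_exceeded() throttles the runqueue and the kernel
	 * logs "sched: RT throttling activated" (once).
	 */
	time_t start = time(NULL);
	while (time(NULL) - start < 3)
		;	/* busy wait */

	puts("done - check dmesg for the throttling message");
	return 0;
}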