/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);
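	/*
	 * The fields under CONFIG_SMP below back the push/pull balancer:
	 * highest_prio caches the best queued priority, rt_nr_migratory and
	 * overloaded track whether this rq has RT tasks that could be pushed
	 * away, and pushable_tasks is the plist the push logic scans.
	 */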
#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;
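	/*
	 * Link the group entity into the hierarchy: a top-level entity is
	 * queued on the CPU's root rt_rq, a child entity on its parent's
	 * group runqueue (my_q).
	 */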
	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq, cpu_rq(i));
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

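	/* As in inc_rt_migration(), account on this CPU's root rt_rq. */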
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else
		rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
	list_add_rcu(&rt_rq->leaf_rt_rq_list,
			&rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
	list_del_rcu(&rt_rq->leaf_rt_rq_list);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, false);
		if (rt_rq->highest_prio.curr < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
{
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
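/*
 * The borrowing below is proportional: with n CPUs in the root domain,
 * at most 1/n of each neighbour's unused budget is taken per pass, and
 * our own rt_runtime is never grown beyond rt_period. E.g. with 4 CPUs
 * and a neighbour that has 40ms of its runtime unused, a single pass can
 * move at most 10ms of that budget over here.
 */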
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
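/*
 * Called under rq->lock (via disable_runtime()) when a CPU is about to go
 * offline: reclaim whatever was lent out above, then mark this rt_rq as
 * RUNTIME_INF so nobody tries to borrow from it while it is down.
 */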
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent out and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void disable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__disable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void enable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__enable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		disable_runtime(cpu_rq(cpu));
		return NOTIFY_OK;

	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		enable_runtime(cpu_rq(cpu));
		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (!sched_feat(RT_RUNTIME_SHARE))
		return more;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	const struct cpumask *span;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * Force a clock update if the CPU was idle,
				 * lest wakeup -> unthrottle time accumulate.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq->skip_clock_update = -1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		printk_once(KERN_WARNING "sched: RT throttling activated\n");
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
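/*
 * The elapsed exec time is charged against rt_time at every level of the
 * current task's rt_rq hierarchy; once a level exceeds its budget,
 * sched_rt_runtime_exceeded() throttles it and current is rescheduled.
 */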
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (curr->sched_class != &rt_sched_class)
		return;

	delta_exec = rq->clock_task - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq->clock_task;
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */
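/*
 * inc_rt_prio()/dec_rt_prio() keep highest_prio.curr, the best queued RT
 * priority of an rt_rq, up to date and, on SMP, propagate it to cpupri so
 * the push/pull logic and select_task_rq_rt() can find a CPU running at
 * lower priority.
 */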
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running++;

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	if (!rt_rq->rt_nr_running)
		list_add_leaf_rt_rq(rt_rq);

	if (head)
		list_add(&rt_se->run_list, queue);
	else
		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
	if (!rt_rq->rt_nr_running)
		list_del_leaf_rt_rq(rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, false);
	}
}

/*
 * Adding/removing a task to/from a priority array:
 */
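/*
 * On a wakeup the entity's ->timeout is cleared; if the task may run on
 * more than one CPU and is not currently running it is also added to the
 * pushable list so the push logic can migrate it. Dequeue updates the
 * runtime accounting and removes the task from the pushable list.
 */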
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);

	inc_nr_running(rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);

	dec_nr_running(rq);
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;
1199*391e43daSPeter Zijlstra int cpu; 1200*391e43daSPeter Zijlstra 1201*391e43daSPeter Zijlstra cpu = task_cpu(p); 1202*391e43daSPeter Zijlstra 1203*391e43daSPeter Zijlstra /* For anything but wake ups, just return the task_cpu */ 1204*391e43daSPeter Zijlstra if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK) 1205*391e43daSPeter Zijlstra goto out; 1206*391e43daSPeter Zijlstra 1207*391e43daSPeter Zijlstra rq = cpu_rq(cpu); 1208*391e43daSPeter Zijlstra 1209*391e43daSPeter Zijlstra rcu_read_lock(); 1210*391e43daSPeter Zijlstra curr = ACCESS_ONCE(rq->curr); /* unlocked access */ 1211*391e43daSPeter Zijlstra 1212*391e43daSPeter Zijlstra /* 1213*391e43daSPeter Zijlstra * If the current task on @p's runqueue is an RT task, then 1214*391e43daSPeter Zijlstra * try to see if we can wake this RT task up on another 1215*391e43daSPeter Zijlstra * runqueue. Otherwise simply start this RT task 1216*391e43daSPeter Zijlstra * on its current runqueue. 1217*391e43daSPeter Zijlstra * 1218*391e43daSPeter Zijlstra * We want to avoid overloading runqueues. If the woken 1219*391e43daSPeter Zijlstra * task is a higher priority, then it will stay on this CPU 1220*391e43daSPeter Zijlstra * and the lower prio task should be moved to another CPU. 1221*391e43daSPeter Zijlstra * Even though this will probably make the lower prio task 1222*391e43daSPeter Zijlstra * lose its cache, we do not want to bounce a higher task 1223*391e43daSPeter Zijlstra * around just because it gave up its CPU, perhaps for a 1224*391e43daSPeter Zijlstra * lock? 1225*391e43daSPeter Zijlstra * 1226*391e43daSPeter Zijlstra * For equal prio tasks, we just let the scheduler sort it out. 1227*391e43daSPeter Zijlstra * 1228*391e43daSPeter Zijlstra * Otherwise, just let it ride on the affined RQ and the 1229*391e43daSPeter Zijlstra * post-schedule router will push the preempted task away 1230*391e43daSPeter Zijlstra * 1231*391e43daSPeter Zijlstra * This test is optimistic, if we get it wrong the load-balancer 1232*391e43daSPeter Zijlstra * will have to sort it out. 
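 *
 * As a concrete (hypothetical) illustration of the test below: if curr
 * is an RT task at prio 40 and the waking p is at prio 45 (lower value
 * means higher priority) and p may run on more than one cpu, the
 * condition holds and we ask find_lowest_rq() for a better home for p.
 * If instead p were at prio 30 and curr itself is free to migrate, the
 * test fails, p claims this cpu, and the push logic is left to move
 * the preempted curr away afterwards.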
1233*391e43daSPeter Zijlstra */ 1234*391e43daSPeter Zijlstra if (curr && unlikely(rt_task(curr)) && 1235*391e43daSPeter Zijlstra (curr->rt.nr_cpus_allowed < 2 || 1236*391e43daSPeter Zijlstra curr->prio <= p->prio) && 1237*391e43daSPeter Zijlstra (p->rt.nr_cpus_allowed > 1)) { 1238*391e43daSPeter Zijlstra int target = find_lowest_rq(p); 1239*391e43daSPeter Zijlstra 1240*391e43daSPeter Zijlstra if (target != -1) 1241*391e43daSPeter Zijlstra cpu = target; 1242*391e43daSPeter Zijlstra } 1243*391e43daSPeter Zijlstra rcu_read_unlock(); 1244*391e43daSPeter Zijlstra 1245*391e43daSPeter Zijlstra out: 1246*391e43daSPeter Zijlstra return cpu; 1247*391e43daSPeter Zijlstra } 1248*391e43daSPeter Zijlstra 1249*391e43daSPeter Zijlstra static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) 1250*391e43daSPeter Zijlstra { 1251*391e43daSPeter Zijlstra if (rq->curr->rt.nr_cpus_allowed == 1) 1252*391e43daSPeter Zijlstra return; 1253*391e43daSPeter Zijlstra 1254*391e43daSPeter Zijlstra if (p->rt.nr_cpus_allowed != 1 1255*391e43daSPeter Zijlstra && cpupri_find(&rq->rd->cpupri, p, NULL)) 1256*391e43daSPeter Zijlstra return; 1257*391e43daSPeter Zijlstra 1258*391e43daSPeter Zijlstra if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) 1259*391e43daSPeter Zijlstra return; 1260*391e43daSPeter Zijlstra 1261*391e43daSPeter Zijlstra /* 1262*391e43daSPeter Zijlstra * There appears to be other cpus that can accept 1263*391e43daSPeter Zijlstra * current and none to run 'p', so lets reschedule 1264*391e43daSPeter Zijlstra * to try and push current away: 1265*391e43daSPeter Zijlstra */ 1266*391e43daSPeter Zijlstra requeue_task_rt(rq, p, 1); 1267*391e43daSPeter Zijlstra resched_task(rq->curr); 1268*391e43daSPeter Zijlstra } 1269*391e43daSPeter Zijlstra 1270*391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 1271*391e43daSPeter Zijlstra 1272*391e43daSPeter Zijlstra /* 1273*391e43daSPeter Zijlstra * Preempt the current task with a newly woken task if needed: 1274*391e43daSPeter Zijlstra */ 1275*391e43daSPeter Zijlstra static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) 1276*391e43daSPeter Zijlstra { 1277*391e43daSPeter Zijlstra if (p->prio < rq->curr->prio) { 1278*391e43daSPeter Zijlstra resched_task(rq->curr); 1279*391e43daSPeter Zijlstra return; 1280*391e43daSPeter Zijlstra } 1281*391e43daSPeter Zijlstra 1282*391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1283*391e43daSPeter Zijlstra /* 1284*391e43daSPeter Zijlstra * If: 1285*391e43daSPeter Zijlstra * 1286*391e43daSPeter Zijlstra * - the newly woken task is of equal priority to the current task 1287*391e43daSPeter Zijlstra * - the newly woken task is non-migratable while current is migratable 1288*391e43daSPeter Zijlstra * - current will be preempted on the next reschedule 1289*391e43daSPeter Zijlstra * 1290*391e43daSPeter Zijlstra * we should check to see if current can readily move to a different 1291*391e43daSPeter Zijlstra * cpu. If so, we will reschedule to allow the push logic to try 1292*391e43daSPeter Zijlstra * to move current somewhere else, making room for our non-migratable 1293*391e43daSPeter Zijlstra * task. 
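 *
 * check_preempt_equal_prio() above performs that check: it bails out
 * if curr is pinned to this cpu, or if cpupri can find somewhere else
 * for p itself to run; otherwise, if some other cpu could take curr,
 * it requeues p at the head and reschedules curr so the push logic
 * gets a chance to move curr out of the way.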
1294*391e43daSPeter Zijlstra */ 1295*391e43daSPeter Zijlstra if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) 1296*391e43daSPeter Zijlstra check_preempt_equal_prio(rq, p); 1297*391e43daSPeter Zijlstra #endif 1298*391e43daSPeter Zijlstra } 1299*391e43daSPeter Zijlstra 1300*391e43daSPeter Zijlstra static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, 1301*391e43daSPeter Zijlstra struct rt_rq *rt_rq) 1302*391e43daSPeter Zijlstra { 1303*391e43daSPeter Zijlstra struct rt_prio_array *array = &rt_rq->active; 1304*391e43daSPeter Zijlstra struct sched_rt_entity *next = NULL; 1305*391e43daSPeter Zijlstra struct list_head *queue; 1306*391e43daSPeter Zijlstra int idx; 1307*391e43daSPeter Zijlstra 1308*391e43daSPeter Zijlstra idx = sched_find_first_bit(array->bitmap); 1309*391e43daSPeter Zijlstra BUG_ON(idx >= MAX_RT_PRIO); 1310*391e43daSPeter Zijlstra 1311*391e43daSPeter Zijlstra queue = array->queue + idx; 1312*391e43daSPeter Zijlstra next = list_entry(queue->next, struct sched_rt_entity, run_list); 1313*391e43daSPeter Zijlstra 1314*391e43daSPeter Zijlstra return next; 1315*391e43daSPeter Zijlstra } 1316*391e43daSPeter Zijlstra 1317*391e43daSPeter Zijlstra static struct task_struct *_pick_next_task_rt(struct rq *rq) 1318*391e43daSPeter Zijlstra { 1319*391e43daSPeter Zijlstra struct sched_rt_entity *rt_se; 1320*391e43daSPeter Zijlstra struct task_struct *p; 1321*391e43daSPeter Zijlstra struct rt_rq *rt_rq; 1322*391e43daSPeter Zijlstra 1323*391e43daSPeter Zijlstra rt_rq = &rq->rt; 1324*391e43daSPeter Zijlstra 1325*391e43daSPeter Zijlstra if (!rt_rq->rt_nr_running) 1326*391e43daSPeter Zijlstra return NULL; 1327*391e43daSPeter Zijlstra 1328*391e43daSPeter Zijlstra if (rt_rq_throttled(rt_rq)) 1329*391e43daSPeter Zijlstra return NULL; 1330*391e43daSPeter Zijlstra 1331*391e43daSPeter Zijlstra do { 1332*391e43daSPeter Zijlstra rt_se = pick_next_rt_entity(rq, rt_rq); 1333*391e43daSPeter Zijlstra BUG_ON(!rt_se); 1334*391e43daSPeter Zijlstra rt_rq = group_rt_rq(rt_se); 1335*391e43daSPeter Zijlstra } while (rt_rq); 1336*391e43daSPeter Zijlstra 1337*391e43daSPeter Zijlstra p = rt_task_of(rt_se); 1338*391e43daSPeter Zijlstra p->se.exec_start = rq->clock_task; 1339*391e43daSPeter Zijlstra 1340*391e43daSPeter Zijlstra return p; 1341*391e43daSPeter Zijlstra } 1342*391e43daSPeter Zijlstra 1343*391e43daSPeter Zijlstra static struct task_struct *pick_next_task_rt(struct rq *rq) 1344*391e43daSPeter Zijlstra { 1345*391e43daSPeter Zijlstra struct task_struct *p = _pick_next_task_rt(rq); 1346*391e43daSPeter Zijlstra 1347*391e43daSPeter Zijlstra /* The running task is never eligible for pushing */ 1348*391e43daSPeter Zijlstra if (p) 1349*391e43daSPeter Zijlstra dequeue_pushable_task(rq, p); 1350*391e43daSPeter Zijlstra 1351*391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1352*391e43daSPeter Zijlstra /* 1353*391e43daSPeter Zijlstra * We detect this state here so that we can avoid taking the RQ 1354*391e43daSPeter Zijlstra * lock again later if there is no need to push 1355*391e43daSPeter Zijlstra */ 1356*391e43daSPeter Zijlstra rq->post_schedule = has_pushable_tasks(rq); 1357*391e43daSPeter Zijlstra #endif 1358*391e43daSPeter Zijlstra 1359*391e43daSPeter Zijlstra return p; 1360*391e43daSPeter Zijlstra } 1361*391e43daSPeter Zijlstra 1362*391e43daSPeter Zijlstra static void put_prev_task_rt(struct rq *rq, struct task_struct *p) 1363*391e43daSPeter Zijlstra { 1364*391e43daSPeter Zijlstra update_curr_rt(rq); 1365*391e43daSPeter Zijlstra 1366*391e43daSPeter Zijlstra /* 1367*391e43daSPeter Zijlstra * The 
previous task needs to be made eligible for pushing 1368*391e43daSPeter Zijlstra * if it is still active 1369*391e43daSPeter Zijlstra */ 1370*391e43daSPeter Zijlstra if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1) 1371*391e43daSPeter Zijlstra enqueue_pushable_task(rq, p); 1372*391e43daSPeter Zijlstra } 1373*391e43daSPeter Zijlstra 1374*391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1375*391e43daSPeter Zijlstra 1376*391e43daSPeter Zijlstra /* Only try algorithms three times */ 1377*391e43daSPeter Zijlstra #define RT_MAX_TRIES 3 1378*391e43daSPeter Zijlstra 1379*391e43daSPeter Zijlstra static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) 1380*391e43daSPeter Zijlstra { 1381*391e43daSPeter Zijlstra if (!task_running(rq, p) && 1382*391e43daSPeter Zijlstra (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) && 1383*391e43daSPeter Zijlstra (p->rt.nr_cpus_allowed > 1)) 1384*391e43daSPeter Zijlstra return 1; 1385*391e43daSPeter Zijlstra return 0; 1386*391e43daSPeter Zijlstra } 1387*391e43daSPeter Zijlstra 1388*391e43daSPeter Zijlstra /* Return the second highest RT task, NULL otherwise */ 1389*391e43daSPeter Zijlstra static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu) 1390*391e43daSPeter Zijlstra { 1391*391e43daSPeter Zijlstra struct task_struct *next = NULL; 1392*391e43daSPeter Zijlstra struct sched_rt_entity *rt_se; 1393*391e43daSPeter Zijlstra struct rt_prio_array *array; 1394*391e43daSPeter Zijlstra struct rt_rq *rt_rq; 1395*391e43daSPeter Zijlstra int idx; 1396*391e43daSPeter Zijlstra 1397*391e43daSPeter Zijlstra for_each_leaf_rt_rq(rt_rq, rq) { 1398*391e43daSPeter Zijlstra array = &rt_rq->active; 1399*391e43daSPeter Zijlstra idx = sched_find_first_bit(array->bitmap); 1400*391e43daSPeter Zijlstra next_idx: 1401*391e43daSPeter Zijlstra if (idx >= MAX_RT_PRIO) 1402*391e43daSPeter Zijlstra continue; 1403*391e43daSPeter Zijlstra if (next && next->prio < idx) 1404*391e43daSPeter Zijlstra continue; 1405*391e43daSPeter Zijlstra list_for_each_entry(rt_se, array->queue + idx, run_list) { 1406*391e43daSPeter Zijlstra struct task_struct *p; 1407*391e43daSPeter Zijlstra 1408*391e43daSPeter Zijlstra if (!rt_entity_is_task(rt_se)) 1409*391e43daSPeter Zijlstra continue; 1410*391e43daSPeter Zijlstra 1411*391e43daSPeter Zijlstra p = rt_task_of(rt_se); 1412*391e43daSPeter Zijlstra if (pick_rt_task(rq, p, cpu)) { 1413*391e43daSPeter Zijlstra next = p; 1414*391e43daSPeter Zijlstra break; 1415*391e43daSPeter Zijlstra } 1416*391e43daSPeter Zijlstra } 1417*391e43daSPeter Zijlstra if (!next) { 1418*391e43daSPeter Zijlstra idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1); 1419*391e43daSPeter Zijlstra goto next_idx; 1420*391e43daSPeter Zijlstra } 1421*391e43daSPeter Zijlstra } 1422*391e43daSPeter Zijlstra 1423*391e43daSPeter Zijlstra return next; 1424*391e43daSPeter Zijlstra } 1425*391e43daSPeter Zijlstra 1426*391e43daSPeter Zijlstra static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); 1427*391e43daSPeter Zijlstra 1428*391e43daSPeter Zijlstra static int find_lowest_rq(struct task_struct *task) 1429*391e43daSPeter Zijlstra { 1430*391e43daSPeter Zijlstra struct sched_domain *sd; 1431*391e43daSPeter Zijlstra struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); 1432*391e43daSPeter Zijlstra int this_cpu = smp_processor_id(); 1433*391e43daSPeter Zijlstra int cpu = task_cpu(task); 1434*391e43daSPeter Zijlstra 1435*391e43daSPeter Zijlstra /* Make sure the mask is initialized first */ 1436*391e43daSPeter Zijlstra if (unlikely(!lowest_mask)) 1437*391e43daSPeter Zijlstra 
return -1; 1438*391e43daSPeter Zijlstra 1439*391e43daSPeter Zijlstra if (task->rt.nr_cpus_allowed == 1) 1440*391e43daSPeter Zijlstra return -1; /* No other targets possible */ 1441*391e43daSPeter Zijlstra 1442*391e43daSPeter Zijlstra if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) 1443*391e43daSPeter Zijlstra return -1; /* No targets found */ 1444*391e43daSPeter Zijlstra 1445*391e43daSPeter Zijlstra /* 1446*391e43daSPeter Zijlstra * At this point we have built a mask of cpus representing the 1447*391e43daSPeter Zijlstra * lowest priority tasks in the system. Now we want to elect 1448*391e43daSPeter Zijlstra * the best one based on our affinity and topology. 1449*391e43daSPeter Zijlstra * 1450*391e43daSPeter Zijlstra * We prioritize the last cpu that the task executed on since 1451*391e43daSPeter Zijlstra * it is most likely cache-hot in that location. 1452*391e43daSPeter Zijlstra */ 1453*391e43daSPeter Zijlstra if (cpumask_test_cpu(cpu, lowest_mask)) 1454*391e43daSPeter Zijlstra return cpu; 1455*391e43daSPeter Zijlstra 1456*391e43daSPeter Zijlstra /* 1457*391e43daSPeter Zijlstra * Otherwise, we consult the sched_domains span maps to figure 1458*391e43daSPeter Zijlstra * out which cpu is logically closest to our hot cache data. 1459*391e43daSPeter Zijlstra */ 1460*391e43daSPeter Zijlstra if (!cpumask_test_cpu(this_cpu, lowest_mask)) 1461*391e43daSPeter Zijlstra this_cpu = -1; /* Skip this_cpu opt if not among lowest */ 1462*391e43daSPeter Zijlstra 1463*391e43daSPeter Zijlstra rcu_read_lock(); 1464*391e43daSPeter Zijlstra for_each_domain(cpu, sd) { 1465*391e43daSPeter Zijlstra if (sd->flags & SD_WAKE_AFFINE) { 1466*391e43daSPeter Zijlstra int best_cpu; 1467*391e43daSPeter Zijlstra 1468*391e43daSPeter Zijlstra /* 1469*391e43daSPeter Zijlstra * "this_cpu" is cheaper to preempt than a 1470*391e43daSPeter Zijlstra * remote processor. 1471*391e43daSPeter Zijlstra */ 1472*391e43daSPeter Zijlstra if (this_cpu != -1 && 1473*391e43daSPeter Zijlstra cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { 1474*391e43daSPeter Zijlstra rcu_read_unlock(); 1475*391e43daSPeter Zijlstra return this_cpu; 1476*391e43daSPeter Zijlstra } 1477*391e43daSPeter Zijlstra 1478*391e43daSPeter Zijlstra best_cpu = cpumask_first_and(lowest_mask, 1479*391e43daSPeter Zijlstra sched_domain_span(sd)); 1480*391e43daSPeter Zijlstra if (best_cpu < nr_cpu_ids) { 1481*391e43daSPeter Zijlstra rcu_read_unlock(); 1482*391e43daSPeter Zijlstra return best_cpu; 1483*391e43daSPeter Zijlstra } 1484*391e43daSPeter Zijlstra } 1485*391e43daSPeter Zijlstra } 1486*391e43daSPeter Zijlstra rcu_read_unlock(); 1487*391e43daSPeter Zijlstra 1488*391e43daSPeter Zijlstra /* 1489*391e43daSPeter Zijlstra * And finally, if there were no matches within the domains 1490*391e43daSPeter Zijlstra * just give the caller *something* to work with from the compatible 1491*391e43daSPeter Zijlstra * locations. 
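 *
 * Summarizing the preference order used here: (1) the cpu the task
 * last ran on, if it is in lowest_mask; (2) this_cpu, when it is in
 * the mask and shares a SD_WAKE_AFFINE domain with that cpu; (3) the
 * first mask cpu found inside such a domain; (4) this_cpu as a weaker
 * fallback; (5) any remaining cpu in the mask; otherwise -1.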
1492*391e43daSPeter Zijlstra */ 1493*391e43daSPeter Zijlstra if (this_cpu != -1) 1494*391e43daSPeter Zijlstra return this_cpu; 1495*391e43daSPeter Zijlstra 1496*391e43daSPeter Zijlstra cpu = cpumask_any(lowest_mask); 1497*391e43daSPeter Zijlstra if (cpu < nr_cpu_ids) 1498*391e43daSPeter Zijlstra return cpu; 1499*391e43daSPeter Zijlstra return -1; 1500*391e43daSPeter Zijlstra } 1501*391e43daSPeter Zijlstra 1502*391e43daSPeter Zijlstra /* Will lock the rq it finds */ 1503*391e43daSPeter Zijlstra static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) 1504*391e43daSPeter Zijlstra { 1505*391e43daSPeter Zijlstra struct rq *lowest_rq = NULL; 1506*391e43daSPeter Zijlstra int tries; 1507*391e43daSPeter Zijlstra int cpu; 1508*391e43daSPeter Zijlstra 1509*391e43daSPeter Zijlstra for (tries = 0; tries < RT_MAX_TRIES; tries++) { 1510*391e43daSPeter Zijlstra cpu = find_lowest_rq(task); 1511*391e43daSPeter Zijlstra 1512*391e43daSPeter Zijlstra if ((cpu == -1) || (cpu == rq->cpu)) 1513*391e43daSPeter Zijlstra break; 1514*391e43daSPeter Zijlstra 1515*391e43daSPeter Zijlstra lowest_rq = cpu_rq(cpu); 1516*391e43daSPeter Zijlstra 1517*391e43daSPeter Zijlstra /* if the prio of this runqueue changed, try again */ 1518*391e43daSPeter Zijlstra if (double_lock_balance(rq, lowest_rq)) { 1519*391e43daSPeter Zijlstra /* 1520*391e43daSPeter Zijlstra * We had to unlock the run queue. In 1521*391e43daSPeter Zijlstra * the mean time, task could have 1522*391e43daSPeter Zijlstra * migrated already or had its affinity changed. 1523*391e43daSPeter Zijlstra * Also make sure that it wasn't scheduled on its rq. 1524*391e43daSPeter Zijlstra */ 1525*391e43daSPeter Zijlstra if (unlikely(task_rq(task) != rq || 1526*391e43daSPeter Zijlstra !cpumask_test_cpu(lowest_rq->cpu, 1527*391e43daSPeter Zijlstra tsk_cpus_allowed(task)) || 1528*391e43daSPeter Zijlstra task_running(rq, task) || 1529*391e43daSPeter Zijlstra !task->on_rq)) { 1530*391e43daSPeter Zijlstra 1531*391e43daSPeter Zijlstra raw_spin_unlock(&lowest_rq->lock); 1532*391e43daSPeter Zijlstra lowest_rq = NULL; 1533*391e43daSPeter Zijlstra break; 1534*391e43daSPeter Zijlstra } 1535*391e43daSPeter Zijlstra } 1536*391e43daSPeter Zijlstra 1537*391e43daSPeter Zijlstra /* If this rq is still suitable use it. 
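 * "Suitable" below means the candidate rq's best queued priority is
 * numerically above task->prio, i.e. our task would be the top RT
 * task there once pushed.  If that no longer holds (the locks may
 * have been dropped above), unlock and retry, up to RT_MAX_TRIES
 * times in total.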
*/ 1538*391e43daSPeter Zijlstra if (lowest_rq->rt.highest_prio.curr > task->prio) 1539*391e43daSPeter Zijlstra break; 1540*391e43daSPeter Zijlstra 1541*391e43daSPeter Zijlstra /* try again */ 1542*391e43daSPeter Zijlstra double_unlock_balance(rq, lowest_rq); 1543*391e43daSPeter Zijlstra lowest_rq = NULL; 1544*391e43daSPeter Zijlstra } 1545*391e43daSPeter Zijlstra 1546*391e43daSPeter Zijlstra return lowest_rq; 1547*391e43daSPeter Zijlstra } 1548*391e43daSPeter Zijlstra 1549*391e43daSPeter Zijlstra static struct task_struct *pick_next_pushable_task(struct rq *rq) 1550*391e43daSPeter Zijlstra { 1551*391e43daSPeter Zijlstra struct task_struct *p; 1552*391e43daSPeter Zijlstra 1553*391e43daSPeter Zijlstra if (!has_pushable_tasks(rq)) 1554*391e43daSPeter Zijlstra return NULL; 1555*391e43daSPeter Zijlstra 1556*391e43daSPeter Zijlstra p = plist_first_entry(&rq->rt.pushable_tasks, 1557*391e43daSPeter Zijlstra struct task_struct, pushable_tasks); 1558*391e43daSPeter Zijlstra 1559*391e43daSPeter Zijlstra BUG_ON(rq->cpu != task_cpu(p)); 1560*391e43daSPeter Zijlstra BUG_ON(task_current(rq, p)); 1561*391e43daSPeter Zijlstra BUG_ON(p->rt.nr_cpus_allowed <= 1); 1562*391e43daSPeter Zijlstra 1563*391e43daSPeter Zijlstra BUG_ON(!p->on_rq); 1564*391e43daSPeter Zijlstra BUG_ON(!rt_task(p)); 1565*391e43daSPeter Zijlstra 1566*391e43daSPeter Zijlstra return p; 1567*391e43daSPeter Zijlstra } 1568*391e43daSPeter Zijlstra 1569*391e43daSPeter Zijlstra /* 1570*391e43daSPeter Zijlstra * If the current CPU has more than one RT task, see if the non 1571*391e43daSPeter Zijlstra * running task can migrate over to a CPU that is running a task 1572*391e43daSPeter Zijlstra * of lesser priority. 1573*391e43daSPeter Zijlstra */ 1574*391e43daSPeter Zijlstra static int push_rt_task(struct rq *rq) 1575*391e43daSPeter Zijlstra { 1576*391e43daSPeter Zijlstra struct task_struct *next_task; 1577*391e43daSPeter Zijlstra struct rq *lowest_rq; 1578*391e43daSPeter Zijlstra int ret = 0; 1579*391e43daSPeter Zijlstra 1580*391e43daSPeter Zijlstra if (!rq->rt.overloaded) 1581*391e43daSPeter Zijlstra return 0; 1582*391e43daSPeter Zijlstra 1583*391e43daSPeter Zijlstra next_task = pick_next_pushable_task(rq); 1584*391e43daSPeter Zijlstra if (!next_task) 1585*391e43daSPeter Zijlstra return 0; 1586*391e43daSPeter Zijlstra 1587*391e43daSPeter Zijlstra retry: 1588*391e43daSPeter Zijlstra if (unlikely(next_task == rq->curr)) { 1589*391e43daSPeter Zijlstra WARN_ON(1); 1590*391e43daSPeter Zijlstra return 0; 1591*391e43daSPeter Zijlstra } 1592*391e43daSPeter Zijlstra 1593*391e43daSPeter Zijlstra /* 1594*391e43daSPeter Zijlstra * It's possible that the next_task slipped in of 1595*391e43daSPeter Zijlstra * higher priority than current. If that's the case 1596*391e43daSPeter Zijlstra * just reschedule current. 
1597*391e43daSPeter Zijlstra */ 1598*391e43daSPeter Zijlstra if (unlikely(next_task->prio < rq->curr->prio)) { 1599*391e43daSPeter Zijlstra resched_task(rq->curr); 1600*391e43daSPeter Zijlstra return 0; 1601*391e43daSPeter Zijlstra } 1602*391e43daSPeter Zijlstra 1603*391e43daSPeter Zijlstra /* We might release rq lock */ 1604*391e43daSPeter Zijlstra get_task_struct(next_task); 1605*391e43daSPeter Zijlstra 1606*391e43daSPeter Zijlstra /* find_lock_lowest_rq locks the rq if found */ 1607*391e43daSPeter Zijlstra lowest_rq = find_lock_lowest_rq(next_task, rq); 1608*391e43daSPeter Zijlstra if (!lowest_rq) { 1609*391e43daSPeter Zijlstra struct task_struct *task; 1610*391e43daSPeter Zijlstra /* 1611*391e43daSPeter Zijlstra * find_lock_lowest_rq releases rq->lock 1612*391e43daSPeter Zijlstra * so it is possible that next_task has migrated. 1613*391e43daSPeter Zijlstra * 1614*391e43daSPeter Zijlstra * We need to make sure that the task is still on the same 1615*391e43daSPeter Zijlstra * run-queue and is also still the next task eligible for 1616*391e43daSPeter Zijlstra * pushing. 1617*391e43daSPeter Zijlstra */ 1618*391e43daSPeter Zijlstra task = pick_next_pushable_task(rq); 1619*391e43daSPeter Zijlstra if (task_cpu(next_task) == rq->cpu && task == next_task) { 1620*391e43daSPeter Zijlstra /* 1621*391e43daSPeter Zijlstra * The task hasn't migrated, and is still the next 1622*391e43daSPeter Zijlstra * eligible task, but we failed to find a run-queue 1623*391e43daSPeter Zijlstra * to push it to. Do not retry in this case, since 1624*391e43daSPeter Zijlstra * other cpus will pull from us when ready. 1625*391e43daSPeter Zijlstra */ 1626*391e43daSPeter Zijlstra goto out; 1627*391e43daSPeter Zijlstra } 1628*391e43daSPeter Zijlstra 1629*391e43daSPeter Zijlstra if (!task) 1630*391e43daSPeter Zijlstra /* No more tasks, just exit */ 1631*391e43daSPeter Zijlstra goto out; 1632*391e43daSPeter Zijlstra 1633*391e43daSPeter Zijlstra /* 1634*391e43daSPeter Zijlstra * Something has shifted, try again. 
1635*391e43daSPeter Zijlstra */ 1636*391e43daSPeter Zijlstra put_task_struct(next_task); 1637*391e43daSPeter Zijlstra next_task = task; 1638*391e43daSPeter Zijlstra goto retry; 1639*391e43daSPeter Zijlstra } 1640*391e43daSPeter Zijlstra 1641*391e43daSPeter Zijlstra deactivate_task(rq, next_task, 0); 1642*391e43daSPeter Zijlstra set_task_cpu(next_task, lowest_rq->cpu); 1643*391e43daSPeter Zijlstra activate_task(lowest_rq, next_task, 0); 1644*391e43daSPeter Zijlstra ret = 1; 1645*391e43daSPeter Zijlstra 1646*391e43daSPeter Zijlstra resched_task(lowest_rq->curr); 1647*391e43daSPeter Zijlstra 1648*391e43daSPeter Zijlstra double_unlock_balance(rq, lowest_rq); 1649*391e43daSPeter Zijlstra 1650*391e43daSPeter Zijlstra out: 1651*391e43daSPeter Zijlstra put_task_struct(next_task); 1652*391e43daSPeter Zijlstra 1653*391e43daSPeter Zijlstra return ret; 1654*391e43daSPeter Zijlstra } 1655*391e43daSPeter Zijlstra 1656*391e43daSPeter Zijlstra static void push_rt_tasks(struct rq *rq) 1657*391e43daSPeter Zijlstra { 1658*391e43daSPeter Zijlstra /* push_rt_task will return true if it moved an RT */ 1659*391e43daSPeter Zijlstra while (push_rt_task(rq)) 1660*391e43daSPeter Zijlstra ; 1661*391e43daSPeter Zijlstra } 1662*391e43daSPeter Zijlstra 1663*391e43daSPeter Zijlstra static int pull_rt_task(struct rq *this_rq) 1664*391e43daSPeter Zijlstra { 1665*391e43daSPeter Zijlstra int this_cpu = this_rq->cpu, ret = 0, cpu; 1666*391e43daSPeter Zijlstra struct task_struct *p; 1667*391e43daSPeter Zijlstra struct rq *src_rq; 1668*391e43daSPeter Zijlstra 1669*391e43daSPeter Zijlstra if (likely(!rt_overloaded(this_rq))) 1670*391e43daSPeter Zijlstra return 0; 1671*391e43daSPeter Zijlstra 1672*391e43daSPeter Zijlstra for_each_cpu(cpu, this_rq->rd->rto_mask) { 1673*391e43daSPeter Zijlstra if (this_cpu == cpu) 1674*391e43daSPeter Zijlstra continue; 1675*391e43daSPeter Zijlstra 1676*391e43daSPeter Zijlstra src_rq = cpu_rq(cpu); 1677*391e43daSPeter Zijlstra 1678*391e43daSPeter Zijlstra /* 1679*391e43daSPeter Zijlstra * Don't bother taking the src_rq->lock if the next highest 1680*391e43daSPeter Zijlstra * task is known to be lower-priority than our current task. 1681*391e43daSPeter Zijlstra * This may look racy, but if this value is about to go 1682*391e43daSPeter Zijlstra * logically higher, the src_rq will push this task away. 1683*391e43daSPeter Zijlstra * And if its going logically lower, we do not care 1684*391e43daSPeter Zijlstra */ 1685*391e43daSPeter Zijlstra if (src_rq->rt.highest_prio.next >= 1686*391e43daSPeter Zijlstra this_rq->rt.highest_prio.curr) 1687*391e43daSPeter Zijlstra continue; 1688*391e43daSPeter Zijlstra 1689*391e43daSPeter Zijlstra /* 1690*391e43daSPeter Zijlstra * We can potentially drop this_rq's lock in 1691*391e43daSPeter Zijlstra * double_lock_balance, and another CPU could 1692*391e43daSPeter Zijlstra * alter this_rq 1693*391e43daSPeter Zijlstra */ 1694*391e43daSPeter Zijlstra double_lock_balance(this_rq, src_rq); 1695*391e43daSPeter Zijlstra 1696*391e43daSPeter Zijlstra /* 1697*391e43daSPeter Zijlstra * Are there still pullable RT tasks? 1698*391e43daSPeter Zijlstra */ 1699*391e43daSPeter Zijlstra if (src_rq->rt.rt_nr_running <= 1) 1700*391e43daSPeter Zijlstra goto skip; 1701*391e43daSPeter Zijlstra 1702*391e43daSPeter Zijlstra p = pick_next_highest_task_rt(src_rq, this_cpu); 1703*391e43daSPeter Zijlstra 1704*391e43daSPeter Zijlstra /* 1705*391e43daSPeter Zijlstra * Do we have an RT task that preempts 1706*391e43daSPeter Zijlstra * the to-be-scheduled task? 
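 *
 * For instance (hypothetical values): if the best we have queued
 * locally is prio 50 and src_rq holds a queued prio-20 task, pulling
 * it is a win.  But if that task is also better than what src_rq is
 * currently running, it is merely waking up and will get that cpu
 * anyway, so the code below skips it instead of migrating it for
 * nothing.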
1707*391e43daSPeter Zijlstra */ 1708*391e43daSPeter Zijlstra if (p && (p->prio < this_rq->rt.highest_prio.curr)) { 1709*391e43daSPeter Zijlstra WARN_ON(p == src_rq->curr); 1710*391e43daSPeter Zijlstra WARN_ON(!p->on_rq); 1711*391e43daSPeter Zijlstra 1712*391e43daSPeter Zijlstra /* 1713*391e43daSPeter Zijlstra * There's a chance that p is higher in priority 1714*391e43daSPeter Zijlstra * than what's currently running on its cpu. 1715*391e43daSPeter Zijlstra * This is just that p is wakeing up and hasn't 1716*391e43daSPeter Zijlstra * had a chance to schedule. We only pull 1717*391e43daSPeter Zijlstra * p if it is lower in priority than the 1718*391e43daSPeter Zijlstra * current task on the run queue 1719*391e43daSPeter Zijlstra */ 1720*391e43daSPeter Zijlstra if (p->prio < src_rq->curr->prio) 1721*391e43daSPeter Zijlstra goto skip; 1722*391e43daSPeter Zijlstra 1723*391e43daSPeter Zijlstra ret = 1; 1724*391e43daSPeter Zijlstra 1725*391e43daSPeter Zijlstra deactivate_task(src_rq, p, 0); 1726*391e43daSPeter Zijlstra set_task_cpu(p, this_cpu); 1727*391e43daSPeter Zijlstra activate_task(this_rq, p, 0); 1728*391e43daSPeter Zijlstra /* 1729*391e43daSPeter Zijlstra * We continue with the search, just in 1730*391e43daSPeter Zijlstra * case there's an even higher prio task 1731*391e43daSPeter Zijlstra * in another runqueue. (low likelihood 1732*391e43daSPeter Zijlstra * but possible) 1733*391e43daSPeter Zijlstra */ 1734*391e43daSPeter Zijlstra } 1735*391e43daSPeter Zijlstra skip: 1736*391e43daSPeter Zijlstra double_unlock_balance(this_rq, src_rq); 1737*391e43daSPeter Zijlstra } 1738*391e43daSPeter Zijlstra 1739*391e43daSPeter Zijlstra return ret; 1740*391e43daSPeter Zijlstra } 1741*391e43daSPeter Zijlstra 1742*391e43daSPeter Zijlstra static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) 1743*391e43daSPeter Zijlstra { 1744*391e43daSPeter Zijlstra /* Try to pull RT tasks here if we lower this rq's prio */ 1745*391e43daSPeter Zijlstra if (rq->rt.highest_prio.curr > prev->prio) 1746*391e43daSPeter Zijlstra pull_rt_task(rq); 1747*391e43daSPeter Zijlstra } 1748*391e43daSPeter Zijlstra 1749*391e43daSPeter Zijlstra static void post_schedule_rt(struct rq *rq) 1750*391e43daSPeter Zijlstra { 1751*391e43daSPeter Zijlstra push_rt_tasks(rq); 1752*391e43daSPeter Zijlstra } 1753*391e43daSPeter Zijlstra 1754*391e43daSPeter Zijlstra /* 1755*391e43daSPeter Zijlstra * If we are not running and we are not going to reschedule soon, we should 1756*391e43daSPeter Zijlstra * try to push tasks away now 1757*391e43daSPeter Zijlstra */ 1758*391e43daSPeter Zijlstra static void task_woken_rt(struct rq *rq, struct task_struct *p) 1759*391e43daSPeter Zijlstra { 1760*391e43daSPeter Zijlstra if (!task_running(rq, p) && 1761*391e43daSPeter Zijlstra !test_tsk_need_resched(rq->curr) && 1762*391e43daSPeter Zijlstra has_pushable_tasks(rq) && 1763*391e43daSPeter Zijlstra p->rt.nr_cpus_allowed > 1 && 1764*391e43daSPeter Zijlstra rt_task(rq->curr) && 1765*391e43daSPeter Zijlstra (rq->curr->rt.nr_cpus_allowed < 2 || 1766*391e43daSPeter Zijlstra rq->curr->prio <= p->prio)) 1767*391e43daSPeter Zijlstra push_rt_tasks(rq); 1768*391e43daSPeter Zijlstra } 1769*391e43daSPeter Zijlstra 1770*391e43daSPeter Zijlstra static void set_cpus_allowed_rt(struct task_struct *p, 1771*391e43daSPeter Zijlstra const struct cpumask *new_mask) 1772*391e43daSPeter Zijlstra { 1773*391e43daSPeter Zijlstra int weight = cpumask_weight(new_mask); 1774*391e43daSPeter Zijlstra 1775*391e43daSPeter Zijlstra BUG_ON(!rt_task(p)); 1776*391e43daSPeter Zijlstra 
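	/*
	 * For example (hypothetical scenario): sched_setaffinity() shrinking a
	 * queued-but-not-running RT task's mask from four cpus to one ends up
	 * here with weight == 1: the task is dropped from the pushable list and
	 * stays off it, rt_nr_migratory is decremented, and
	 * update_rt_migration() then re-evaluates the rq's overloaded state.
	 */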
1777*391e43daSPeter Zijlstra /* 1778*391e43daSPeter Zijlstra * Update the migration status of the RQ if we have an RT task 1779*391e43daSPeter Zijlstra * which is running AND changing its weight value. 1780*391e43daSPeter Zijlstra */ 1781*391e43daSPeter Zijlstra if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) { 1782*391e43daSPeter Zijlstra struct rq *rq = task_rq(p); 1783*391e43daSPeter Zijlstra 1784*391e43daSPeter Zijlstra if (!task_current(rq, p)) { 1785*391e43daSPeter Zijlstra /* 1786*391e43daSPeter Zijlstra * Make sure we dequeue this task from the pushable list 1787*391e43daSPeter Zijlstra * before going further. It will either remain off of 1788*391e43daSPeter Zijlstra * the list because we are no longer pushable, or it 1789*391e43daSPeter Zijlstra * will be requeued. 1790*391e43daSPeter Zijlstra */ 1791*391e43daSPeter Zijlstra if (p->rt.nr_cpus_allowed > 1) 1792*391e43daSPeter Zijlstra dequeue_pushable_task(rq, p); 1793*391e43daSPeter Zijlstra 1794*391e43daSPeter Zijlstra /* 1795*391e43daSPeter Zijlstra * Requeue if our weight is changing and still > 1 1796*391e43daSPeter Zijlstra */ 1797*391e43daSPeter Zijlstra if (weight > 1) 1798*391e43daSPeter Zijlstra enqueue_pushable_task(rq, p); 1799*391e43daSPeter Zijlstra 1800*391e43daSPeter Zijlstra } 1801*391e43daSPeter Zijlstra 1802*391e43daSPeter Zijlstra if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) { 1803*391e43daSPeter Zijlstra rq->rt.rt_nr_migratory++; 1804*391e43daSPeter Zijlstra } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) { 1805*391e43daSPeter Zijlstra BUG_ON(!rq->rt.rt_nr_migratory); 1806*391e43daSPeter Zijlstra rq->rt.rt_nr_migratory--; 1807*391e43daSPeter Zijlstra } 1808*391e43daSPeter Zijlstra 1809*391e43daSPeter Zijlstra update_rt_migration(&rq->rt); 1810*391e43daSPeter Zijlstra } 1811*391e43daSPeter Zijlstra } 1812*391e43daSPeter Zijlstra 1813*391e43daSPeter Zijlstra /* Assumes rq->lock is held */ 1814*391e43daSPeter Zijlstra static void rq_online_rt(struct rq *rq) 1815*391e43daSPeter Zijlstra { 1816*391e43daSPeter Zijlstra if (rq->rt.overloaded) 1817*391e43daSPeter Zijlstra rt_set_overload(rq); 1818*391e43daSPeter Zijlstra 1819*391e43daSPeter Zijlstra __enable_runtime(rq); 1820*391e43daSPeter Zijlstra 1821*391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); 1822*391e43daSPeter Zijlstra } 1823*391e43daSPeter Zijlstra 1824*391e43daSPeter Zijlstra /* Assumes rq->lock is held */ 1825*391e43daSPeter Zijlstra static void rq_offline_rt(struct rq *rq) 1826*391e43daSPeter Zijlstra { 1827*391e43daSPeter Zijlstra if (rq->rt.overloaded) 1828*391e43daSPeter Zijlstra rt_clear_overload(rq); 1829*391e43daSPeter Zijlstra 1830*391e43daSPeter Zijlstra __disable_runtime(rq); 1831*391e43daSPeter Zijlstra 1832*391e43daSPeter Zijlstra cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID); 1833*391e43daSPeter Zijlstra } 1834*391e43daSPeter Zijlstra 1835*391e43daSPeter Zijlstra /* 1836*391e43daSPeter Zijlstra * When switch from the rt queue, we bring ourselves to a position 1837*391e43daSPeter Zijlstra * that we might want to pull RT tasks from other runqueues. 1838*391e43daSPeter Zijlstra */ 1839*391e43daSPeter Zijlstra static void switched_from_rt(struct rq *rq, struct task_struct *p) 1840*391e43daSPeter Zijlstra { 1841*391e43daSPeter Zijlstra /* 1842*391e43daSPeter Zijlstra * If there are other RT tasks then we will reschedule 1843*391e43daSPeter Zijlstra * and the scheduling of the other RT tasks will handle 1844*391e43daSPeter Zijlstra * the balancing. 
But if we are the last RT task 1845*391e43daSPeter Zijlstra * we may need to handle the pulling of RT tasks 1846*391e43daSPeter Zijlstra * now. 1847*391e43daSPeter Zijlstra */ 1848*391e43daSPeter Zijlstra if (p->on_rq && !rq->rt.rt_nr_running) 1849*391e43daSPeter Zijlstra pull_rt_task(rq); 1850*391e43daSPeter Zijlstra } 1851*391e43daSPeter Zijlstra 1852*391e43daSPeter Zijlstra void init_sched_rt_class(void) 1853*391e43daSPeter Zijlstra { 1854*391e43daSPeter Zijlstra unsigned int i; 1855*391e43daSPeter Zijlstra 1856*391e43daSPeter Zijlstra for_each_possible_cpu(i) { 1857*391e43daSPeter Zijlstra zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), 1858*391e43daSPeter Zijlstra GFP_KERNEL, cpu_to_node(i)); 1859*391e43daSPeter Zijlstra } 1860*391e43daSPeter Zijlstra } 1861*391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 1862*391e43daSPeter Zijlstra 1863*391e43daSPeter Zijlstra /* 1864*391e43daSPeter Zijlstra * When switching a task to RT, we may overload the runqueue 1865*391e43daSPeter Zijlstra * with RT tasks. In this case we try to push them off to 1866*391e43daSPeter Zijlstra * other runqueues. 1867*391e43daSPeter Zijlstra */ 1868*391e43daSPeter Zijlstra static void switched_to_rt(struct rq *rq, struct task_struct *p) 1869*391e43daSPeter Zijlstra { 1870*391e43daSPeter Zijlstra int check_resched = 1; 1871*391e43daSPeter Zijlstra 1872*391e43daSPeter Zijlstra /* 1873*391e43daSPeter Zijlstra * If we are already running, then there's nothing 1874*391e43daSPeter Zijlstra * that needs to be done. But if we are not running 1875*391e43daSPeter Zijlstra * we may need to preempt the current running task. 1876*391e43daSPeter Zijlstra * If that current running task is also an RT task 1877*391e43daSPeter Zijlstra * then see if we can move to another run queue. 1878*391e43daSPeter Zijlstra */ 1879*391e43daSPeter Zijlstra if (p->on_rq && rq->curr != p) { 1880*391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1881*391e43daSPeter Zijlstra if (rq->rt.overloaded && push_rt_task(rq) && 1882*391e43daSPeter Zijlstra /* Don't resched if we changed runqueues */ 1883*391e43daSPeter Zijlstra rq != task_rq(p)) 1884*391e43daSPeter Zijlstra check_resched = 0; 1885*391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 1886*391e43daSPeter Zijlstra if (check_resched && p->prio < rq->curr->prio) 1887*391e43daSPeter Zijlstra resched_task(rq->curr); 1888*391e43daSPeter Zijlstra } 1889*391e43daSPeter Zijlstra } 1890*391e43daSPeter Zijlstra 1891*391e43daSPeter Zijlstra /* 1892*391e43daSPeter Zijlstra * Priority of the task has changed. This may cause 1893*391e43daSPeter Zijlstra * us to initiate a push or pull. 1894*391e43daSPeter Zijlstra */ 1895*391e43daSPeter Zijlstra static void 1896*391e43daSPeter Zijlstra prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) 1897*391e43daSPeter Zijlstra { 1898*391e43daSPeter Zijlstra if (!p->on_rq) 1899*391e43daSPeter Zijlstra return; 1900*391e43daSPeter Zijlstra 1901*391e43daSPeter Zijlstra if (rq->curr == p) { 1902*391e43daSPeter Zijlstra #ifdef CONFIG_SMP 1903*391e43daSPeter Zijlstra /* 1904*391e43daSPeter Zijlstra * If our priority decreases while running, we 1905*391e43daSPeter Zijlstra * may need to pull tasks to this runqueue. 1906*391e43daSPeter Zijlstra */ 1907*391e43daSPeter Zijlstra if (oldprio < p->prio) 1908*391e43daSPeter Zijlstra pull_rt_task(rq); 1909*391e43daSPeter Zijlstra /* 1910*391e43daSPeter Zijlstra * If there's a higher priority task waiting to run 1911*391e43daSPeter Zijlstra * then reschedule. 
Note, the above pull_rt_task 1912*391e43daSPeter Zijlstra * can release the rq lock and p could migrate. 1913*391e43daSPeter Zijlstra * Only reschedule if p is still on the same runqueue. 1914*391e43daSPeter Zijlstra */ 1915*391e43daSPeter Zijlstra if (p->prio > rq->rt.highest_prio.curr && rq->curr == p) 1916*391e43daSPeter Zijlstra resched_task(p); 1917*391e43daSPeter Zijlstra #else 1918*391e43daSPeter Zijlstra /* For UP simply resched on drop of prio */ 1919*391e43daSPeter Zijlstra if (oldprio < p->prio) 1920*391e43daSPeter Zijlstra resched_task(p); 1921*391e43daSPeter Zijlstra #endif /* CONFIG_SMP */ 1922*391e43daSPeter Zijlstra } else { 1923*391e43daSPeter Zijlstra /* 1924*391e43daSPeter Zijlstra * This task is not running, but if it is 1925*391e43daSPeter Zijlstra * greater than the current running task 1926*391e43daSPeter Zijlstra * then reschedule. 1927*391e43daSPeter Zijlstra */ 1928*391e43daSPeter Zijlstra if (p->prio < rq->curr->prio) 1929*391e43daSPeter Zijlstra resched_task(rq->curr); 1930*391e43daSPeter Zijlstra } 1931*391e43daSPeter Zijlstra } 1932*391e43daSPeter Zijlstra 1933*391e43daSPeter Zijlstra static void watchdog(struct rq *rq, struct task_struct *p) 1934*391e43daSPeter Zijlstra { 1935*391e43daSPeter Zijlstra unsigned long soft, hard; 1936*391e43daSPeter Zijlstra 1937*391e43daSPeter Zijlstra /* max may change after cur was read, this will be fixed next tick */ 1938*391e43daSPeter Zijlstra soft = task_rlimit(p, RLIMIT_RTTIME); 1939*391e43daSPeter Zijlstra hard = task_rlimit_max(p, RLIMIT_RTTIME); 1940*391e43daSPeter Zijlstra 1941*391e43daSPeter Zijlstra if (soft != RLIM_INFINITY) { 1942*391e43daSPeter Zijlstra unsigned long next; 1943*391e43daSPeter Zijlstra 1944*391e43daSPeter Zijlstra p->rt.timeout++; 1945*391e43daSPeter Zijlstra next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); 1946*391e43daSPeter Zijlstra if (p->rt.timeout > next) 1947*391e43daSPeter Zijlstra p->cputime_expires.sched_exp = p->se.sum_exec_runtime; 1948*391e43daSPeter Zijlstra } 1949*391e43daSPeter Zijlstra } 1950*391e43daSPeter Zijlstra 1951*391e43daSPeter Zijlstra static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) 1952*391e43daSPeter Zijlstra { 1953*391e43daSPeter Zijlstra update_curr_rt(rq); 1954*391e43daSPeter Zijlstra 1955*391e43daSPeter Zijlstra watchdog(rq, p); 1956*391e43daSPeter Zijlstra 1957*391e43daSPeter Zijlstra /* 1958*391e43daSPeter Zijlstra * RR tasks need a special form of timeslice management. 1959*391e43daSPeter Zijlstra * FIFO tasks have no timeslices. 
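 *
 * Concretely: each tick decrements p->rt.time_slice for a SCHED_RR
 * task; when it hits zero it is refilled to DEF_TIMESLICE (on the
 * order of 100ms worth of ticks) and, if other tasks share this
 * priority list, the task is moved to the tail and need_resched is
 * set, which is what produces the round-robin rotation.  SCHED_FIFO
 * tasks return early below and run until they block, yield or are
 * preempted.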
1960*391e43daSPeter Zijlstra */ 1961*391e43daSPeter Zijlstra if (p->policy != SCHED_RR) 1962*391e43daSPeter Zijlstra return; 1963*391e43daSPeter Zijlstra 1964*391e43daSPeter Zijlstra if (--p->rt.time_slice) 1965*391e43daSPeter Zijlstra return; 1966*391e43daSPeter Zijlstra 1967*391e43daSPeter Zijlstra p->rt.time_slice = DEF_TIMESLICE; 1968*391e43daSPeter Zijlstra 1969*391e43daSPeter Zijlstra /* 1970*391e43daSPeter Zijlstra * Requeue to the end of queue if we are not the only element 1971*391e43daSPeter Zijlstra * on the queue: 1972*391e43daSPeter Zijlstra */ 1973*391e43daSPeter Zijlstra if (p->rt.run_list.prev != p->rt.run_list.next) { 1974*391e43daSPeter Zijlstra requeue_task_rt(rq, p, 0); 1975*391e43daSPeter Zijlstra set_tsk_need_resched(p); 1976*391e43daSPeter Zijlstra } 1977*391e43daSPeter Zijlstra } 1978*391e43daSPeter Zijlstra 1979*391e43daSPeter Zijlstra static void set_curr_task_rt(struct rq *rq) 1980*391e43daSPeter Zijlstra { 1981*391e43daSPeter Zijlstra struct task_struct *p = rq->curr; 1982*391e43daSPeter Zijlstra 1983*391e43daSPeter Zijlstra p->se.exec_start = rq->clock_task; 1984*391e43daSPeter Zijlstra 1985*391e43daSPeter Zijlstra /* The running task is never eligible for pushing */ 1986*391e43daSPeter Zijlstra dequeue_pushable_task(rq, p); 1987*391e43daSPeter Zijlstra } 1988*391e43daSPeter Zijlstra 1989*391e43daSPeter Zijlstra static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) 1990*391e43daSPeter Zijlstra { 1991*391e43daSPeter Zijlstra /* 1992*391e43daSPeter Zijlstra * Time slice is 0 for SCHED_FIFO tasks 1993*391e43daSPeter Zijlstra */ 1994*391e43daSPeter Zijlstra if (task->policy == SCHED_RR) 1995*391e43daSPeter Zijlstra return DEF_TIMESLICE; 1996*391e43daSPeter Zijlstra else 1997*391e43daSPeter Zijlstra return 0; 1998*391e43daSPeter Zijlstra } 1999*391e43daSPeter Zijlstra 2000*391e43daSPeter Zijlstra const struct sched_class rt_sched_class = { 2001*391e43daSPeter Zijlstra .next = &fair_sched_class, 2002*391e43daSPeter Zijlstra .enqueue_task = enqueue_task_rt, 2003*391e43daSPeter Zijlstra .dequeue_task = dequeue_task_rt, 2004*391e43daSPeter Zijlstra .yield_task = yield_task_rt, 2005*391e43daSPeter Zijlstra 2006*391e43daSPeter Zijlstra .check_preempt_curr = check_preempt_curr_rt, 2007*391e43daSPeter Zijlstra 2008*391e43daSPeter Zijlstra .pick_next_task = pick_next_task_rt, 2009*391e43daSPeter Zijlstra .put_prev_task = put_prev_task_rt, 2010*391e43daSPeter Zijlstra 2011*391e43daSPeter Zijlstra #ifdef CONFIG_SMP 2012*391e43daSPeter Zijlstra .select_task_rq = select_task_rq_rt, 2013*391e43daSPeter Zijlstra 2014*391e43daSPeter Zijlstra .set_cpus_allowed = set_cpus_allowed_rt, 2015*391e43daSPeter Zijlstra .rq_online = rq_online_rt, 2016*391e43daSPeter Zijlstra .rq_offline = rq_offline_rt, 2017*391e43daSPeter Zijlstra .pre_schedule = pre_schedule_rt, 2018*391e43daSPeter Zijlstra .post_schedule = post_schedule_rt, 2019*391e43daSPeter Zijlstra .task_woken = task_woken_rt, 2020*391e43daSPeter Zijlstra .switched_from = switched_from_rt, 2021*391e43daSPeter Zijlstra #endif 2022*391e43daSPeter Zijlstra 2023*391e43daSPeter Zijlstra .set_curr_task = set_curr_task_rt, 2024*391e43daSPeter Zijlstra .task_tick = task_tick_rt, 2025*391e43daSPeter Zijlstra 2026*391e43daSPeter Zijlstra .get_rr_interval = get_rr_interval_rt, 2027*391e43daSPeter Zijlstra 2028*391e43daSPeter Zijlstra .prio_changed = prio_changed_rt, 2029*391e43daSPeter Zijlstra .switched_to = switched_to_rt, 2030*391e43daSPeter Zijlstra }; 2031*391e43daSPeter Zijlstra 2032*391e43daSPeter Zijlstra #ifdef 
CONFIG_SCHED_DEBUG 2033*391e43daSPeter Zijlstra extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); 2034*391e43daSPeter Zijlstra 2035*391e43daSPeter Zijlstra void print_rt_stats(struct seq_file *m, int cpu) 2036*391e43daSPeter Zijlstra { 2037*391e43daSPeter Zijlstra rt_rq_iter_t iter; 2038*391e43daSPeter Zijlstra struct rt_rq *rt_rq; 2039*391e43daSPeter Zijlstra 2040*391e43daSPeter Zijlstra rcu_read_lock(); 2041*391e43daSPeter Zijlstra for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) 2042*391e43daSPeter Zijlstra print_rt_rq(m, cpu, rt_rq); 2043*391e43daSPeter Zijlstra rcu_read_unlock(); 2044*391e43daSPeter Zijlstra } 2045*391e43daSPeter Zijlstra #endif /* CONFIG_SCHED_DEBUG */
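
/*
 * A minimal user-space sketch (illustrative only, not part of rt.c):
 * it switches the calling thread to SCHED_RR through the standard
 * sched_setscheduler() interface handled by this class, then reads the
 * round-robin timeslice back, which the kernel answers via the
 * get_rr_interval hook above.  Setting an RT policy needs sufficient
 * privilege (e.g. CAP_SYS_NICE), and the priority value 10 is just an
 * arbitrary example.
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };
	struct timespec ts;

	/* Become a SCHED_RR task at RT priority 10 (valid range is 1..99). */
	if (sched_setscheduler(0, SCHED_RR, &sp) == -1) {
		perror("sched_setscheduler");
		return 1;
	}

	/* Query the RR timeslice the scheduler will use for this task. */
	if (sched_rr_get_interval(0, &ts) == 0)
		printf("SCHED_RR timeslice: %ld.%09ld s\n",
		       (long)ts.tv_sec, (long)ts.tv_nsec);

	return 0;
}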