1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR 4 * policies) 5 */ 6 7 int sched_rr_timeslice = RR_TIMESLICE; 8 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE; 9 /* More than 4 hours if BW_SHIFT equals 20. */ 10 static const u64 max_rt_runtime = MAX_BW; 11 12 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); 13 14 struct rt_bandwidth def_rt_bandwidth; 15 16 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer) 17 { 18 struct rt_bandwidth *rt_b = 19 container_of(timer, struct rt_bandwidth, rt_period_timer); 20 int idle = 0; 21 int overrun; 22 23 raw_spin_lock(&rt_b->rt_runtime_lock); 24 for (;;) { 25 overrun = hrtimer_forward_now(timer, rt_b->rt_period); 26 if (!overrun) 27 break; 28 29 raw_spin_unlock(&rt_b->rt_runtime_lock); 30 idle = do_sched_rt_period_timer(rt_b, overrun); 31 raw_spin_lock(&rt_b->rt_runtime_lock); 32 } 33 if (idle) 34 rt_b->rt_period_active = 0; 35 raw_spin_unlock(&rt_b->rt_runtime_lock); 36 37 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; 38 } 39 40 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) 41 { 42 rt_b->rt_period = ns_to_ktime(period); 43 rt_b->rt_runtime = runtime; 44 45 raw_spin_lock_init(&rt_b->rt_runtime_lock); 46 47 hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, 48 HRTIMER_MODE_REL_HARD); 49 rt_b->rt_period_timer.function = sched_rt_period_timer; 50 } 51 52 static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b) 53 { 54 raw_spin_lock(&rt_b->rt_runtime_lock); 55 if (!rt_b->rt_period_active) { 56 rt_b->rt_period_active = 1; 57 /* 58 * SCHED_DEADLINE updates the bandwidth, as a run away 59 * RT task with a DL task could hog a CPU. But DL does 60 * not reset the period. If a deadline task was running 61 * without an RT task running, it can cause RT tasks to 62 * throttle when they start up. Kick the timer right away 63 * to update the period. 64 */ 65 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0)); 66 hrtimer_start_expires(&rt_b->rt_period_timer, 67 HRTIMER_MODE_ABS_PINNED_HARD); 68 } 69 raw_spin_unlock(&rt_b->rt_runtime_lock); 70 } 71 72 static void start_rt_bandwidth(struct rt_bandwidth *rt_b) 73 { 74 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) 75 return; 76 77 do_start_rt_bandwidth(rt_b); 78 } 79 80 void init_rt_rq(struct rt_rq *rt_rq) 81 { 82 struct rt_prio_array *array; 83 int i; 84 85 array = &rt_rq->active; 86 for (i = 0; i < MAX_RT_PRIO; i++) { 87 INIT_LIST_HEAD(array->queue + i); 88 __clear_bit(i, array->bitmap); 89 } 90 /* delimiter for bitsearch: */ 91 __set_bit(MAX_RT_PRIO, array->bitmap); 92 93 #if defined CONFIG_SMP 94 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; 95 rt_rq->highest_prio.next = MAX_RT_PRIO-1; 96 rt_rq->rt_nr_migratory = 0; 97 rt_rq->overloaded = 0; 98 plist_head_init(&rt_rq->pushable_tasks); 99 #endif /* CONFIG_SMP */ 100 /* We start is dequeued state, because no RT tasks are queued */ 101 rt_rq->rt_queued = 0; 102 103 rt_rq->rt_time = 0; 104 rt_rq->rt_throttled = 0; 105 rt_rq->rt_runtime = 0; 106 raw_spin_lock_init(&rt_rq->rt_runtime_lock); 107 } 108 109 #ifdef CONFIG_RT_GROUP_SCHED 110 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) 111 { 112 hrtimer_cancel(&rt_b->rt_period_timer); 113 } 114 115 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q) 116 117 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) 118 { 119 #ifdef CONFIG_SCHED_DEBUG 120 WARN_ON_ONCE(!rt_entity_is_task(rt_se)); 121 #endif 122 return container_of(rt_se, struct task_struct, rt); 123 } 124 125 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) 126 { 127 return rt_rq->rq; 128 } 129 130 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) 131 { 132 return rt_se->rt_rq; 133 } 134 135 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) 136 { 137 struct rt_rq *rt_rq = rt_se->rt_rq; 138 139 return rt_rq->rq; 140 } 141 142 void unregister_rt_sched_group(struct task_group *tg) 143 { 144 if (tg->rt_se) 145 destroy_rt_bandwidth(&tg->rt_bandwidth); 146 147 } 148 149 void free_rt_sched_group(struct task_group *tg) 150 { 151 int i; 152 153 for_each_possible_cpu(i) { 154 if (tg->rt_rq) 155 kfree(tg->rt_rq[i]); 156 if (tg->rt_se) 157 kfree(tg->rt_se[i]); 158 } 159 160 kfree(tg->rt_rq); 161 kfree(tg->rt_se); 162 } 163 164 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, 165 struct sched_rt_entity *rt_se, int cpu, 166 struct sched_rt_entity *parent) 167 { 168 struct rq *rq = cpu_rq(cpu); 169 170 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; 171 rt_rq->rt_nr_boosted = 0; 172 rt_rq->rq = rq; 173 rt_rq->tg = tg; 174 175 tg->rt_rq[cpu] = rt_rq; 176 tg->rt_se[cpu] = rt_se; 177 178 if (!rt_se) 179 return; 180 181 if (!parent) 182 rt_se->rt_rq = &rq->rt; 183 else 184 rt_se->rt_rq = parent->my_q; 185 186 rt_se->my_q = rt_rq; 187 rt_se->parent = parent; 188 INIT_LIST_HEAD(&rt_se->run_list); 189 } 190 191 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) 192 { 193 struct rt_rq *rt_rq; 194 struct sched_rt_entity *rt_se; 195 int i; 196 197 tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL); 198 if (!tg->rt_rq) 199 goto err; 200 tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL); 201 if (!tg->rt_se) 202 goto err; 203 204 init_rt_bandwidth(&tg->rt_bandwidth, 205 ktime_to_ns(def_rt_bandwidth.rt_period), 0); 206 207 for_each_possible_cpu(i) { 208 rt_rq = kzalloc_node(sizeof(struct rt_rq), 209 GFP_KERNEL, cpu_to_node(i)); 210 if (!rt_rq) 211 goto err; 212 213 rt_se = kzalloc_node(sizeof(struct sched_rt_entity), 214 GFP_KERNEL, cpu_to_node(i)); 215 if (!rt_se) 216 goto err_free_rq; 217 218 init_rt_rq(rt_rq); 219 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime; 220 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]); 221 } 222 223 return 1; 224 225 err_free_rq: 226 kfree(rt_rq); 227 err: 228 return 0; 229 } 230 231 #else /* CONFIG_RT_GROUP_SCHED */ 232 233 #define rt_entity_is_task(rt_se) (1) 234 235 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) 236 { 237 return container_of(rt_se, struct task_struct, rt); 238 } 239 240 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) 241 { 242 return container_of(rt_rq, struct rq, rt); 243 } 244 245 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) 246 { 247 struct task_struct *p = rt_task_of(rt_se); 248 249 return task_rq(p); 250 } 251 252 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) 253 { 254 struct rq *rq = rq_of_rt_se(rt_se); 255 256 return &rq->rt; 257 } 258 259 void unregister_rt_sched_group(struct task_group *tg) { } 260 261 void free_rt_sched_group(struct task_group *tg) { } 262 263 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) 264 { 265 return 1; 266 } 267 #endif /* CONFIG_RT_GROUP_SCHED */ 268 269 #ifdef CONFIG_SMP 270 271 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev) 272 { 273 /* Try to pull RT tasks here if we lower this rq's prio */ 274 return rq->online && rq->rt.highest_prio.curr > prev->prio; 275 } 276 277 static inline int rt_overloaded(struct rq *rq) 278 { 279 return atomic_read(&rq->rd->rto_count); 280 } 281 282 static inline void rt_set_overload(struct rq *rq) 283 { 284 if (!rq->online) 285 return; 286 287 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask); 288 /* 289 * Make sure the mask is visible before we set 290 * the overload count. That is checked to determine 291 * if we should look at the mask. It would be a shame 292 * if we looked at the mask, but the mask was not 293 * updated yet. 294 * 295 * Matched by the barrier in pull_rt_task(). 296 */ 297 smp_wmb(); 298 atomic_inc(&rq->rd->rto_count); 299 } 300 301 static inline void rt_clear_overload(struct rq *rq) 302 { 303 if (!rq->online) 304 return; 305 306 /* the order here really doesn't matter */ 307 atomic_dec(&rq->rd->rto_count); 308 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); 309 } 310 311 static void update_rt_migration(struct rt_rq *rt_rq) 312 { 313 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) { 314 if (!rt_rq->overloaded) { 315 rt_set_overload(rq_of_rt_rq(rt_rq)); 316 rt_rq->overloaded = 1; 317 } 318 } else if (rt_rq->overloaded) { 319 rt_clear_overload(rq_of_rt_rq(rt_rq)); 320 rt_rq->overloaded = 0; 321 } 322 } 323 324 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 325 { 326 struct task_struct *p; 327 328 if (!rt_entity_is_task(rt_se)) 329 return; 330 331 p = rt_task_of(rt_se); 332 rt_rq = &rq_of_rt_rq(rt_rq)->rt; 333 334 rt_rq->rt_nr_total++; 335 if (p->nr_cpus_allowed > 1) 336 rt_rq->rt_nr_migratory++; 337 338 update_rt_migration(rt_rq); 339 } 340 341 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 342 { 343 struct task_struct *p; 344 345 if (!rt_entity_is_task(rt_se)) 346 return; 347 348 p = rt_task_of(rt_se); 349 rt_rq = &rq_of_rt_rq(rt_rq)->rt; 350 351 rt_rq->rt_nr_total--; 352 if (p->nr_cpus_allowed > 1) 353 rt_rq->rt_nr_migratory--; 354 355 update_rt_migration(rt_rq); 356 } 357 358 static inline int has_pushable_tasks(struct rq *rq) 359 { 360 return !plist_head_empty(&rq->rt.pushable_tasks); 361 } 362 363 static DEFINE_PER_CPU(struct callback_head, rt_push_head); 364 static DEFINE_PER_CPU(struct callback_head, rt_pull_head); 365 366 static void push_rt_tasks(struct rq *); 367 static void pull_rt_task(struct rq *); 368 369 static inline void rt_queue_push_tasks(struct rq *rq) 370 { 371 if (!has_pushable_tasks(rq)) 372 return; 373 374 queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks); 375 } 376 377 static inline void rt_queue_pull_task(struct rq *rq) 378 { 379 queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task); 380 } 381 382 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) 383 { 384 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); 385 plist_node_init(&p->pushable_tasks, p->prio); 386 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); 387 388 /* Update the highest prio pushable task */ 389 if (p->prio < rq->rt.highest_prio.next) 390 rq->rt.highest_prio.next = p->prio; 391 } 392 393 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p) 394 { 395 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); 396 397 /* Update the new highest prio pushable task */ 398 if (has_pushable_tasks(rq)) { 399 p = plist_first_entry(&rq->rt.pushable_tasks, 400 struct task_struct, pushable_tasks); 401 rq->rt.highest_prio.next = p->prio; 402 } else { 403 rq->rt.highest_prio.next = MAX_RT_PRIO-1; 404 } 405 } 406 407 #else 408 409 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) 410 { 411 } 412 413 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p) 414 { 415 } 416 417 static inline 418 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 419 { 420 } 421 422 static inline 423 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 424 { 425 } 426 427 static inline void rt_queue_push_tasks(struct rq *rq) 428 { 429 } 430 #endif /* CONFIG_SMP */ 431 432 static void enqueue_top_rt_rq(struct rt_rq *rt_rq); 433 static void dequeue_top_rt_rq(struct rt_rq *rt_rq); 434 435 static inline int on_rt_rq(struct sched_rt_entity *rt_se) 436 { 437 return rt_se->on_rq; 438 } 439 440 #ifdef CONFIG_UCLAMP_TASK 441 /* 442 * Verify the fitness of task @p to run on @cpu taking into account the uclamp 443 * settings. 444 * 445 * This check is only important for heterogeneous systems where uclamp_min value 446 * is higher than the capacity of a @cpu. For non-heterogeneous system this 447 * function will always return true. 448 * 449 * The function will return true if the capacity of the @cpu is >= the 450 * uclamp_min and false otherwise. 451 * 452 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min 453 * > uclamp_max. 454 */ 455 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu) 456 { 457 unsigned int min_cap; 458 unsigned int max_cap; 459 unsigned int cpu_cap; 460 461 /* Only heterogeneous systems can benefit from this check */ 462 if (!static_branch_unlikely(&sched_asym_cpucapacity)) 463 return true; 464 465 min_cap = uclamp_eff_value(p, UCLAMP_MIN); 466 max_cap = uclamp_eff_value(p, UCLAMP_MAX); 467 468 cpu_cap = capacity_orig_of(cpu); 469 470 return cpu_cap >= min(min_cap, max_cap); 471 } 472 #else 473 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu) 474 { 475 return true; 476 } 477 #endif 478 479 #ifdef CONFIG_RT_GROUP_SCHED 480 481 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) 482 { 483 if (!rt_rq->tg) 484 return RUNTIME_INF; 485 486 return rt_rq->rt_runtime; 487 } 488 489 static inline u64 sched_rt_period(struct rt_rq *rt_rq) 490 { 491 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); 492 } 493 494 typedef struct task_group *rt_rq_iter_t; 495 496 static inline struct task_group *next_task_group(struct task_group *tg) 497 { 498 do { 499 tg = list_entry_rcu(tg->list.next, 500 typeof(struct task_group), list); 501 } while (&tg->list != &task_groups && task_group_is_autogroup(tg)); 502 503 if (&tg->list == &task_groups) 504 tg = NULL; 505 506 return tg; 507 } 508 509 #define for_each_rt_rq(rt_rq, iter, rq) \ 510 for (iter = container_of(&task_groups, typeof(*iter), list); \ 511 (iter = next_task_group(iter)) && \ 512 (rt_rq = iter->rt_rq[cpu_of(rq)]);) 513 514 #define for_each_sched_rt_entity(rt_se) \ 515 for (; rt_se; rt_se = rt_se->parent) 516 517 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) 518 { 519 return rt_se->my_q; 520 } 521 522 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags); 523 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags); 524 525 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) 526 { 527 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; 528 struct rq *rq = rq_of_rt_rq(rt_rq); 529 struct sched_rt_entity *rt_se; 530 531 int cpu = cpu_of(rq); 532 533 rt_se = rt_rq->tg->rt_se[cpu]; 534 535 if (rt_rq->rt_nr_running) { 536 if (!rt_se) 537 enqueue_top_rt_rq(rt_rq); 538 else if (!on_rt_rq(rt_se)) 539 enqueue_rt_entity(rt_se, 0); 540 541 if (rt_rq->highest_prio.curr < curr->prio) 542 resched_curr(rq); 543 } 544 } 545 546 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) 547 { 548 struct sched_rt_entity *rt_se; 549 int cpu = cpu_of(rq_of_rt_rq(rt_rq)); 550 551 rt_se = rt_rq->tg->rt_se[cpu]; 552 553 if (!rt_se) { 554 dequeue_top_rt_rq(rt_rq); 555 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ 556 cpufreq_update_util(rq_of_rt_rq(rt_rq), 0); 557 } 558 else if (on_rt_rq(rt_se)) 559 dequeue_rt_entity(rt_se, 0); 560 } 561 562 static inline int rt_rq_throttled(struct rt_rq *rt_rq) 563 { 564 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; 565 } 566 567 static int rt_se_boosted(struct sched_rt_entity *rt_se) 568 { 569 struct rt_rq *rt_rq = group_rt_rq(rt_se); 570 struct task_struct *p; 571 572 if (rt_rq) 573 return !!rt_rq->rt_nr_boosted; 574 575 p = rt_task_of(rt_se); 576 return p->prio != p->normal_prio; 577 } 578 579 #ifdef CONFIG_SMP 580 static inline const struct cpumask *sched_rt_period_mask(void) 581 { 582 return this_rq()->rd->span; 583 } 584 #else 585 static inline const struct cpumask *sched_rt_period_mask(void) 586 { 587 return cpu_online_mask; 588 } 589 #endif 590 591 static inline 592 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) 593 { 594 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; 595 } 596 597 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) 598 { 599 return &rt_rq->tg->rt_bandwidth; 600 } 601 602 #else /* !CONFIG_RT_GROUP_SCHED */ 603 604 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) 605 { 606 return rt_rq->rt_runtime; 607 } 608 609 static inline u64 sched_rt_period(struct rt_rq *rt_rq) 610 { 611 return ktime_to_ns(def_rt_bandwidth.rt_period); 612 } 613 614 typedef struct rt_rq *rt_rq_iter_t; 615 616 #define for_each_rt_rq(rt_rq, iter, rq) \ 617 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL) 618 619 #define for_each_sched_rt_entity(rt_se) \ 620 for (; rt_se; rt_se = NULL) 621 622 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) 623 { 624 return NULL; 625 } 626 627 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) 628 { 629 struct rq *rq = rq_of_rt_rq(rt_rq); 630 631 if (!rt_rq->rt_nr_running) 632 return; 633 634 enqueue_top_rt_rq(rt_rq); 635 resched_curr(rq); 636 } 637 638 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) 639 { 640 dequeue_top_rt_rq(rt_rq); 641 } 642 643 static inline int rt_rq_throttled(struct rt_rq *rt_rq) 644 { 645 return rt_rq->rt_throttled; 646 } 647 648 static inline const struct cpumask *sched_rt_period_mask(void) 649 { 650 return cpu_online_mask; 651 } 652 653 static inline 654 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) 655 { 656 return &cpu_rq(cpu)->rt; 657 } 658 659 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) 660 { 661 return &def_rt_bandwidth; 662 } 663 664 #endif /* CONFIG_RT_GROUP_SCHED */ 665 666 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) 667 { 668 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 669 670 return (hrtimer_active(&rt_b->rt_period_timer) || 671 rt_rq->rt_time < rt_b->rt_runtime); 672 } 673 674 #ifdef CONFIG_SMP 675 /* 676 * We ran out of runtime, see if we can borrow some from our neighbours. 677 */ 678 static void do_balance_runtime(struct rt_rq *rt_rq) 679 { 680 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 681 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; 682 int i, weight; 683 u64 rt_period; 684 685 weight = cpumask_weight(rd->span); 686 687 raw_spin_lock(&rt_b->rt_runtime_lock); 688 rt_period = ktime_to_ns(rt_b->rt_period); 689 for_each_cpu(i, rd->span) { 690 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 691 s64 diff; 692 693 if (iter == rt_rq) 694 continue; 695 696 raw_spin_lock(&iter->rt_runtime_lock); 697 /* 698 * Either all rqs have inf runtime and there's nothing to steal 699 * or __disable_runtime() below sets a specific rq to inf to 700 * indicate its been disabled and disallow stealing. 701 */ 702 if (iter->rt_runtime == RUNTIME_INF) 703 goto next; 704 705 /* 706 * From runqueues with spare time, take 1/n part of their 707 * spare time, but no more than our period. 708 */ 709 diff = iter->rt_runtime - iter->rt_time; 710 if (diff > 0) { 711 diff = div_u64((u64)diff, weight); 712 if (rt_rq->rt_runtime + diff > rt_period) 713 diff = rt_period - rt_rq->rt_runtime; 714 iter->rt_runtime -= diff; 715 rt_rq->rt_runtime += diff; 716 if (rt_rq->rt_runtime == rt_period) { 717 raw_spin_unlock(&iter->rt_runtime_lock); 718 break; 719 } 720 } 721 next: 722 raw_spin_unlock(&iter->rt_runtime_lock); 723 } 724 raw_spin_unlock(&rt_b->rt_runtime_lock); 725 } 726 727 /* 728 * Ensure this RQ takes back all the runtime it lend to its neighbours. 729 */ 730 static void __disable_runtime(struct rq *rq) 731 { 732 struct root_domain *rd = rq->rd; 733 rt_rq_iter_t iter; 734 struct rt_rq *rt_rq; 735 736 if (unlikely(!scheduler_running)) 737 return; 738 739 for_each_rt_rq(rt_rq, iter, rq) { 740 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 741 s64 want; 742 int i; 743 744 raw_spin_lock(&rt_b->rt_runtime_lock); 745 raw_spin_lock(&rt_rq->rt_runtime_lock); 746 /* 747 * Either we're all inf and nobody needs to borrow, or we're 748 * already disabled and thus have nothing to do, or we have 749 * exactly the right amount of runtime to take out. 750 */ 751 if (rt_rq->rt_runtime == RUNTIME_INF || 752 rt_rq->rt_runtime == rt_b->rt_runtime) 753 goto balanced; 754 raw_spin_unlock(&rt_rq->rt_runtime_lock); 755 756 /* 757 * Calculate the difference between what we started out with 758 * and what we current have, that's the amount of runtime 759 * we lend and now have to reclaim. 760 */ 761 want = rt_b->rt_runtime - rt_rq->rt_runtime; 762 763 /* 764 * Greedy reclaim, take back as much as we can. 765 */ 766 for_each_cpu(i, rd->span) { 767 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 768 s64 diff; 769 770 /* 771 * Can't reclaim from ourselves or disabled runqueues. 772 */ 773 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) 774 continue; 775 776 raw_spin_lock(&iter->rt_runtime_lock); 777 if (want > 0) { 778 diff = min_t(s64, iter->rt_runtime, want); 779 iter->rt_runtime -= diff; 780 want -= diff; 781 } else { 782 iter->rt_runtime -= want; 783 want -= want; 784 } 785 raw_spin_unlock(&iter->rt_runtime_lock); 786 787 if (!want) 788 break; 789 } 790 791 raw_spin_lock(&rt_rq->rt_runtime_lock); 792 /* 793 * We cannot be left wanting - that would mean some runtime 794 * leaked out of the system. 795 */ 796 BUG_ON(want); 797 balanced: 798 /* 799 * Disable all the borrow logic by pretending we have inf 800 * runtime - in which case borrowing doesn't make sense. 801 */ 802 rt_rq->rt_runtime = RUNTIME_INF; 803 rt_rq->rt_throttled = 0; 804 raw_spin_unlock(&rt_rq->rt_runtime_lock); 805 raw_spin_unlock(&rt_b->rt_runtime_lock); 806 807 /* Make rt_rq available for pick_next_task() */ 808 sched_rt_rq_enqueue(rt_rq); 809 } 810 } 811 812 static void __enable_runtime(struct rq *rq) 813 { 814 rt_rq_iter_t iter; 815 struct rt_rq *rt_rq; 816 817 if (unlikely(!scheduler_running)) 818 return; 819 820 /* 821 * Reset each runqueue's bandwidth settings 822 */ 823 for_each_rt_rq(rt_rq, iter, rq) { 824 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 825 826 raw_spin_lock(&rt_b->rt_runtime_lock); 827 raw_spin_lock(&rt_rq->rt_runtime_lock); 828 rt_rq->rt_runtime = rt_b->rt_runtime; 829 rt_rq->rt_time = 0; 830 rt_rq->rt_throttled = 0; 831 raw_spin_unlock(&rt_rq->rt_runtime_lock); 832 raw_spin_unlock(&rt_b->rt_runtime_lock); 833 } 834 } 835 836 static void balance_runtime(struct rt_rq *rt_rq) 837 { 838 if (!sched_feat(RT_RUNTIME_SHARE)) 839 return; 840 841 if (rt_rq->rt_time > rt_rq->rt_runtime) { 842 raw_spin_unlock(&rt_rq->rt_runtime_lock); 843 do_balance_runtime(rt_rq); 844 raw_spin_lock(&rt_rq->rt_runtime_lock); 845 } 846 } 847 #else /* !CONFIG_SMP */ 848 static inline void balance_runtime(struct rt_rq *rt_rq) {} 849 #endif /* CONFIG_SMP */ 850 851 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) 852 { 853 int i, idle = 1, throttled = 0; 854 const struct cpumask *span; 855 856 span = sched_rt_period_mask(); 857 #ifdef CONFIG_RT_GROUP_SCHED 858 /* 859 * FIXME: isolated CPUs should really leave the root task group, 860 * whether they are isolcpus or were isolated via cpusets, lest 861 * the timer run on a CPU which does not service all runqueues, 862 * potentially leaving other CPUs indefinitely throttled. If 863 * isolation is really required, the user will turn the throttle 864 * off to kill the perturbations it causes anyway. Meanwhile, 865 * this maintains functionality for boot and/or troubleshooting. 866 */ 867 if (rt_b == &root_task_group.rt_bandwidth) 868 span = cpu_online_mask; 869 #endif 870 for_each_cpu(i, span) { 871 int enqueue = 0; 872 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); 873 struct rq *rq = rq_of_rt_rq(rt_rq); 874 struct rq_flags rf; 875 int skip; 876 877 /* 878 * When span == cpu_online_mask, taking each rq->lock 879 * can be time-consuming. Try to avoid it when possible. 880 */ 881 raw_spin_lock(&rt_rq->rt_runtime_lock); 882 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF) 883 rt_rq->rt_runtime = rt_b->rt_runtime; 884 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running; 885 raw_spin_unlock(&rt_rq->rt_runtime_lock); 886 if (skip) 887 continue; 888 889 rq_lock(rq, &rf); 890 update_rq_clock(rq); 891 892 if (rt_rq->rt_time) { 893 u64 runtime; 894 895 raw_spin_lock(&rt_rq->rt_runtime_lock); 896 if (rt_rq->rt_throttled) 897 balance_runtime(rt_rq); 898 runtime = rt_rq->rt_runtime; 899 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime); 900 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { 901 rt_rq->rt_throttled = 0; 902 enqueue = 1; 903 904 /* 905 * When we're idle and a woken (rt) task is 906 * throttled check_preempt_curr() will set 907 * skip_update and the time between the wakeup 908 * and this unthrottle will get accounted as 909 * 'runtime'. 910 */ 911 if (rt_rq->rt_nr_running && rq->curr == rq->idle) 912 rq_clock_cancel_skipupdate(rq); 913 } 914 if (rt_rq->rt_time || rt_rq->rt_nr_running) 915 idle = 0; 916 raw_spin_unlock(&rt_rq->rt_runtime_lock); 917 } else if (rt_rq->rt_nr_running) { 918 idle = 0; 919 if (!rt_rq_throttled(rt_rq)) 920 enqueue = 1; 921 } 922 if (rt_rq->rt_throttled) 923 throttled = 1; 924 925 if (enqueue) 926 sched_rt_rq_enqueue(rt_rq); 927 rq_unlock(rq, &rf); 928 } 929 930 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)) 931 return 1; 932 933 return idle; 934 } 935 936 static inline int rt_se_prio(struct sched_rt_entity *rt_se) 937 { 938 #ifdef CONFIG_RT_GROUP_SCHED 939 struct rt_rq *rt_rq = group_rt_rq(rt_se); 940 941 if (rt_rq) 942 return rt_rq->highest_prio.curr; 943 #endif 944 945 return rt_task_of(rt_se)->prio; 946 } 947 948 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) 949 { 950 u64 runtime = sched_rt_runtime(rt_rq); 951 952 if (rt_rq->rt_throttled) 953 return rt_rq_throttled(rt_rq); 954 955 if (runtime >= sched_rt_period(rt_rq)) 956 return 0; 957 958 balance_runtime(rt_rq); 959 runtime = sched_rt_runtime(rt_rq); 960 if (runtime == RUNTIME_INF) 961 return 0; 962 963 if (rt_rq->rt_time > runtime) { 964 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 965 966 /* 967 * Don't actually throttle groups that have no runtime assigned 968 * but accrue some time due to boosting. 969 */ 970 if (likely(rt_b->rt_runtime)) { 971 rt_rq->rt_throttled = 1; 972 printk_deferred_once("sched: RT throttling activated\n"); 973 } else { 974 /* 975 * In case we did anyway, make it go away, 976 * replenishment is a joke, since it will replenish us 977 * with exactly 0 ns. 978 */ 979 rt_rq->rt_time = 0; 980 } 981 982 if (rt_rq_throttled(rt_rq)) { 983 sched_rt_rq_dequeue(rt_rq); 984 return 1; 985 } 986 } 987 988 return 0; 989 } 990 991 /* 992 * Update the current task's runtime statistics. Skip current tasks that 993 * are not in our scheduling class. 994 */ 995 static void update_curr_rt(struct rq *rq) 996 { 997 struct task_struct *curr = rq->curr; 998 struct sched_rt_entity *rt_se = &curr->rt; 999 u64 delta_exec; 1000 u64 now; 1001 1002 if (curr->sched_class != &rt_sched_class) 1003 return; 1004 1005 now = rq_clock_task(rq); 1006 delta_exec = now - curr->se.exec_start; 1007 if (unlikely((s64)delta_exec <= 0)) 1008 return; 1009 1010 schedstat_set(curr->stats.exec_max, 1011 max(curr->stats.exec_max, delta_exec)); 1012 1013 trace_sched_stat_runtime(curr, delta_exec, 0); 1014 1015 curr->se.sum_exec_runtime += delta_exec; 1016 account_group_exec_runtime(curr, delta_exec); 1017 1018 curr->se.exec_start = now; 1019 cgroup_account_cputime(curr, delta_exec); 1020 1021 if (!rt_bandwidth_enabled()) 1022 return; 1023 1024 for_each_sched_rt_entity(rt_se) { 1025 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); 1026 int exceeded; 1027 1028 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { 1029 raw_spin_lock(&rt_rq->rt_runtime_lock); 1030 rt_rq->rt_time += delta_exec; 1031 exceeded = sched_rt_runtime_exceeded(rt_rq); 1032 if (exceeded) 1033 resched_curr(rq); 1034 raw_spin_unlock(&rt_rq->rt_runtime_lock); 1035 if (exceeded) 1036 do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq)); 1037 } 1038 } 1039 } 1040 1041 static void 1042 dequeue_top_rt_rq(struct rt_rq *rt_rq) 1043 { 1044 struct rq *rq = rq_of_rt_rq(rt_rq); 1045 1046 BUG_ON(&rq->rt != rt_rq); 1047 1048 if (!rt_rq->rt_queued) 1049 return; 1050 1051 BUG_ON(!rq->nr_running); 1052 1053 sub_nr_running(rq, rt_rq->rt_nr_running); 1054 rt_rq->rt_queued = 0; 1055 1056 } 1057 1058 static void 1059 enqueue_top_rt_rq(struct rt_rq *rt_rq) 1060 { 1061 struct rq *rq = rq_of_rt_rq(rt_rq); 1062 1063 BUG_ON(&rq->rt != rt_rq); 1064 1065 if (rt_rq->rt_queued) 1066 return; 1067 1068 if (rt_rq_throttled(rt_rq)) 1069 return; 1070 1071 if (rt_rq->rt_nr_running) { 1072 add_nr_running(rq, rt_rq->rt_nr_running); 1073 rt_rq->rt_queued = 1; 1074 } 1075 1076 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */ 1077 cpufreq_update_util(rq, 0); 1078 } 1079 1080 #if defined CONFIG_SMP 1081 1082 static void 1083 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) 1084 { 1085 struct rq *rq = rq_of_rt_rq(rt_rq); 1086 1087 #ifdef CONFIG_RT_GROUP_SCHED 1088 /* 1089 * Change rq's cpupri only if rt_rq is the top queue. 1090 */ 1091 if (&rq->rt != rt_rq) 1092 return; 1093 #endif 1094 if (rq->online && prio < prev_prio) 1095 cpupri_set(&rq->rd->cpupri, rq->cpu, prio); 1096 } 1097 1098 static void 1099 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) 1100 { 1101 struct rq *rq = rq_of_rt_rq(rt_rq); 1102 1103 #ifdef CONFIG_RT_GROUP_SCHED 1104 /* 1105 * Change rq's cpupri only if rt_rq is the top queue. 1106 */ 1107 if (&rq->rt != rt_rq) 1108 return; 1109 #endif 1110 if (rq->online && rt_rq->highest_prio.curr != prev_prio) 1111 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); 1112 } 1113 1114 #else /* CONFIG_SMP */ 1115 1116 static inline 1117 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} 1118 static inline 1119 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} 1120 1121 #endif /* CONFIG_SMP */ 1122 1123 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 1124 static void 1125 inc_rt_prio(struct rt_rq *rt_rq, int prio) 1126 { 1127 int prev_prio = rt_rq->highest_prio.curr; 1128 1129 if (prio < prev_prio) 1130 rt_rq->highest_prio.curr = prio; 1131 1132 inc_rt_prio_smp(rt_rq, prio, prev_prio); 1133 } 1134 1135 static void 1136 dec_rt_prio(struct rt_rq *rt_rq, int prio) 1137 { 1138 int prev_prio = rt_rq->highest_prio.curr; 1139 1140 if (rt_rq->rt_nr_running) { 1141 1142 WARN_ON(prio < prev_prio); 1143 1144 /* 1145 * This may have been our highest task, and therefore 1146 * we may have some recomputation to do 1147 */ 1148 if (prio == prev_prio) { 1149 struct rt_prio_array *array = &rt_rq->active; 1150 1151 rt_rq->highest_prio.curr = 1152 sched_find_first_bit(array->bitmap); 1153 } 1154 1155 } else { 1156 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; 1157 } 1158 1159 dec_rt_prio_smp(rt_rq, prio, prev_prio); 1160 } 1161 1162 #else 1163 1164 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {} 1165 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {} 1166 1167 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */ 1168 1169 #ifdef CONFIG_RT_GROUP_SCHED 1170 1171 static void 1172 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1173 { 1174 if (rt_se_boosted(rt_se)) 1175 rt_rq->rt_nr_boosted++; 1176 1177 if (rt_rq->tg) 1178 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); 1179 } 1180 1181 static void 1182 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1183 { 1184 if (rt_se_boosted(rt_se)) 1185 rt_rq->rt_nr_boosted--; 1186 1187 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); 1188 } 1189 1190 #else /* CONFIG_RT_GROUP_SCHED */ 1191 1192 static void 1193 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1194 { 1195 start_rt_bandwidth(&def_rt_bandwidth); 1196 } 1197 1198 static inline 1199 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {} 1200 1201 #endif /* CONFIG_RT_GROUP_SCHED */ 1202 1203 static inline 1204 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se) 1205 { 1206 struct rt_rq *group_rq = group_rt_rq(rt_se); 1207 1208 if (group_rq) 1209 return group_rq->rt_nr_running; 1210 else 1211 return 1; 1212 } 1213 1214 static inline 1215 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se) 1216 { 1217 struct rt_rq *group_rq = group_rt_rq(rt_se); 1218 struct task_struct *tsk; 1219 1220 if (group_rq) 1221 return group_rq->rr_nr_running; 1222 1223 tsk = rt_task_of(rt_se); 1224 1225 return (tsk->policy == SCHED_RR) ? 1 : 0; 1226 } 1227 1228 static inline 1229 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1230 { 1231 int prio = rt_se_prio(rt_se); 1232 1233 WARN_ON(!rt_prio(prio)); 1234 rt_rq->rt_nr_running += rt_se_nr_running(rt_se); 1235 rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se); 1236 1237 inc_rt_prio(rt_rq, prio); 1238 inc_rt_migration(rt_se, rt_rq); 1239 inc_rt_group(rt_se, rt_rq); 1240 } 1241 1242 static inline 1243 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 1244 { 1245 WARN_ON(!rt_prio(rt_se_prio(rt_se))); 1246 WARN_ON(!rt_rq->rt_nr_running); 1247 rt_rq->rt_nr_running -= rt_se_nr_running(rt_se); 1248 rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se); 1249 1250 dec_rt_prio(rt_rq, rt_se_prio(rt_se)); 1251 dec_rt_migration(rt_se, rt_rq); 1252 dec_rt_group(rt_se, rt_rq); 1253 } 1254 1255 /* 1256 * Change rt_se->run_list location unless SAVE && !MOVE 1257 * 1258 * assumes ENQUEUE/DEQUEUE flags match 1259 */ 1260 static inline bool move_entity(unsigned int flags) 1261 { 1262 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE) 1263 return false; 1264 1265 return true; 1266 } 1267 1268 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array) 1269 { 1270 list_del_init(&rt_se->run_list); 1271 1272 if (list_empty(array->queue + rt_se_prio(rt_se))) 1273 __clear_bit(rt_se_prio(rt_se), array->bitmap); 1274 1275 rt_se->on_list = 0; 1276 } 1277 1278 static inline struct sched_statistics * 1279 __schedstats_from_rt_se(struct sched_rt_entity *rt_se) 1280 { 1281 #ifdef CONFIG_RT_GROUP_SCHED 1282 /* schedstats is not supported for rt group. */ 1283 if (!rt_entity_is_task(rt_se)) 1284 return NULL; 1285 #endif 1286 1287 return &rt_task_of(rt_se)->stats; 1288 } 1289 1290 static inline void 1291 update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) 1292 { 1293 struct sched_statistics *stats; 1294 struct task_struct *p = NULL; 1295 1296 if (!schedstat_enabled()) 1297 return; 1298 1299 if (rt_entity_is_task(rt_se)) 1300 p = rt_task_of(rt_se); 1301 1302 stats = __schedstats_from_rt_se(rt_se); 1303 if (!stats) 1304 return; 1305 1306 __update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats); 1307 } 1308 1309 static inline void 1310 update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) 1311 { 1312 struct sched_statistics *stats; 1313 struct task_struct *p = NULL; 1314 1315 if (!schedstat_enabled()) 1316 return; 1317 1318 if (rt_entity_is_task(rt_se)) 1319 p = rt_task_of(rt_se); 1320 1321 stats = __schedstats_from_rt_se(rt_se); 1322 if (!stats) 1323 return; 1324 1325 __update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats); 1326 } 1327 1328 static inline void 1329 update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, 1330 int flags) 1331 { 1332 if (!schedstat_enabled()) 1333 return; 1334 1335 if (flags & ENQUEUE_WAKEUP) 1336 update_stats_enqueue_sleeper_rt(rt_rq, rt_se); 1337 } 1338 1339 static inline void 1340 update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) 1341 { 1342 struct sched_statistics *stats; 1343 struct task_struct *p = NULL; 1344 1345 if (!schedstat_enabled()) 1346 return; 1347 1348 if (rt_entity_is_task(rt_se)) 1349 p = rt_task_of(rt_se); 1350 1351 stats = __schedstats_from_rt_se(rt_se); 1352 if (!stats) 1353 return; 1354 1355 __update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats); 1356 } 1357 1358 static inline void 1359 update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, 1360 int flags) 1361 { 1362 struct task_struct *p = NULL; 1363 1364 if (!schedstat_enabled()) 1365 return; 1366 1367 if (rt_entity_is_task(rt_se)) 1368 p = rt_task_of(rt_se); 1369 1370 if ((flags & DEQUEUE_SLEEP) && p) { 1371 unsigned int state; 1372 1373 state = READ_ONCE(p->__state); 1374 if (state & TASK_INTERRUPTIBLE) 1375 __schedstat_set(p->stats.sleep_start, 1376 rq_clock(rq_of_rt_rq(rt_rq))); 1377 1378 if (state & TASK_UNINTERRUPTIBLE) 1379 __schedstat_set(p->stats.block_start, 1380 rq_clock(rq_of_rt_rq(rt_rq))); 1381 } 1382 } 1383 1384 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1385 { 1386 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); 1387 struct rt_prio_array *array = &rt_rq->active; 1388 struct rt_rq *group_rq = group_rt_rq(rt_se); 1389 struct list_head *queue = array->queue + rt_se_prio(rt_se); 1390 1391 /* 1392 * Don't enqueue the group if its throttled, or when empty. 1393 * The latter is a consequence of the former when a child group 1394 * get throttled and the current group doesn't have any other 1395 * active members. 1396 */ 1397 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) { 1398 if (rt_se->on_list) 1399 __delist_rt_entity(rt_se, array); 1400 return; 1401 } 1402 1403 if (move_entity(flags)) { 1404 WARN_ON_ONCE(rt_se->on_list); 1405 if (flags & ENQUEUE_HEAD) 1406 list_add(&rt_se->run_list, queue); 1407 else 1408 list_add_tail(&rt_se->run_list, queue); 1409 1410 __set_bit(rt_se_prio(rt_se), array->bitmap); 1411 rt_se->on_list = 1; 1412 } 1413 rt_se->on_rq = 1; 1414 1415 inc_rt_tasks(rt_se, rt_rq); 1416 } 1417 1418 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1419 { 1420 struct rt_rq *rt_rq = rt_rq_of_se(rt_se); 1421 struct rt_prio_array *array = &rt_rq->active; 1422 1423 if (move_entity(flags)) { 1424 WARN_ON_ONCE(!rt_se->on_list); 1425 __delist_rt_entity(rt_se, array); 1426 } 1427 rt_se->on_rq = 0; 1428 1429 dec_rt_tasks(rt_se, rt_rq); 1430 } 1431 1432 /* 1433 * Because the prio of an upper entry depends on the lower 1434 * entries, we must remove entries top - down. 1435 */ 1436 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags) 1437 { 1438 struct sched_rt_entity *back = NULL; 1439 1440 for_each_sched_rt_entity(rt_se) { 1441 rt_se->back = back; 1442 back = rt_se; 1443 } 1444 1445 dequeue_top_rt_rq(rt_rq_of_se(back)); 1446 1447 for (rt_se = back; rt_se; rt_se = rt_se->back) { 1448 if (on_rt_rq(rt_se)) 1449 __dequeue_rt_entity(rt_se, flags); 1450 } 1451 } 1452 1453 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1454 { 1455 struct rq *rq = rq_of_rt_se(rt_se); 1456 1457 update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags); 1458 1459 dequeue_rt_stack(rt_se, flags); 1460 for_each_sched_rt_entity(rt_se) 1461 __enqueue_rt_entity(rt_se, flags); 1462 enqueue_top_rt_rq(&rq->rt); 1463 } 1464 1465 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags) 1466 { 1467 struct rq *rq = rq_of_rt_se(rt_se); 1468 1469 update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags); 1470 1471 dequeue_rt_stack(rt_se, flags); 1472 1473 for_each_sched_rt_entity(rt_se) { 1474 struct rt_rq *rt_rq = group_rt_rq(rt_se); 1475 1476 if (rt_rq && rt_rq->rt_nr_running) 1477 __enqueue_rt_entity(rt_se, flags); 1478 } 1479 enqueue_top_rt_rq(&rq->rt); 1480 } 1481 1482 /* 1483 * Adding/removing a task to/from a priority array: 1484 */ 1485 static void 1486 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) 1487 { 1488 struct sched_rt_entity *rt_se = &p->rt; 1489 1490 if (flags & ENQUEUE_WAKEUP) 1491 rt_se->timeout = 0; 1492 1493 check_schedstat_required(); 1494 update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se); 1495 1496 enqueue_rt_entity(rt_se, flags); 1497 1498 if (!task_current(rq, p) && p->nr_cpus_allowed > 1) 1499 enqueue_pushable_task(rq, p); 1500 } 1501 1502 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) 1503 { 1504 struct sched_rt_entity *rt_se = &p->rt; 1505 1506 update_curr_rt(rq); 1507 dequeue_rt_entity(rt_se, flags); 1508 1509 dequeue_pushable_task(rq, p); 1510 } 1511 1512 /* 1513 * Put task to the head or the end of the run list without the overhead of 1514 * dequeue followed by enqueue. 1515 */ 1516 static void 1517 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) 1518 { 1519 if (on_rt_rq(rt_se)) { 1520 struct rt_prio_array *array = &rt_rq->active; 1521 struct list_head *queue = array->queue + rt_se_prio(rt_se); 1522 1523 if (head) 1524 list_move(&rt_se->run_list, queue); 1525 else 1526 list_move_tail(&rt_se->run_list, queue); 1527 } 1528 } 1529 1530 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) 1531 { 1532 struct sched_rt_entity *rt_se = &p->rt; 1533 struct rt_rq *rt_rq; 1534 1535 for_each_sched_rt_entity(rt_se) { 1536 rt_rq = rt_rq_of_se(rt_se); 1537 requeue_rt_entity(rt_rq, rt_se, head); 1538 } 1539 } 1540 1541 static void yield_task_rt(struct rq *rq) 1542 { 1543 requeue_task_rt(rq, rq->curr, 0); 1544 } 1545 1546 #ifdef CONFIG_SMP 1547 static int find_lowest_rq(struct task_struct *task); 1548 1549 static int 1550 select_task_rq_rt(struct task_struct *p, int cpu, int flags) 1551 { 1552 struct task_struct *curr; 1553 struct rq *rq; 1554 bool test; 1555 1556 /* For anything but wake ups, just return the task_cpu */ 1557 if (!(flags & (WF_TTWU | WF_FORK))) 1558 goto out; 1559 1560 rq = cpu_rq(cpu); 1561 1562 rcu_read_lock(); 1563 curr = READ_ONCE(rq->curr); /* unlocked access */ 1564 1565 /* 1566 * If the current task on @p's runqueue is an RT task, then 1567 * try to see if we can wake this RT task up on another 1568 * runqueue. Otherwise simply start this RT task 1569 * on its current runqueue. 1570 * 1571 * We want to avoid overloading runqueues. If the woken 1572 * task is a higher priority, then it will stay on this CPU 1573 * and the lower prio task should be moved to another CPU. 1574 * Even though this will probably make the lower prio task 1575 * lose its cache, we do not want to bounce a higher task 1576 * around just because it gave up its CPU, perhaps for a 1577 * lock? 1578 * 1579 * For equal prio tasks, we just let the scheduler sort it out. 1580 * 1581 * Otherwise, just let it ride on the affined RQ and the 1582 * post-schedule router will push the preempted task away 1583 * 1584 * This test is optimistic, if we get it wrong the load-balancer 1585 * will have to sort it out. 1586 * 1587 * We take into account the capacity of the CPU to ensure it fits the 1588 * requirement of the task - which is only important on heterogeneous 1589 * systems like big.LITTLE. 1590 */ 1591 test = curr && 1592 unlikely(rt_task(curr)) && 1593 (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio); 1594 1595 if (test || !rt_task_fits_capacity(p, cpu)) { 1596 int target = find_lowest_rq(p); 1597 1598 /* 1599 * Bail out if we were forcing a migration to find a better 1600 * fitting CPU but our search failed. 1601 */ 1602 if (!test && target != -1 && !rt_task_fits_capacity(p, target)) 1603 goto out_unlock; 1604 1605 /* 1606 * Don't bother moving it if the destination CPU is 1607 * not running a lower priority task. 1608 */ 1609 if (target != -1 && 1610 p->prio < cpu_rq(target)->rt.highest_prio.curr) 1611 cpu = target; 1612 } 1613 1614 out_unlock: 1615 rcu_read_unlock(); 1616 1617 out: 1618 return cpu; 1619 } 1620 1621 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) 1622 { 1623 /* 1624 * Current can't be migrated, useless to reschedule, 1625 * let's hope p can move out. 1626 */ 1627 if (rq->curr->nr_cpus_allowed == 1 || 1628 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) 1629 return; 1630 1631 /* 1632 * p is migratable, so let's not schedule it and 1633 * see if it is pushed or pulled somewhere else. 1634 */ 1635 if (p->nr_cpus_allowed != 1 && 1636 cpupri_find(&rq->rd->cpupri, p, NULL)) 1637 return; 1638 1639 /* 1640 * There appear to be other CPUs that can accept 1641 * the current task but none can run 'p', so lets reschedule 1642 * to try and push the current task away: 1643 */ 1644 requeue_task_rt(rq, p, 1); 1645 resched_curr(rq); 1646 } 1647 1648 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf) 1649 { 1650 if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) { 1651 /* 1652 * This is OK, because current is on_cpu, which avoids it being 1653 * picked for load-balance and preemption/IRQs are still 1654 * disabled avoiding further scheduler activity on it and we've 1655 * not yet started the picking loop. 1656 */ 1657 rq_unpin_lock(rq, rf); 1658 pull_rt_task(rq); 1659 rq_repin_lock(rq, rf); 1660 } 1661 1662 return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq); 1663 } 1664 #endif /* CONFIG_SMP */ 1665 1666 /* 1667 * Preempt the current task with a newly woken task if needed: 1668 */ 1669 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) 1670 { 1671 if (p->prio < rq->curr->prio) { 1672 resched_curr(rq); 1673 return; 1674 } 1675 1676 #ifdef CONFIG_SMP 1677 /* 1678 * If: 1679 * 1680 * - the newly woken task is of equal priority to the current task 1681 * - the newly woken task is non-migratable while current is migratable 1682 * - current will be preempted on the next reschedule 1683 * 1684 * we should check to see if current can readily move to a different 1685 * cpu. If so, we will reschedule to allow the push logic to try 1686 * to move current somewhere else, making room for our non-migratable 1687 * task. 1688 */ 1689 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) 1690 check_preempt_equal_prio(rq, p); 1691 #endif 1692 } 1693 1694 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first) 1695 { 1696 struct sched_rt_entity *rt_se = &p->rt; 1697 struct rt_rq *rt_rq = &rq->rt; 1698 1699 p->se.exec_start = rq_clock_task(rq); 1700 if (on_rt_rq(&p->rt)) 1701 update_stats_wait_end_rt(rt_rq, rt_se); 1702 1703 /* The running task is never eligible for pushing */ 1704 dequeue_pushable_task(rq, p); 1705 1706 if (!first) 1707 return; 1708 1709 /* 1710 * If prev task was rt, put_prev_task() has already updated the 1711 * utilization. We only care of the case where we start to schedule a 1712 * rt task 1713 */ 1714 if (rq->curr->sched_class != &rt_sched_class) 1715 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0); 1716 1717 rt_queue_push_tasks(rq); 1718 } 1719 1720 static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq) 1721 { 1722 struct rt_prio_array *array = &rt_rq->active; 1723 struct sched_rt_entity *next = NULL; 1724 struct list_head *queue; 1725 int idx; 1726 1727 idx = sched_find_first_bit(array->bitmap); 1728 BUG_ON(idx >= MAX_RT_PRIO); 1729 1730 queue = array->queue + idx; 1731 next = list_entry(queue->next, struct sched_rt_entity, run_list); 1732 1733 return next; 1734 } 1735 1736 static struct task_struct *_pick_next_task_rt(struct rq *rq) 1737 { 1738 struct sched_rt_entity *rt_se; 1739 struct rt_rq *rt_rq = &rq->rt; 1740 1741 do { 1742 rt_se = pick_next_rt_entity(rt_rq); 1743 BUG_ON(!rt_se); 1744 rt_rq = group_rt_rq(rt_se); 1745 } while (rt_rq); 1746 1747 return rt_task_of(rt_se); 1748 } 1749 1750 static struct task_struct *pick_task_rt(struct rq *rq) 1751 { 1752 struct task_struct *p; 1753 1754 if (!sched_rt_runnable(rq)) 1755 return NULL; 1756 1757 p = _pick_next_task_rt(rq); 1758 1759 return p; 1760 } 1761 1762 static struct task_struct *pick_next_task_rt(struct rq *rq) 1763 { 1764 struct task_struct *p = pick_task_rt(rq); 1765 1766 if (p) 1767 set_next_task_rt(rq, p, true); 1768 1769 return p; 1770 } 1771 1772 static void put_prev_task_rt(struct rq *rq, struct task_struct *p) 1773 { 1774 struct sched_rt_entity *rt_se = &p->rt; 1775 struct rt_rq *rt_rq = &rq->rt; 1776 1777 if (on_rt_rq(&p->rt)) 1778 update_stats_wait_start_rt(rt_rq, rt_se); 1779 1780 update_curr_rt(rq); 1781 1782 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1); 1783 1784 /* 1785 * The previous task needs to be made eligible for pushing 1786 * if it is still active 1787 */ 1788 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) 1789 enqueue_pushable_task(rq, p); 1790 } 1791 1792 #ifdef CONFIG_SMP 1793 1794 /* Only try algorithms three times */ 1795 #define RT_MAX_TRIES 3 1796 1797 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) 1798 { 1799 if (!task_running(rq, p) && 1800 cpumask_test_cpu(cpu, &p->cpus_mask)) 1801 return 1; 1802 1803 return 0; 1804 } 1805 1806 /* 1807 * Return the highest pushable rq's task, which is suitable to be executed 1808 * on the CPU, NULL otherwise 1809 */ 1810 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu) 1811 { 1812 struct plist_head *head = &rq->rt.pushable_tasks; 1813 struct task_struct *p; 1814 1815 if (!has_pushable_tasks(rq)) 1816 return NULL; 1817 1818 plist_for_each_entry(p, head, pushable_tasks) { 1819 if (pick_rt_task(rq, p, cpu)) 1820 return p; 1821 } 1822 1823 return NULL; 1824 } 1825 1826 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); 1827 1828 static int find_lowest_rq(struct task_struct *task) 1829 { 1830 struct sched_domain *sd; 1831 struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask); 1832 int this_cpu = smp_processor_id(); 1833 int cpu = task_cpu(task); 1834 int ret; 1835 1836 /* Make sure the mask is initialized first */ 1837 if (unlikely(!lowest_mask)) 1838 return -1; 1839 1840 if (task->nr_cpus_allowed == 1) 1841 return -1; /* No other targets possible */ 1842 1843 /* 1844 * If we're on asym system ensure we consider the different capacities 1845 * of the CPUs when searching for the lowest_mask. 1846 */ 1847 if (static_branch_unlikely(&sched_asym_cpucapacity)) { 1848 1849 ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri, 1850 task, lowest_mask, 1851 rt_task_fits_capacity); 1852 } else { 1853 1854 ret = cpupri_find(&task_rq(task)->rd->cpupri, 1855 task, lowest_mask); 1856 } 1857 1858 if (!ret) 1859 return -1; /* No targets found */ 1860 1861 /* 1862 * At this point we have built a mask of CPUs representing the 1863 * lowest priority tasks in the system. Now we want to elect 1864 * the best one based on our affinity and topology. 1865 * 1866 * We prioritize the last CPU that the task executed on since 1867 * it is most likely cache-hot in that location. 1868 */ 1869 if (cpumask_test_cpu(cpu, lowest_mask)) 1870 return cpu; 1871 1872 /* 1873 * Otherwise, we consult the sched_domains span maps to figure 1874 * out which CPU is logically closest to our hot cache data. 1875 */ 1876 if (!cpumask_test_cpu(this_cpu, lowest_mask)) 1877 this_cpu = -1; /* Skip this_cpu opt if not among lowest */ 1878 1879 rcu_read_lock(); 1880 for_each_domain(cpu, sd) { 1881 if (sd->flags & SD_WAKE_AFFINE) { 1882 int best_cpu; 1883 1884 /* 1885 * "this_cpu" is cheaper to preempt than a 1886 * remote processor. 1887 */ 1888 if (this_cpu != -1 && 1889 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { 1890 rcu_read_unlock(); 1891 return this_cpu; 1892 } 1893 1894 best_cpu = cpumask_any_and_distribute(lowest_mask, 1895 sched_domain_span(sd)); 1896 if (best_cpu < nr_cpu_ids) { 1897 rcu_read_unlock(); 1898 return best_cpu; 1899 } 1900 } 1901 } 1902 rcu_read_unlock(); 1903 1904 /* 1905 * And finally, if there were no matches within the domains 1906 * just give the caller *something* to work with from the compatible 1907 * locations. 1908 */ 1909 if (this_cpu != -1) 1910 return this_cpu; 1911 1912 cpu = cpumask_any_distribute(lowest_mask); 1913 if (cpu < nr_cpu_ids) 1914 return cpu; 1915 1916 return -1; 1917 } 1918 1919 /* Will lock the rq it finds */ 1920 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) 1921 { 1922 struct rq *lowest_rq = NULL; 1923 int tries; 1924 int cpu; 1925 1926 for (tries = 0; tries < RT_MAX_TRIES; tries++) { 1927 cpu = find_lowest_rq(task); 1928 1929 if ((cpu == -1) || (cpu == rq->cpu)) 1930 break; 1931 1932 lowest_rq = cpu_rq(cpu); 1933 1934 if (lowest_rq->rt.highest_prio.curr <= task->prio) { 1935 /* 1936 * Target rq has tasks of equal or higher priority, 1937 * retrying does not release any lock and is unlikely 1938 * to yield a different result. 1939 */ 1940 lowest_rq = NULL; 1941 break; 1942 } 1943 1944 /* if the prio of this runqueue changed, try again */ 1945 if (double_lock_balance(rq, lowest_rq)) { 1946 /* 1947 * We had to unlock the run queue. In 1948 * the mean time, task could have 1949 * migrated already or had its affinity changed. 1950 * Also make sure that it wasn't scheduled on its rq. 1951 */ 1952 if (unlikely(task_rq(task) != rq || 1953 !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) || 1954 task_running(rq, task) || 1955 !rt_task(task) || 1956 !task_on_rq_queued(task))) { 1957 1958 double_unlock_balance(rq, lowest_rq); 1959 lowest_rq = NULL; 1960 break; 1961 } 1962 } 1963 1964 /* If this rq is still suitable use it. */ 1965 if (lowest_rq->rt.highest_prio.curr > task->prio) 1966 break; 1967 1968 /* try again */ 1969 double_unlock_balance(rq, lowest_rq); 1970 lowest_rq = NULL; 1971 } 1972 1973 return lowest_rq; 1974 } 1975 1976 static struct task_struct *pick_next_pushable_task(struct rq *rq) 1977 { 1978 struct task_struct *p; 1979 1980 if (!has_pushable_tasks(rq)) 1981 return NULL; 1982 1983 p = plist_first_entry(&rq->rt.pushable_tasks, 1984 struct task_struct, pushable_tasks); 1985 1986 BUG_ON(rq->cpu != task_cpu(p)); 1987 BUG_ON(task_current(rq, p)); 1988 BUG_ON(p->nr_cpus_allowed <= 1); 1989 1990 BUG_ON(!task_on_rq_queued(p)); 1991 BUG_ON(!rt_task(p)); 1992 1993 return p; 1994 } 1995 1996 /* 1997 * If the current CPU has more than one RT task, see if the non 1998 * running task can migrate over to a CPU that is running a task 1999 * of lesser priority. 2000 */ 2001 static int push_rt_task(struct rq *rq, bool pull) 2002 { 2003 struct task_struct *next_task; 2004 struct rq *lowest_rq; 2005 int ret = 0; 2006 2007 if (!rq->rt.overloaded) 2008 return 0; 2009 2010 next_task = pick_next_pushable_task(rq); 2011 if (!next_task) 2012 return 0; 2013 2014 retry: 2015 /* 2016 * It's possible that the next_task slipped in of 2017 * higher priority than current. If that's the case 2018 * just reschedule current. 2019 */ 2020 if (unlikely(next_task->prio < rq->curr->prio)) { 2021 resched_curr(rq); 2022 return 0; 2023 } 2024 2025 if (is_migration_disabled(next_task)) { 2026 struct task_struct *push_task = NULL; 2027 int cpu; 2028 2029 if (!pull || rq->push_busy) 2030 return 0; 2031 2032 /* 2033 * Invoking find_lowest_rq() on anything but an RT task doesn't 2034 * make sense. Per the above priority check, curr has to 2035 * be of higher priority than next_task, so no need to 2036 * reschedule when bailing out. 2037 * 2038 * Note that the stoppers are masqueraded as SCHED_FIFO 2039 * (cf. sched_set_stop_task()), so we can't rely on rt_task(). 2040 */ 2041 if (rq->curr->sched_class != &rt_sched_class) 2042 return 0; 2043 2044 cpu = find_lowest_rq(rq->curr); 2045 if (cpu == -1 || cpu == rq->cpu) 2046 return 0; 2047 2048 /* 2049 * Given we found a CPU with lower priority than @next_task, 2050 * therefore it should be running. However we cannot migrate it 2051 * to this other CPU, instead attempt to push the current 2052 * running task on this CPU away. 2053 */ 2054 push_task = get_push_task(rq); 2055 if (push_task) { 2056 raw_spin_rq_unlock(rq); 2057 stop_one_cpu_nowait(rq->cpu, push_cpu_stop, 2058 push_task, &rq->push_work); 2059 raw_spin_rq_lock(rq); 2060 } 2061 2062 return 0; 2063 } 2064 2065 if (WARN_ON(next_task == rq->curr)) 2066 return 0; 2067 2068 /* We might release rq lock */ 2069 get_task_struct(next_task); 2070 2071 /* find_lock_lowest_rq locks the rq if found */ 2072 lowest_rq = find_lock_lowest_rq(next_task, rq); 2073 if (!lowest_rq) { 2074 struct task_struct *task; 2075 /* 2076 * find_lock_lowest_rq releases rq->lock 2077 * so it is possible that next_task has migrated. 2078 * 2079 * We need to make sure that the task is still on the same 2080 * run-queue and is also still the next task eligible for 2081 * pushing. 2082 */ 2083 task = pick_next_pushable_task(rq); 2084 if (task == next_task) { 2085 /* 2086 * The task hasn't migrated, and is still the next 2087 * eligible task, but we failed to find a run-queue 2088 * to push it to. Do not retry in this case, since 2089 * other CPUs will pull from us when ready. 2090 */ 2091 goto out; 2092 } 2093 2094 if (!task) 2095 /* No more tasks, just exit */ 2096 goto out; 2097 2098 /* 2099 * Something has shifted, try again. 2100 */ 2101 put_task_struct(next_task); 2102 next_task = task; 2103 goto retry; 2104 } 2105 2106 deactivate_task(rq, next_task, 0); 2107 set_task_cpu(next_task, lowest_rq->cpu); 2108 activate_task(lowest_rq, next_task, 0); 2109 resched_curr(lowest_rq); 2110 ret = 1; 2111 2112 double_unlock_balance(rq, lowest_rq); 2113 out: 2114 put_task_struct(next_task); 2115 2116 return ret; 2117 } 2118 2119 static void push_rt_tasks(struct rq *rq) 2120 { 2121 /* push_rt_task will return true if it moved an RT */ 2122 while (push_rt_task(rq, false)) 2123 ; 2124 } 2125 2126 #ifdef HAVE_RT_PUSH_IPI 2127 2128 /* 2129 * When a high priority task schedules out from a CPU and a lower priority 2130 * task is scheduled in, a check is made to see if there's any RT tasks 2131 * on other CPUs that are waiting to run because a higher priority RT task 2132 * is currently running on its CPU. In this case, the CPU with multiple RT 2133 * tasks queued on it (overloaded) needs to be notified that a CPU has opened 2134 * up that may be able to run one of its non-running queued RT tasks. 2135 * 2136 * All CPUs with overloaded RT tasks need to be notified as there is currently 2137 * no way to know which of these CPUs have the highest priority task waiting 2138 * to run. Instead of trying to take a spinlock on each of these CPUs, 2139 * which has shown to cause large latency when done on machines with many 2140 * CPUs, sending an IPI to the CPUs to have them push off the overloaded 2141 * RT tasks waiting to run. 2142 * 2143 * Just sending an IPI to each of the CPUs is also an issue, as on large 2144 * count CPU machines, this can cause an IPI storm on a CPU, especially 2145 * if its the only CPU with multiple RT tasks queued, and a large number 2146 * of CPUs scheduling a lower priority task at the same time. 2147 * 2148 * Each root domain has its own irq work function that can iterate over 2149 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT 2150 * task must be checked if there's one or many CPUs that are lowering 2151 * their priority, there's a single irq work iterator that will try to 2152 * push off RT tasks that are waiting to run. 2153 * 2154 * When a CPU schedules a lower priority task, it will kick off the 2155 * irq work iterator that will jump to each CPU with overloaded RT tasks. 2156 * As it only takes the first CPU that schedules a lower priority task 2157 * to start the process, the rto_start variable is incremented and if 2158 * the atomic result is one, then that CPU will try to take the rto_lock. 2159 * This prevents high contention on the lock as the process handles all 2160 * CPUs scheduling lower priority tasks. 2161 * 2162 * All CPUs that are scheduling a lower priority task will increment the 2163 * rt_loop_next variable. This will make sure that the irq work iterator 2164 * checks all RT overloaded CPUs whenever a CPU schedules a new lower 2165 * priority task, even if the iterator is in the middle of a scan. Incrementing 2166 * the rt_loop_next will cause the iterator to perform another scan. 2167 * 2168 */ 2169 static int rto_next_cpu(struct root_domain *rd) 2170 { 2171 int next; 2172 int cpu; 2173 2174 /* 2175 * When starting the IPI RT pushing, the rto_cpu is set to -1, 2176 * rt_next_cpu() will simply return the first CPU found in 2177 * the rto_mask. 2178 * 2179 * If rto_next_cpu() is called with rto_cpu is a valid CPU, it 2180 * will return the next CPU found in the rto_mask. 2181 * 2182 * If there are no more CPUs left in the rto_mask, then a check is made 2183 * against rto_loop and rto_loop_next. rto_loop is only updated with 2184 * the rto_lock held, but any CPU may increment the rto_loop_next 2185 * without any locking. 2186 */ 2187 for (;;) { 2188 2189 /* When rto_cpu is -1 this acts like cpumask_first() */ 2190 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask); 2191 2192 rd->rto_cpu = cpu; 2193 2194 if (cpu < nr_cpu_ids) 2195 return cpu; 2196 2197 rd->rto_cpu = -1; 2198 2199 /* 2200 * ACQUIRE ensures we see the @rto_mask changes 2201 * made prior to the @next value observed. 2202 * 2203 * Matches WMB in rt_set_overload(). 2204 */ 2205 next = atomic_read_acquire(&rd->rto_loop_next); 2206 2207 if (rd->rto_loop == next) 2208 break; 2209 2210 rd->rto_loop = next; 2211 } 2212 2213 return -1; 2214 } 2215 2216 static inline bool rto_start_trylock(atomic_t *v) 2217 { 2218 return !atomic_cmpxchg_acquire(v, 0, 1); 2219 } 2220 2221 static inline void rto_start_unlock(atomic_t *v) 2222 { 2223 atomic_set_release(v, 0); 2224 } 2225 2226 static void tell_cpu_to_push(struct rq *rq) 2227 { 2228 int cpu = -1; 2229 2230 /* Keep the loop going if the IPI is currently active */ 2231 atomic_inc(&rq->rd->rto_loop_next); 2232 2233 /* Only one CPU can initiate a loop at a time */ 2234 if (!rto_start_trylock(&rq->rd->rto_loop_start)) 2235 return; 2236 2237 raw_spin_lock(&rq->rd->rto_lock); 2238 2239 /* 2240 * The rto_cpu is updated under the lock, if it has a valid CPU 2241 * then the IPI is still running and will continue due to the 2242 * update to loop_next, and nothing needs to be done here. 2243 * Otherwise it is finishing up and an ipi needs to be sent. 2244 */ 2245 if (rq->rd->rto_cpu < 0) 2246 cpu = rto_next_cpu(rq->rd); 2247 2248 raw_spin_unlock(&rq->rd->rto_lock); 2249 2250 rto_start_unlock(&rq->rd->rto_loop_start); 2251 2252 if (cpu >= 0) { 2253 /* Make sure the rd does not get freed while pushing */ 2254 sched_get_rd(rq->rd); 2255 irq_work_queue_on(&rq->rd->rto_push_work, cpu); 2256 } 2257 } 2258 2259 /* Called from hardirq context */ 2260 void rto_push_irq_work_func(struct irq_work *work) 2261 { 2262 struct root_domain *rd = 2263 container_of(work, struct root_domain, rto_push_work); 2264 struct rq *rq; 2265 int cpu; 2266 2267 rq = this_rq(); 2268 2269 /* 2270 * We do not need to grab the lock to check for has_pushable_tasks. 2271 * When it gets updated, a check is made if a push is possible. 2272 */ 2273 if (has_pushable_tasks(rq)) { 2274 raw_spin_rq_lock(rq); 2275 while (push_rt_task(rq, true)) 2276 ; 2277 raw_spin_rq_unlock(rq); 2278 } 2279 2280 raw_spin_lock(&rd->rto_lock); 2281 2282 /* Pass the IPI to the next rt overloaded queue */ 2283 cpu = rto_next_cpu(rd); 2284 2285 raw_spin_unlock(&rd->rto_lock); 2286 2287 if (cpu < 0) { 2288 sched_put_rd(rd); 2289 return; 2290 } 2291 2292 /* Try the next RT overloaded CPU */ 2293 irq_work_queue_on(&rd->rto_push_work, cpu); 2294 } 2295 #endif /* HAVE_RT_PUSH_IPI */ 2296 2297 static void pull_rt_task(struct rq *this_rq) 2298 { 2299 int this_cpu = this_rq->cpu, cpu; 2300 bool resched = false; 2301 struct task_struct *p, *push_task; 2302 struct rq *src_rq; 2303 int rt_overload_count = rt_overloaded(this_rq); 2304 2305 if (likely(!rt_overload_count)) 2306 return; 2307 2308 /* 2309 * Match the barrier from rt_set_overloaded; this guarantees that if we 2310 * see overloaded we must also see the rto_mask bit. 2311 */ 2312 smp_rmb(); 2313 2314 /* If we are the only overloaded CPU do nothing */ 2315 if (rt_overload_count == 1 && 2316 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) 2317 return; 2318 2319 #ifdef HAVE_RT_PUSH_IPI 2320 if (sched_feat(RT_PUSH_IPI)) { 2321 tell_cpu_to_push(this_rq); 2322 return; 2323 } 2324 #endif 2325 2326 for_each_cpu(cpu, this_rq->rd->rto_mask) { 2327 if (this_cpu == cpu) 2328 continue; 2329 2330 src_rq = cpu_rq(cpu); 2331 2332 /* 2333 * Don't bother taking the src_rq->lock if the next highest 2334 * task is known to be lower-priority than our current task. 2335 * This may look racy, but if this value is about to go 2336 * logically higher, the src_rq will push this task away. 2337 * And if its going logically lower, we do not care 2338 */ 2339 if (src_rq->rt.highest_prio.next >= 2340 this_rq->rt.highest_prio.curr) 2341 continue; 2342 2343 /* 2344 * We can potentially drop this_rq's lock in 2345 * double_lock_balance, and another CPU could 2346 * alter this_rq 2347 */ 2348 push_task = NULL; 2349 double_lock_balance(this_rq, src_rq); 2350 2351 /* 2352 * We can pull only a task, which is pushable 2353 * on its rq, and no others. 2354 */ 2355 p = pick_highest_pushable_task(src_rq, this_cpu); 2356 2357 /* 2358 * Do we have an RT task that preempts 2359 * the to-be-scheduled task? 2360 */ 2361 if (p && (p->prio < this_rq->rt.highest_prio.curr)) { 2362 WARN_ON(p == src_rq->curr); 2363 WARN_ON(!task_on_rq_queued(p)); 2364 2365 /* 2366 * There's a chance that p is higher in priority 2367 * than what's currently running on its CPU. 2368 * This is just that p is waking up and hasn't 2369 * had a chance to schedule. We only pull 2370 * p if it is lower in priority than the 2371 * current task on the run queue 2372 */ 2373 if (p->prio < src_rq->curr->prio) 2374 goto skip; 2375 2376 if (is_migration_disabled(p)) { 2377 push_task = get_push_task(src_rq); 2378 } else { 2379 deactivate_task(src_rq, p, 0); 2380 set_task_cpu(p, this_cpu); 2381 activate_task(this_rq, p, 0); 2382 resched = true; 2383 } 2384 /* 2385 * We continue with the search, just in 2386 * case there's an even higher prio task 2387 * in another runqueue. (low likelihood 2388 * but possible) 2389 */ 2390 } 2391 skip: 2392 double_unlock_balance(this_rq, src_rq); 2393 2394 if (push_task) { 2395 raw_spin_rq_unlock(this_rq); 2396 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop, 2397 push_task, &src_rq->push_work); 2398 raw_spin_rq_lock(this_rq); 2399 } 2400 } 2401 2402 if (resched) 2403 resched_curr(this_rq); 2404 } 2405 2406 /* 2407 * If we are not running and we are not going to reschedule soon, we should 2408 * try to push tasks away now 2409 */ 2410 static void task_woken_rt(struct rq *rq, struct task_struct *p) 2411 { 2412 bool need_to_push = !task_running(rq, p) && 2413 !test_tsk_need_resched(rq->curr) && 2414 p->nr_cpus_allowed > 1 && 2415 (dl_task(rq->curr) || rt_task(rq->curr)) && 2416 (rq->curr->nr_cpus_allowed < 2 || 2417 rq->curr->prio <= p->prio); 2418 2419 if (need_to_push) 2420 push_rt_tasks(rq); 2421 } 2422 2423 /* Assumes rq->lock is held */ 2424 static void rq_online_rt(struct rq *rq) 2425 { 2426 if (rq->rt.overloaded) 2427 rt_set_overload(rq); 2428 2429 __enable_runtime(rq); 2430 2431 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); 2432 } 2433 2434 /* Assumes rq->lock is held */ 2435 static void rq_offline_rt(struct rq *rq) 2436 { 2437 if (rq->rt.overloaded) 2438 rt_clear_overload(rq); 2439 2440 __disable_runtime(rq); 2441 2442 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID); 2443 } 2444 2445 /* 2446 * When switch from the rt queue, we bring ourselves to a position 2447 * that we might want to pull RT tasks from other runqueues. 2448 */ 2449 static void switched_from_rt(struct rq *rq, struct task_struct *p) 2450 { 2451 /* 2452 * If there are other RT tasks then we will reschedule 2453 * and the scheduling of the other RT tasks will handle 2454 * the balancing. But if we are the last RT task 2455 * we may need to handle the pulling of RT tasks 2456 * now. 2457 */ 2458 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running) 2459 return; 2460 2461 rt_queue_pull_task(rq); 2462 } 2463 2464 void __init init_sched_rt_class(void) 2465 { 2466 unsigned int i; 2467 2468 for_each_possible_cpu(i) { 2469 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i), 2470 GFP_KERNEL, cpu_to_node(i)); 2471 } 2472 } 2473 #endif /* CONFIG_SMP */ 2474 2475 /* 2476 * When switching a task to RT, we may overload the runqueue 2477 * with RT tasks. In this case we try to push them off to 2478 * other runqueues. 2479 */ 2480 static void switched_to_rt(struct rq *rq, struct task_struct *p) 2481 { 2482 /* 2483 * If we are running, update the avg_rt tracking, as the running time 2484 * will now on be accounted into the latter. 2485 */ 2486 if (task_current(rq, p)) { 2487 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0); 2488 return; 2489 } 2490 2491 /* 2492 * If we are not running we may need to preempt the current 2493 * running task. If that current running task is also an RT task 2494 * then see if we can move to another run queue. 2495 */ 2496 if (task_on_rq_queued(p)) { 2497 #ifdef CONFIG_SMP 2498 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) 2499 rt_queue_push_tasks(rq); 2500 #endif /* CONFIG_SMP */ 2501 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) 2502 resched_curr(rq); 2503 } 2504 } 2505 2506 /* 2507 * Priority of the task has changed. This may cause 2508 * us to initiate a push or pull. 2509 */ 2510 static void 2511 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) 2512 { 2513 if (!task_on_rq_queued(p)) 2514 return; 2515 2516 if (task_current(rq, p)) { 2517 #ifdef CONFIG_SMP 2518 /* 2519 * If our priority decreases while running, we 2520 * may need to pull tasks to this runqueue. 2521 */ 2522 if (oldprio < p->prio) 2523 rt_queue_pull_task(rq); 2524 2525 /* 2526 * If there's a higher priority task waiting to run 2527 * then reschedule. 2528 */ 2529 if (p->prio > rq->rt.highest_prio.curr) 2530 resched_curr(rq); 2531 #else 2532 /* For UP simply resched on drop of prio */ 2533 if (oldprio < p->prio) 2534 resched_curr(rq); 2535 #endif /* CONFIG_SMP */ 2536 } else { 2537 /* 2538 * This task is not running, but if it is 2539 * greater than the current running task 2540 * then reschedule. 2541 */ 2542 if (p->prio < rq->curr->prio) 2543 resched_curr(rq); 2544 } 2545 } 2546 2547 #ifdef CONFIG_POSIX_TIMERS 2548 static void watchdog(struct rq *rq, struct task_struct *p) 2549 { 2550 unsigned long soft, hard; 2551 2552 /* max may change after cur was read, this will be fixed next tick */ 2553 soft = task_rlimit(p, RLIMIT_RTTIME); 2554 hard = task_rlimit_max(p, RLIMIT_RTTIME); 2555 2556 if (soft != RLIM_INFINITY) { 2557 unsigned long next; 2558 2559 if (p->rt.watchdog_stamp != jiffies) { 2560 p->rt.timeout++; 2561 p->rt.watchdog_stamp = jiffies; 2562 } 2563 2564 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); 2565 if (p->rt.timeout > next) { 2566 posix_cputimers_rt_watchdog(&p->posix_cputimers, 2567 p->se.sum_exec_runtime); 2568 } 2569 } 2570 } 2571 #else 2572 static inline void watchdog(struct rq *rq, struct task_struct *p) { } 2573 #endif 2574 2575 /* 2576 * scheduler tick hitting a task of our scheduling class. 2577 * 2578 * NOTE: This function can be called remotely by the tick offload that 2579 * goes along full dynticks. Therefore no local assumption can be made 2580 * and everything must be accessed through the @rq and @curr passed in 2581 * parameters. 2582 */ 2583 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) 2584 { 2585 struct sched_rt_entity *rt_se = &p->rt; 2586 2587 update_curr_rt(rq); 2588 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1); 2589 2590 watchdog(rq, p); 2591 2592 /* 2593 * RR tasks need a special form of timeslice management. 2594 * FIFO tasks have no timeslices. 2595 */ 2596 if (p->policy != SCHED_RR) 2597 return; 2598 2599 if (--p->rt.time_slice) 2600 return; 2601 2602 p->rt.time_slice = sched_rr_timeslice; 2603 2604 /* 2605 * Requeue to the end of queue if we (and all of our ancestors) are not 2606 * the only element on the queue 2607 */ 2608 for_each_sched_rt_entity(rt_se) { 2609 if (rt_se->run_list.prev != rt_se->run_list.next) { 2610 requeue_task_rt(rq, p, 0); 2611 resched_curr(rq); 2612 return; 2613 } 2614 } 2615 } 2616 2617 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) 2618 { 2619 /* 2620 * Time slice is 0 for SCHED_FIFO tasks 2621 */ 2622 if (task->policy == SCHED_RR) 2623 return sched_rr_timeslice; 2624 else 2625 return 0; 2626 } 2627 2628 DEFINE_SCHED_CLASS(rt) = { 2629 2630 .enqueue_task = enqueue_task_rt, 2631 .dequeue_task = dequeue_task_rt, 2632 .yield_task = yield_task_rt, 2633 2634 .check_preempt_curr = check_preempt_curr_rt, 2635 2636 .pick_next_task = pick_next_task_rt, 2637 .put_prev_task = put_prev_task_rt, 2638 .set_next_task = set_next_task_rt, 2639 2640 #ifdef CONFIG_SMP 2641 .balance = balance_rt, 2642 .pick_task = pick_task_rt, 2643 .select_task_rq = select_task_rq_rt, 2644 .set_cpus_allowed = set_cpus_allowed_common, 2645 .rq_online = rq_online_rt, 2646 .rq_offline = rq_offline_rt, 2647 .task_woken = task_woken_rt, 2648 .switched_from = switched_from_rt, 2649 .find_lock_rq = find_lock_lowest_rq, 2650 #endif 2651 2652 .task_tick = task_tick_rt, 2653 2654 .get_rr_interval = get_rr_interval_rt, 2655 2656 .prio_changed = prio_changed_rt, 2657 .switched_to = switched_to_rt, 2658 2659 .update_curr = update_curr_rt, 2660 2661 #ifdef CONFIG_UCLAMP_TASK 2662 .uclamp_enabled = 1, 2663 #endif 2664 }; 2665 2666 #ifdef CONFIG_RT_GROUP_SCHED 2667 /* 2668 * Ensure that the real time constraints are schedulable. 2669 */ 2670 static DEFINE_MUTEX(rt_constraints_mutex); 2671 2672 static inline int tg_has_rt_tasks(struct task_group *tg) 2673 { 2674 struct task_struct *task; 2675 struct css_task_iter it; 2676 int ret = 0; 2677 2678 /* 2679 * Autogroups do not have RT tasks; see autogroup_create(). 2680 */ 2681 if (task_group_is_autogroup(tg)) 2682 return 0; 2683 2684 css_task_iter_start(&tg->css, 0, &it); 2685 while (!ret && (task = css_task_iter_next(&it))) 2686 ret |= rt_task(task); 2687 css_task_iter_end(&it); 2688 2689 return ret; 2690 } 2691 2692 struct rt_schedulable_data { 2693 struct task_group *tg; 2694 u64 rt_period; 2695 u64 rt_runtime; 2696 }; 2697 2698 static int tg_rt_schedulable(struct task_group *tg, void *data) 2699 { 2700 struct rt_schedulable_data *d = data; 2701 struct task_group *child; 2702 unsigned long total, sum = 0; 2703 u64 period, runtime; 2704 2705 period = ktime_to_ns(tg->rt_bandwidth.rt_period); 2706 runtime = tg->rt_bandwidth.rt_runtime; 2707 2708 if (tg == d->tg) { 2709 period = d->rt_period; 2710 runtime = d->rt_runtime; 2711 } 2712 2713 /* 2714 * Cannot have more runtime than the period. 2715 */ 2716 if (runtime > period && runtime != RUNTIME_INF) 2717 return -EINVAL; 2718 2719 /* 2720 * Ensure we don't starve existing RT tasks if runtime turns zero. 2721 */ 2722 if (rt_bandwidth_enabled() && !runtime && 2723 tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg)) 2724 return -EBUSY; 2725 2726 total = to_ratio(period, runtime); 2727 2728 /* 2729 * Nobody can have more than the global setting allows. 2730 */ 2731 if (total > to_ratio(global_rt_period(), global_rt_runtime())) 2732 return -EINVAL; 2733 2734 /* 2735 * The sum of our children's runtime should not exceed our own. 2736 */ 2737 list_for_each_entry_rcu(child, &tg->children, siblings) { 2738 period = ktime_to_ns(child->rt_bandwidth.rt_period); 2739 runtime = child->rt_bandwidth.rt_runtime; 2740 2741 if (child == d->tg) { 2742 period = d->rt_period; 2743 runtime = d->rt_runtime; 2744 } 2745 2746 sum += to_ratio(period, runtime); 2747 } 2748 2749 if (sum > total) 2750 return -EINVAL; 2751 2752 return 0; 2753 } 2754 2755 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) 2756 { 2757 int ret; 2758 2759 struct rt_schedulable_data data = { 2760 .tg = tg, 2761 .rt_period = period, 2762 .rt_runtime = runtime, 2763 }; 2764 2765 rcu_read_lock(); 2766 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data); 2767 rcu_read_unlock(); 2768 2769 return ret; 2770 } 2771 2772 static int tg_set_rt_bandwidth(struct task_group *tg, 2773 u64 rt_period, u64 rt_runtime) 2774 { 2775 int i, err = 0; 2776 2777 /* 2778 * Disallowing the root group RT runtime is BAD, it would disallow the 2779 * kernel creating (and or operating) RT threads. 2780 */ 2781 if (tg == &root_task_group && rt_runtime == 0) 2782 return -EINVAL; 2783 2784 /* No period doesn't make any sense. */ 2785 if (rt_period == 0) 2786 return -EINVAL; 2787 2788 /* 2789 * Bound quota to defend quota against overflow during bandwidth shift. 2790 */ 2791 if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime) 2792 return -EINVAL; 2793 2794 mutex_lock(&rt_constraints_mutex); 2795 err = __rt_schedulable(tg, rt_period, rt_runtime); 2796 if (err) 2797 goto unlock; 2798 2799 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 2800 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 2801 tg->rt_bandwidth.rt_runtime = rt_runtime; 2802 2803 for_each_possible_cpu(i) { 2804 struct rt_rq *rt_rq = tg->rt_rq[i]; 2805 2806 raw_spin_lock(&rt_rq->rt_runtime_lock); 2807 rt_rq->rt_runtime = rt_runtime; 2808 raw_spin_unlock(&rt_rq->rt_runtime_lock); 2809 } 2810 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 2811 unlock: 2812 mutex_unlock(&rt_constraints_mutex); 2813 2814 return err; 2815 } 2816 2817 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) 2818 { 2819 u64 rt_runtime, rt_period; 2820 2821 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); 2822 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; 2823 if (rt_runtime_us < 0) 2824 rt_runtime = RUNTIME_INF; 2825 else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC) 2826 return -EINVAL; 2827 2828 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 2829 } 2830 2831 long sched_group_rt_runtime(struct task_group *tg) 2832 { 2833 u64 rt_runtime_us; 2834 2835 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) 2836 return -1; 2837 2838 rt_runtime_us = tg->rt_bandwidth.rt_runtime; 2839 do_div(rt_runtime_us, NSEC_PER_USEC); 2840 return rt_runtime_us; 2841 } 2842 2843 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us) 2844 { 2845 u64 rt_runtime, rt_period; 2846 2847 if (rt_period_us > U64_MAX / NSEC_PER_USEC) 2848 return -EINVAL; 2849 2850 rt_period = rt_period_us * NSEC_PER_USEC; 2851 rt_runtime = tg->rt_bandwidth.rt_runtime; 2852 2853 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); 2854 } 2855 2856 long sched_group_rt_period(struct task_group *tg) 2857 { 2858 u64 rt_period_us; 2859 2860 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); 2861 do_div(rt_period_us, NSEC_PER_USEC); 2862 return rt_period_us; 2863 } 2864 2865 static int sched_rt_global_constraints(void) 2866 { 2867 int ret = 0; 2868 2869 mutex_lock(&rt_constraints_mutex); 2870 ret = __rt_schedulable(NULL, 0, 0); 2871 mutex_unlock(&rt_constraints_mutex); 2872 2873 return ret; 2874 } 2875 2876 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) 2877 { 2878 /* Don't accept realtime tasks when there is no way for them to run */ 2879 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) 2880 return 0; 2881 2882 return 1; 2883 } 2884 2885 #else /* !CONFIG_RT_GROUP_SCHED */ 2886 static int sched_rt_global_constraints(void) 2887 { 2888 unsigned long flags; 2889 int i; 2890 2891 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 2892 for_each_possible_cpu(i) { 2893 struct rt_rq *rt_rq = &cpu_rq(i)->rt; 2894 2895 raw_spin_lock(&rt_rq->rt_runtime_lock); 2896 rt_rq->rt_runtime = global_rt_runtime(); 2897 raw_spin_unlock(&rt_rq->rt_runtime_lock); 2898 } 2899 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 2900 2901 return 0; 2902 } 2903 #endif /* CONFIG_RT_GROUP_SCHED */ 2904 2905 static int sched_rt_global_validate(void) 2906 { 2907 if (sysctl_sched_rt_period <= 0) 2908 return -EINVAL; 2909 2910 if ((sysctl_sched_rt_runtime != RUNTIME_INF) && 2911 ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) || 2912 ((u64)sysctl_sched_rt_runtime * 2913 NSEC_PER_USEC > max_rt_runtime))) 2914 return -EINVAL; 2915 2916 return 0; 2917 } 2918 2919 static void sched_rt_do_global(void) 2920 { 2921 unsigned long flags; 2922 2923 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 2924 def_rt_bandwidth.rt_runtime = global_rt_runtime(); 2925 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); 2926 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 2927 } 2928 2929 int sched_rt_handler(struct ctl_table *table, int write, void *buffer, 2930 size_t *lenp, loff_t *ppos) 2931 { 2932 int old_period, old_runtime; 2933 static DEFINE_MUTEX(mutex); 2934 int ret; 2935 2936 mutex_lock(&mutex); 2937 old_period = sysctl_sched_rt_period; 2938 old_runtime = sysctl_sched_rt_runtime; 2939 2940 ret = proc_dointvec(table, write, buffer, lenp, ppos); 2941 2942 if (!ret && write) { 2943 ret = sched_rt_global_validate(); 2944 if (ret) 2945 goto undo; 2946 2947 ret = sched_dl_global_validate(); 2948 if (ret) 2949 goto undo; 2950 2951 ret = sched_rt_global_constraints(); 2952 if (ret) 2953 goto undo; 2954 2955 sched_rt_do_global(); 2956 sched_dl_do_global(); 2957 } 2958 if (0) { 2959 undo: 2960 sysctl_sched_rt_period = old_period; 2961 sysctl_sched_rt_runtime = old_runtime; 2962 } 2963 mutex_unlock(&mutex); 2964 2965 return ret; 2966 } 2967 2968 int sched_rr_handler(struct ctl_table *table, int write, void *buffer, 2969 size_t *lenp, loff_t *ppos) 2970 { 2971 int ret; 2972 static DEFINE_MUTEX(mutex); 2973 2974 mutex_lock(&mutex); 2975 ret = proc_dointvec(table, write, buffer, lenp, ppos); 2976 /* 2977 * Make sure that internally we keep jiffies. 2978 * Also, writing zero resets the timeslice to default: 2979 */ 2980 if (!ret && write) { 2981 sched_rr_timeslice = 2982 sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE : 2983 msecs_to_jiffies(sysctl_sched_rr_timeslice); 2984 } 2985 mutex_unlock(&mutex); 2986 2987 return ret; 2988 } 2989 2990 #ifdef CONFIG_SCHED_DEBUG 2991 void print_rt_stats(struct seq_file *m, int cpu) 2992 { 2993 rt_rq_iter_t iter; 2994 struct rt_rq *rt_rq; 2995 2996 rcu_read_lock(); 2997 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) 2998 print_rt_rq(m, cpu, rt_rq); 2999 rcu_read_unlock(); 3000 } 3001 #endif /* CONFIG_SCHED_DEBUG */ 3002