// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
#include "sched.h"

#include "pelt.h"

int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;
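/*
 * Note on units (illustrative): sched_rr_timeslice is kept in jiffies
 * while the sysctl is exposed in milliseconds.  RR_TIMESLICE is defined
 * as (100 * HZ / 1000), i.e. 100 msec worth of jiffies, so with HZ=250
 * it is 25 jiffies and the conversion above yields
 * (1000 / 250) * 25 = 100 msec for sysctl_sched_rr_timeslice.
 */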
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;
	int overrun;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
		if (!overrun)
			break;

		raw_spin_unlock(&rt_b->rt_runtime_lock);
		idle = do_sched_rt_period_timer(rt_b, overrun);
		raw_spin_lock(&rt_b->rt_runtime_lock);
	}
	if (idle)
		rt_b->rt_period_active = 0;
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_HARD);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	raw_spin_lock(&rt_b->rt_runtime_lock);
	if (!rt_b->rt_period_active) {
		rt_b->rt_period_active = 1;
		/*
		 * SCHED_DEADLINE updates the bandwidth, as a run away
		 * RT task with a DL task could hog a CPU. But DL does
		 * not reset the period. If a deadline task was running
		 * without an RT task running, it can cause RT tasks to
		 * throttle when they start up. Kick the timer right away
		 * to update the period.
		 */
		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
		hrtimer_start_expires(&rt_b->rt_period_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	do_start_rt_bandwidth(rt_b);
}

void init_rt_rq(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	rt_rq->highest_prio.next = MAX_RT_PRIO-1;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}
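/*
 * Example of why the delimiter bit above matters: sched_find_first_bit()
 * scans one bit past the priority range, so with bit MAX_RT_PRIO always
 * set an empty array yields MAX_RT_PRIO rather than an out-of-range
 * result, and callers can treat that index as "no RT task queued".
 */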
#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	return rt_rq->rq;
}

void unregister_rt_sched_group(struct task_group *tg)
{
	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		      struct sched_rt_entity *rt_se, int cpu,
		      struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq);
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);

	return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	return &rq->rt;
}

void unregister_rt_sched_group(struct task_group *tg) { }

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */
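/*
 * Layout sketch for the group case (illustrative): every task_group owns
 * one rt_rq and one sched_rt_entity per CPU.  An entity's ->my_q points
 * at the group's own queue, while ->rt_rq points at the queue the entity
 * is itself enqueued on (the parent group's rt_rq, or the root &rq->rt):
 *
 *   rq->rt  <--(->rt_rq)-- tg->rt_se[cpu] --(->my_q)--> tg->rt_rq[cpu]
 *                                                          ^
 *                                          child group entities enqueue here
 */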
#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->online && rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void rt_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void rt_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else {
		rq->rt.highest_prio.next = MAX_RT_PRIO-1;
	}
}
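/*
 * Note on ordering (see include/linux/plist.h): pushable_tasks is a
 * priority-sorted list keyed on p->prio, where a lower value means a
 * higher priority, so plist_first_entry() above always yields the
 * highest-priority pushable task and highest_prio.next can be refreshed
 * in O(1) on dequeue.
 */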
#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->on_rq;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 * settings.
 *
 * This check is only important for heterogeneous systems where uclamp_min
 * value is higher than the capacity of a @cpu. For non-heterogeneous systems
 * this function will always return true.
 *
 * The function will return true if the capacity of the @cpu is >= the
 * uclamp_min and false otherwise.
 *
 * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
 * > uclamp_max.
 */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned int min_cap;
	unsigned int max_cap;
	unsigned int cpu_cap;

	/* Only heterogeneous systems can benefit from this check */
	if (!static_branch_unlikely(&sched_asym_cpucapacity))
		return true;

	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
	max_cap = uclamp_eff_value(p, UCLAMP_MAX);

	cpu_cap = capacity_orig_of(cpu);

	return cpu_cap >= min(min_cap, max_cap);
}
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	return true;
}
#endif
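/*
 * Worked example (illustrative, assuming a big.LITTLE system): a little
 * CPU with capacity_orig 512 does not fit a task with uclamp_min 768 and
 * uclamp_max 1024, since min(768, 1024) = 768 > 512.  Clamping uclamp_min
 * down to uclamp_max keeps a deliberately capped task (say uclamp_max 256)
 * schedulable on that same little CPU.
 */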
#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, 0);

		if (rt_rq->highest_prio.curr < curr->prio)
			resched_curr(rq);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (!rt_se) {
		dequeue_top_rt_rq(rt_rq);
		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
	}
	else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se, 0);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}
#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}
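/*
 * Worked example (illustrative): on a 4-CPU root domain with a 1000ms
 * period, suppose this rq has exhausted its 950ms of runtime while a
 * neighbour has consumed only 150ms of its 950ms.  The neighbour's spare
 * time is 800ms and 1/4 of that is 200ms, but since 950ms + 200ms would
 * exceed the period, the transfer is clipped to 50ms; this rq ends up
 * with the full 1000ms and the loop stops.
 */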
/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);
	}
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void balance_runtime(struct rt_rq *rt_rq)
{
	if (!sched_feat(RT_RUNTIME_SHARE))
		return;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */
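/*
 * Note: __disable_runtime() and __enable_runtime() are the CPU hotplug
 * half of this scheme; they are invoked from the rq_offline_rt() and
 * rq_online_rt() callbacks further down in this file, so a CPU going
 * offline first reclaims whatever runtime it lent out.
 */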
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled.  If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway.  Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);
		int skip;

		/*
		 * When span == cpu_online_mask, taking each rq->lock
		 * can be time-consuming. Try to avoid it when possible.
		 */
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
			rt_rq->rt_runtime = rt_b->rt_runtime;
		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		if (skip)
			continue;

		raw_spin_rq_lock(rq);
		update_rq_clock(rq);

		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * When we're idle and a woken (rt) task is
				 * throttled check_preempt_curr() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle will get accounted as
				 * 'runtime'.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq_clock_cancel_skipupdate(rq);
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_rq_unlock(rq);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			rt_rq->rt_throttled = 1;
			printk_deferred_once("sched: RT throttling activated\n");
		} else {
			/*
			 * In case we did anyway, make it go away,
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}
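/*
 * Worked example (assuming the default sysctl values): with
 * sched_rt_period_us = 1000000 and sched_rt_runtime_us = 950000, an RT
 * hog is throttled once rt_time exceeds 950ms within the period, leaving
 * roughly 50ms per second for non-RT work until the period timer
 * replenishes rt_time and unthrottles the queue.
 */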
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	u64 delta_exec;
	u64 now;

	if (curr->sched_class != &rt_sched_class)
		return;

	now = rq_clock_task(rq);
	delta_exec = now - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->stats.exec_max,
		      max(curr->stats.exec_max, delta_exec));

	trace_sched_stat_runtime(curr, delta_exec, 0);

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = now;
	cgroup_account_cputime(curr, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
		int exceeded;

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			exceeded = sched_rt_runtime_exceeded(rt_rq);
			if (exceeded)
				resched_curr(rq);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
			if (exceeded)
				do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
		}
	}
}

static void
dequeue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (!rt_rq->rt_queued)
		return;

	BUG_ON(!rq->nr_running);

	sub_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 0;
}

static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (rt_rq->rt_queued)
		return;

	if (rt_rq_throttled(rt_rq))
		return;

	if (rt_rq->rt_nr_running) {
		add_nr_running(rq, rt_rq->rt_nr_running);
		rt_rq->rt_queued = 1;
	}

	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq, 0);
}
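/*
 * Note: rt_queued tracks whether this rt_rq's tasks are currently counted
 * in rq->nr_running.  A throttled queue is dequeued above, so from the
 * core scheduler's point of view the CPU may look idle even though RT
 * tasks remain queued; enqueue_top_rt_rq() reverses this once runtime is
 * replenished.
 */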
#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else {
		rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	}

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq)
		return group_rq->rt_nr_running;
	else
		return 1;
}

static inline
unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct task_struct *tsk;

	if (group_rq)
		return group_rq->rr_nr_running;

	tsk = rt_task_of(rt_se);

	return (tsk->policy == SCHED_RR) ? 1 : 0;
}

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

/*
 * Change rt_se->run_list location unless SAVE && !MOVE
 *
 * assumes ENQUEUE/DEQUEUE flags match
 */
static inline bool move_entity(unsigned int flags)
{
	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
		return false;

	return true;
}
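/*
 * Flag combinations (illustrative): a plain dequeue/enqueue passes neither
 * SAVE nor MOVE and relocates the entity; the save/restore pattern used
 * when changing a task's class or priority passes SAVE|MOVE when the run
 * list position must be refreshed, and SAVE alone when the entity should
 * keep its place.
 */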
static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
{
	list_del_init(&rt_se->run_list);

	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	rt_se->on_list = 0;
}

static inline struct sched_statistics *
__schedstats_from_rt_se(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	/* schedstats is not supported for rt group. */
	if (!rt_entity_is_task(rt_se))
		return NULL;
#endif

	return &rt_task_of(rt_se)->stats;
}

static inline void
update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
	struct sched_statistics *stats;
	struct task_struct *p = NULL;

	if (!schedstat_enabled())
		return;

	if (rt_entity_is_task(rt_se))
		p = rt_task_of(rt_se);

	stats = __schedstats_from_rt_se(rt_se);
	if (!stats)
		return;

	__update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);
}

static inline void
update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
	struct sched_statistics *stats;
	struct task_struct *p = NULL;

	if (!schedstat_enabled())
		return;

	if (rt_entity_is_task(rt_se))
		p = rt_task_of(rt_se);

	stats = __schedstats_from_rt_se(rt_se);
	if (!stats)
		return;

	__update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats);
}

static inline void
update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
			int flags)
{
	if (!schedstat_enabled())
		return;

	if (flags & ENQUEUE_WAKEUP)
		update_stats_enqueue_sleeper_rt(rt_rq, rt_se);
}

static inline void
update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
{
	struct sched_statistics *stats;
	struct task_struct *p = NULL;

	if (!schedstat_enabled())
		return;

	if (rt_entity_is_task(rt_se))
		p = rt_task_of(rt_se);

	stats = __schedstats_from_rt_se(rt_se);
	if (!stats)
		return;

	__update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats);
}

static inline void
update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
			int flags)
{
	struct task_struct *p = NULL;

	if (!schedstat_enabled())
		return;

	if (rt_entity_is_task(rt_se))
		p = rt_task_of(rt_se);

	if ((flags & DEQUEUE_SLEEP) && p) {
		unsigned int state;

		state = READ_ONCE(p->__state);
		if (state & TASK_INTERRUPTIBLE)
			__schedstat_set(p->stats.sleep_start,
					rq_clock(rq_of_rt_rq(rt_rq)));

		if (state & TASK_UNINTERRUPTIBLE)
			__schedstat_set(p->stats.block_start,
					rq_clock(rq_of_rt_rq(rt_rq)));
	}
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
		if (rt_se->on_list)
			__delist_rt_entity(rt_se, array);
		return;
	}

	if (move_entity(flags)) {
		WARN_ON_ONCE(rt_se->on_list);
		if (flags & ENQUEUE_HEAD)
			list_add(&rt_se->run_list, queue);
		else
			list_add_tail(&rt_se->run_list, queue);

		__set_bit(rt_se_prio(rt_se), array->bitmap);
		rt_se->on_list = 1;
	}
	rt_se->on_rq = 1;

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	if (move_entity(flags)) {
		WARN_ON_ONCE(!rt_se->on_list);
		__delist_rt_entity(rt_se, array);
	}
	rt_se->on_rq = 0;

	dec_rt_tasks(rt_se, rt_rq);
}
/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	dequeue_top_rt_rq(rt_rq_of_se(back));

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se, flags);
	}
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags);

	dequeue_rt_stack(rt_se, flags);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, flags);
	enqueue_top_rt_rq(&rq->rt);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags);

	dequeue_rt_stack(rt_se, flags);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, flags);
	}
	enqueue_top_rt_rq(&rq->rt);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	check_schedstat_required();
	update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);

	enqueue_rt_entity(rt_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se, flags);

	dequeue_pushable_task(rq, p);
}
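/*
 * Illustrative walk for a task in a nested group B inside group A:
 * dequeue_rt_stack() records back-pointers so the stack is unwound
 * top-down (A's entity, then B's, then the task's), since each level's
 * priority depends on the level below it; the enqueue loops above then
 * rebuild bottom-up from the task's own entity through its parents.
 */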
/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int cpu, int flags)
{
	struct task_struct *curr;
	struct rq *rq;
	bool test;

	/* For anything but wake ups, just return the task_cpu */
	if (!(flags & (WF_TTWU | WF_FORK)))
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = READ_ONCE(rq->curr); /* unlocked access */

	/*
	 * If the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is a higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher task
	 * around just because it gave up its CPU, perhaps for a
	 * lock?
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 *
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 *
	 * This test is optimistic, if we get it wrong the load-balancer
	 * will have to sort it out.
	 *
	 * We take into account the capacity of the CPU to ensure it fits the
	 * requirement of the task - which is only important on heterogeneous
	 * systems like big.LITTLE.
	 */
	test = curr &&
	       unlikely(rt_task(curr)) &&
	       (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);

	if (test || !rt_task_fits_capacity(p, cpu)) {
		int target = find_lowest_rq(p);

		/*
		 * Bail out if we were forcing a migration to find a better
		 * fitting CPU but our search failed.
		 */
		if (!test && target != -1 && !rt_task_fits_capacity(p, target))
			goto out_unlock;

		/*
		 * Don't bother moving it if the destination CPU is
		 * not running a lower priority task.
		 */
		if (target != -1 &&
		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
			cpu = target;
	}

out_unlock:
	rcu_read_unlock();

out:
	return cpu;
}
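/*
 * Scenario (illustrative): CPU0 is running an RT task at prio 10 when a
 * prio 20 RT task wakes there.  The waking task is lower priority (higher
 * value), so "test" is true and find_lowest_rq() is consulted; if CPU3 is
 * running only a CFS task, the wakeup is redirected there rather than
 * queueing behind the prio 10 task and waiting for a later push.
 */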
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	/*
	 * There appear to be other CPUs that can accept
	 * the current task but none can run 'p', so let's reschedule
	 * to try and push the current task away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_curr(rq);
}

static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we've
		 * not yet started the picking loop.
		 */
		rq_unpin_lock(rq, rf);
		pull_rt_task(rq);
		rq_repin_lock(rq, rf);
	}

	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
}
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu.  If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
		check_preempt_equal_prio(rq, p);
#endif
}
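/*
 * Reminder on the comparisons above: lower p->prio values mean higher
 * priority.  A user-space SCHED_FIFO/SCHED_RR priority of 1..99 maps to
 * p->prio 98..0, so "p->prio < rq->curr->prio" reads as "the woken task
 * outranks the running one".
 */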
static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq = &rq->rt;

	p->se.exec_start = rq_clock_task(rq);
	if (on_rt_rq(&p->rt))
		update_stats_wait_end_rt(rt_rq, rt_se);

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);

	if (!first)
		return;

	/*
	 * If prev task was rt, put_prev_task() has already updated the
	 * utilization.  We only care about the case where we start to
	 * schedule an rt task.
	 */
	if (rq->curr->sched_class != &rt_sched_class)
		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);

	rt_queue_push_tasks(rq);
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct rt_rq *rt_rq = &rq->rt;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	return rt_task_of(rt_se);
}

static struct task_struct *pick_task_rt(struct rq *rq)
{
	struct task_struct *p;

	if (!sched_rt_runnable(rq))
		return NULL;

	p = _pick_next_task_rt(rq);

	return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct task_struct *p = pick_task_rt(rq);

	if (p)
		set_next_task_rt(rq, p, true);

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq = &rq->rt;

	if (on_rt_rq(&p->rt))
		update_stats_wait_start_rt(rt_rq, rt_se);

	update_curr_rt(rq);

	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, &p->cpus_mask))
		return 1;

	return 0;
}

/*
 * Return the highest pushable rq's task, which is suitable to be executed
 * on the CPU, NULL otherwise
 */
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
	struct plist_head *head = &rq->rt.pushable_tasks;
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	plist_for_each_entry(p, head, pushable_tasks) {
		if (pick_rt_task(rq, p, cpu))
			return p;
	}

	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
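/*
 * Background (see kernel/sched/cpupri.c): cpupri classifies every CPU by
 * the priority of its current top task (non-RT, or one of the RT levels).
 * cpupri_find*() below fills lowest_mask with the CPUs in the lowest such
 * class that the task may run on, so find_lowest_rq() only has to pick
 * the topologically closest candidate from that mask.
 */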
static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu      = task_cpu(task);
	int ret;

	/* Make sure the mask is initialized first */
	if (unlikely(!lowest_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	/*
	 * If we're on an asym system, ensure we consider the different
	 * capacities of the CPUs when searching for the lowest_mask.
	 */
	if (static_branch_unlikely(&sched_asym_cpucapacity)) {

		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
					  task, lowest_mask,
					  rt_task_fits_capacity);
	} else {

		ret = cpupri_find(&task_rq(task)->rd->cpupri,
				  task, lowest_mask);
	}

	if (!ret)
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of CPUs representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last CPU that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which CPU is logically closest to our hot cache data.
	 */
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			best_cpu = cpumask_any_and_distribute(lowest_mask,
							      sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any_distribute(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}
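/*
 * Note on locking (illustrative): double_lock_balance() may have to drop
 * this rq's lock in order to take both rq locks in a deadlock-safe order,
 * and it returns nonzero in that case.  That window is why
 * find_lock_lowest_rq() re-checks that the task has not migrated, changed
 * affinity, started running, left the RT class, or been dequeued before
 * trusting lowest_rq.
 */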
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
			/*
			 * Target rq has tasks of equal or higher priority,
			 * retrying does not release any lock and is unlikely
			 * to yield a different result.
			 */
			lowest_rq = NULL;
			break;
		}

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
				     task_running(rq, task) ||
				     !rt_task(task) ||
				     !task_on_rq_queued(task))) {

				double_unlock_balance(rq, lowest_rq);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!rt_task(p));

	return p;
}
/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq, bool pull)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

retry:
	if (is_migration_disabled(next_task)) {
		struct task_struct *push_task = NULL;
		int cpu;

		if (!pull || rq->push_busy)
			return 0;

		cpu = find_lowest_rq(rq->curr);
		if (cpu == -1 || cpu == rq->cpu)
			return 0;

		/*
		 * Given we found a CPU with lower priority than @next_task,
		 * it should be running. However we cannot migrate it
		 * to this other CPU, instead attempt to push the current
		 * running task on this CPU away.
		 */
		push_task = get_push_task(rq);
		if (push_task) {
			raw_spin_rq_unlock(rq);
			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
					    push_task, &rq->push_work);
			raw_spin_rq_lock(rq);
		}

		return 0;
	}

	if (WARN_ON(next_task == rq->curr))
		return 0;

	/*
	 * It's possible that the next_task slipped in with
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_curr(rq);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task == next_task) {
			/*
			 * The task hasn't migrated, and is still the next
			 * eligible task, but we failed to find a run-queue
			 * to push it to.  Do not retry in this case, since
			 * other CPUs will pull from us when ready.
			 */
			goto out;
		}

		if (!task)
			/* No more tasks, just exit */
			goto out;

		/*
		 * Something has shifted, try again.
		 */
		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);
	resched_curr(lowest_rq);
	ret = 1;

	double_unlock_balance(rq, lowest_rq);
out:
	put_task_struct(next_task);

	return ret;
}

static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task() will return true if it moved an RT task */
	while (push_rt_task(rq, false))
		;
}
	 *
	 * If there are no more CPUs left in the rto_mask, then a check is
	 * made against rto_loop and rto_loop_next. rto_loop is only updated
	 * with the rto_lock held, but any CPU may increment rto_loop_next
	 * without any locking.
	 */
	for (;;) {

		/* When rto_cpu is -1 this acts like cpumask_first() */
		cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);

		rd->rto_cpu = cpu;

		if (cpu < nr_cpu_ids)
			return cpu;

		rd->rto_cpu = -1;

		/*
		 * ACQUIRE ensures we see the @rto_mask changes
		 * made prior to the @next value observed.
		 *
		 * Matches WMB in rt_set_overload().
		 */
		next = atomic_read_acquire(&rd->rto_loop_next);

		if (rd->rto_loop == next)
			break;

		rd->rto_loop = next;
	}

	return -1;
}

static inline bool rto_start_trylock(atomic_t *v)
{
	return !atomic_cmpxchg_acquire(v, 0, 1);
}

static inline void rto_start_unlock(atomic_t *v)
{
	atomic_set_release(v, 0);
}

static void tell_cpu_to_push(struct rq *rq)
{
	int cpu = -1;

	/* Keep the loop going if the IPI is currently active */
	atomic_inc(&rq->rd->rto_loop_next);

	/* Only one CPU can initiate a loop at a time */
	if (!rto_start_trylock(&rq->rd->rto_loop_start))
		return;

	raw_spin_lock(&rq->rd->rto_lock);

	/*
	 * The rto_cpu is updated under the lock. If it holds a valid CPU,
	 * then the IPI is still running and will continue due to the
	 * update to rto_loop_next, and nothing needs to be done here.
	 * Otherwise it is finishing up and an IPI needs to be sent.
	 */
	if (rq->rd->rto_cpu < 0)
		cpu = rto_next_cpu(rq->rd);

	raw_spin_unlock(&rq->rd->rto_lock);

	rto_start_unlock(&rq->rd->rto_loop_start);

	if (cpu >= 0) {
		/* Make sure the rd does not get freed while pushing */
		sched_get_rd(rq->rd);
		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
	}
}

/* Called from hardirq context */
void rto_push_irq_work_func(struct irq_work *work)
{
	struct root_domain *rd =
		container_of(work, struct root_domain, rto_push_work);
	struct rq *rq;
	int cpu;

	rq = this_rq();

	/*
	 * We do not need to grab the lock to check for has_pushable_tasks.
	 * When it gets updated, a check is made if a push is possible.
	 */
	if (has_pushable_tasks(rq)) {
		raw_spin_rq_lock(rq);
		while (push_rt_task(rq, true))
			;
		raw_spin_rq_unlock(rq);
	}

	raw_spin_lock(&rd->rto_lock);

	/* Pass the IPI to the next rt overloaded queue */
	cpu = rto_next_cpu(rd);

	raw_spin_unlock(&rd->rto_lock);

	if (cpu < 0) {
		sched_put_rd(rd);
		return;
	}

	/* Try the next RT overloaded CPU */
	irq_work_queue_on(&rd->rto_push_work, cpu);
}
#endif /* HAVE_RT_PUSH_IPI */

static void pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, cpu;
	bool resched = false;
	struct task_struct *p, *push_task;
	struct rq *src_rq;
	int rt_overload_count = rt_overloaded(this_rq);

	if (likely(!rt_overload_count))
		return;

	/*
	 * Match the barrier from rt_set_overload(); this guarantees that
	 * if we see overloaded we must also see the rto_mask bit.
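	 *
	 * A rough sketch of the pairing (assuming rt_set_overload()
	 * publishes the mask bit before bumping the overload count):
	 *
	 *	CPU0 (rt_set_overload)		CPU1 (pull_rt_task)
	 *	set cpu bit in rto_mask		read rt_overload_count
	 *	smp_wmb()			smp_rmb()
	 *	increment rto_count		read rto_mask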
2313 */ 2314 smp_rmb(); 2315 2316 /* If we are the only overloaded CPU do nothing */ 2317 if (rt_overload_count == 1 && 2318 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) 2319 return; 2320 2321 #ifdef HAVE_RT_PUSH_IPI 2322 if (sched_feat(RT_PUSH_IPI)) { 2323 tell_cpu_to_push(this_rq); 2324 return; 2325 } 2326 #endif 2327 2328 for_each_cpu(cpu, this_rq->rd->rto_mask) { 2329 if (this_cpu == cpu) 2330 continue; 2331 2332 src_rq = cpu_rq(cpu); 2333 2334 /* 2335 * Don't bother taking the src_rq->lock if the next highest 2336 * task is known to be lower-priority than our current task. 2337 * This may look racy, but if this value is about to go 2338 * logically higher, the src_rq will push this task away. 2339 * And if its going logically lower, we do not care 2340 */ 2341 if (src_rq->rt.highest_prio.next >= 2342 this_rq->rt.highest_prio.curr) 2343 continue; 2344 2345 /* 2346 * We can potentially drop this_rq's lock in 2347 * double_lock_balance, and another CPU could 2348 * alter this_rq 2349 */ 2350 push_task = NULL; 2351 double_lock_balance(this_rq, src_rq); 2352 2353 /* 2354 * We can pull only a task, which is pushable 2355 * on its rq, and no others. 2356 */ 2357 p = pick_highest_pushable_task(src_rq, this_cpu); 2358 2359 /* 2360 * Do we have an RT task that preempts 2361 * the to-be-scheduled task? 2362 */ 2363 if (p && (p->prio < this_rq->rt.highest_prio.curr)) { 2364 WARN_ON(p == src_rq->curr); 2365 WARN_ON(!task_on_rq_queued(p)); 2366 2367 /* 2368 * There's a chance that p is higher in priority 2369 * than what's currently running on its CPU. 2370 * This is just that p is waking up and hasn't 2371 * had a chance to schedule. We only pull 2372 * p if it is lower in priority than the 2373 * current task on the run queue 2374 */ 2375 if (p->prio < src_rq->curr->prio) 2376 goto skip; 2377 2378 if (is_migration_disabled(p)) { 2379 push_task = get_push_task(src_rq); 2380 } else { 2381 deactivate_task(src_rq, p, 0); 2382 set_task_cpu(p, this_cpu); 2383 activate_task(this_rq, p, 0); 2384 resched = true; 2385 } 2386 /* 2387 * We continue with the search, just in 2388 * case there's an even higher prio task 2389 * in another runqueue. 
			 * (low likelihood but possible)
			 */
		}
skip:
		double_unlock_balance(this_rq, src_rq);

		if (push_task) {
			raw_spin_rq_unlock(this_rq);
			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
					    push_task, &src_rq->push_work);
			raw_spin_rq_lock(this_rq);
		}
	}

	if (resched)
		resched_curr(this_rq);
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now.
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
	bool need_to_push = !task_running(rq, p) &&
			    !test_tsk_need_resched(rq->curr) &&
			    p->nr_cpus_allowed > 1 &&
			    (dl_task(rq->curr) || rt_task(rq->curr)) &&
			    (rq->curr->nr_cpus_allowed < 2 ||
			     rq->curr->prio <= p->prio);

	if (need_to_push)
		push_rt_tasks(rq);
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching away from the RT queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
		return;

	rt_queue_pull_task(rq);
}

void __init init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
	}
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * If we are running, update the avg_rt tracking, as the running
	 * time will from now on be accounted there.
	 */
	if (task_current(rq, p)) {
		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
		return;
	}

	/*
	 * If we are not running we may need to preempt the current
	 * running task. If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (task_on_rq_queued(p)) {
#ifdef CONFIG_SMP
		if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
			rt_queue_push_tasks(rq);
#endif /* CONFIG_SMP */
		if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
			resched_curr(rq);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
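 *
 * (For example: a running task whose PI boost is released ends up with
 * oldprio < p->prio, so we may be able to pull a now-higher-priority task
 * from another runqueue; a queued task boosted above the current task
 * simply triggers a reschedule.)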
 */
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!task_on_rq_queued(p))
		return;

	if (task_current(rq, p)) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			rt_queue_pull_task(rq);

		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule.
		 */
		if (p->prio > rq->rt.highest_prio.curr)
			resched_curr(rq);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_curr(rq);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if its priority is
		 * higher than that of the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_curr(rq);
	}
}

#ifdef CONFIG_POSIX_TIMERS
static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	/* The limits may change after they are read; fixed up next tick */
	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		if (p->rt.watchdog_stamp != jiffies) {
			p->rt.timeout++;
			p->rt.watchdog_stamp = jiffies;
		}

		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next) {
			posix_cputimers_rt_watchdog(&p->posix_cputimers,
						    p->se.sum_exec_runtime);
		}
	}
}
#else
static inline void watchdog(struct rq *rq, struct task_struct *p) { }
#endif

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
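	 *
	 * For example (assuming HZ=1000 and the default 100ms
	 * RR_TIMESLICE), p->rt.time_slice starts at 100 ticks and is
	 * decremented once per tick below; only when it reaches zero is
	 * the task moved to the tail of its priority queue.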
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = sched_rr_timeslice;

	/*
	 * Requeue to the end of the queue if we (and all of our ancestors)
	 * are not the only element on the queue.
	 */
	for_each_sched_rt_entity(rt_se) {
		if (rt_se->run_list.prev != rt_se->run_list.next) {
			requeue_task_rt(rq, p, 0);
			resched_curr(rq);
			return;
		}
	}
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks.
	 */
	if (task->policy == SCHED_RR)
		return sched_rr_timeslice;
	else
		return 0;
}

DEFINE_SCHED_CLASS(rt) = {

	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,
	.set_next_task		= set_next_task_rt,

#ifdef CONFIG_SMP
	.balance		= balance_rt,
	.pick_task		= pick_task_rt,
	.select_task_rq		= select_task_rq_rt,
	.set_cpus_allowed	= set_cpus_allowed_common,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.task_woken		= task_woken_rt,
	.switched_from		= switched_from_rt,
	.find_lock_rq		= find_lock_lowest_rq,
#endif

	.task_tick		= task_tick_rt,

	.get_rr_interval	= get_rr_interval_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,

	.update_curr		= update_curr_rt,

#ifdef CONFIG_UCLAMP_TASK
	.uclamp_enabled		= 1,
#endif
};

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real-time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

static inline int tg_has_rt_tasks(struct task_group *tg)
{
	struct task_struct *task;
	struct css_task_iter it;
	int ret = 0;

	/*
	 * Autogroups do not have RT tasks; see autogroup_create().
	 */
	if (task_group_is_autogroup(tg))
		return 0;

	css_task_iter_start(&tg->css, 0, &it);
	while (!ret && (task = css_task_iter_next(&it)))
		ret |= rt_task(task);
	css_task_iter_end(&it);

	return ret;
}

struct rt_schedulable_data {
	struct task_group *tg;
	u64 rt_period;
	u64 rt_runtime;
};

static int tg_rt_schedulable(struct task_group *tg, void *data)
{
	struct rt_schedulable_data *d = data;
	struct task_group *child;
	unsigned long total, sum = 0;
	u64 period, runtime;

	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	runtime = tg->rt_bandwidth.rt_runtime;

	if (tg == d->tg) {
		period = d->rt_period;
		runtime = d->rt_runtime;
	}

	/*
	 * Cannot have more runtime than the period.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	/*
	 * Ensure we don't starve existing RT tasks if runtime turns zero.
	 */
	if (rt_bandwidth_enabled() && !runtime &&
	    tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
		return -EBUSY;

	total = to_ratio(period, runtime);

	/*
	 * Nobody can have more than the global setting allows.
	 */
	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
		return -EINVAL;

	/*
	 * The sum of our children's runtime should not exceed our own.
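	 *
	 * The comparison is made on to_ratio() values, i.e. runtime/period
	 * scaled by 2^BW_SHIFT. As a rough example, with BW_SHIFT == 20 a
	 * group granted 950000us of runtime per 1000000us period has a
	 * ratio of about 0.95 * 2^20 ~= 996147, and its children's summed
	 * ratios may not exceed that value.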
	 */
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		period = ktime_to_ns(child->rt_bandwidth.rt_period);
		runtime = child->rt_bandwidth.rt_runtime;

		if (child == d->tg) {
			period = d->rt_period;
			runtime = d->rt_runtime;
		}

		sum += to_ratio(period, runtime);
	}

	if (sum > total)
		return -EINVAL;

	return 0;
}

static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
	int ret;

	struct rt_schedulable_data data = {
		.tg = tg,
		.rt_period = period,
		.rt_runtime = runtime,
	};

	rcu_read_lock();
	ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
	rcu_read_unlock();

	return ret;
}

static int tg_set_rt_bandwidth(struct task_group *tg,
			       u64 rt_period, u64 rt_runtime)
{
	int i, err = 0;

	/*
	 * Disallowing the root group RT runtime is BAD; it would keep the
	 * kernel from creating (and operating) RT threads.
	 */
	if (tg == &root_task_group && rt_runtime == 0)
		return -EINVAL;

	/* A zero period does not make any sense. */
	if (rt_period == 0)
		return -EINVAL;

	/*
	 * Bound the quota to guard against overflow during the bandwidth
	 * shift.
	 */
	if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
		return -EINVAL;

	mutex_lock(&rt_constraints_mutex);
	err = __rt_schedulable(tg, rt_period, rt_runtime);
	if (err)
		goto unlock;

	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
	tg->rt_bandwidth.rt_runtime = rt_runtime;

	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
	mutex_unlock(&rt_constraints_mutex);

	return err;
}

int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
	u64 rt_runtime, rt_period;

	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
	if (rt_runtime_us < 0)
		rt_runtime = RUNTIME_INF;
	else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_runtime(struct task_group *tg)
{
	u64 rt_runtime_us;

	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
		return -1;

	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
	do_div(rt_runtime_us, NSEC_PER_USEC);
	return rt_runtime_us;
}

int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
{
	u64 rt_runtime, rt_period;

	if (rt_period_us > U64_MAX / NSEC_PER_USEC)
		return -EINVAL;

	rt_period = rt_period_us * NSEC_PER_USEC;
	rt_runtime = tg->rt_bandwidth.rt_runtime;

	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_period(struct task_group *tg)
{
	u64 rt_period_us;

	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
	do_div(rt_period_us, NSEC_PER_USEC);
	return rt_period_us;
}

static int sched_rt_global_constraints(void)
{
	int ret = 0;

	mutex_lock(&rt_constraints_mutex);
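	/*
	 * A NULL tg with a zero period and runtime re-validates every
	 * existing group against the current global limits (an assumption
	 * based on how tg_rt_schedulable() only substitutes d->tg's
	 * values when it finds a matching group).
	 */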
	ret = __rt_schedulable(NULL, 0, 0);
	mutex_unlock(&rt_constraints_mutex);

	return ret;
}

int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	/* Don't accept realtime tasks when there is no way for them to run */
	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
		return 0;

	return 1;
}

#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

	return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */

static int sched_rt_global_validate(void)
{
	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
	    ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
	     ((u64)sysctl_sched_rt_runtime *
	      NSEC_PER_USEC > max_rt_runtime)))
		return -EINVAL;

	return 0;
}

static void sched_rt_do_global(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	def_rt_bandwidth.rt_runtime = global_rt_runtime();
	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
}

int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
		     size_t *lenp, loff_t *ppos)
{
	int old_period, old_runtime;
	static DEFINE_MUTEX(mutex);
	int ret;

	mutex_lock(&mutex);
	old_period = sysctl_sched_rt_period;
	old_runtime = sysctl_sched_rt_runtime;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (!ret && write) {
		ret = sched_rt_global_validate();
		if (ret)
			goto undo;

		ret = sched_dl_global_validate();
		if (ret)
			goto undo;

		ret = sched_rt_global_constraints();
		if (ret)
			goto undo;

		sched_rt_do_global();
		sched_dl_do_global();
	}
	if (0) {
undo:
		sysctl_sched_rt_period = old_period;
		sysctl_sched_rt_runtime = old_runtime;
	}
	mutex_unlock(&mutex);

	return ret;
}

int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
		     size_t *lenp, loff_t *ppos)
{
	int ret;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	/*
	 * Make sure that internally we keep the timeslice in jiffies.
	 * Also, writing zero resets the timeslice to the default:
	 */
	if (!ret && write) {
		sched_rr_timeslice =
			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
			msecs_to_jiffies(sysctl_sched_rr_timeslice);
	}
	mutex_unlock(&mutex);

	return ret;
}

#ifdef CONFIG_SCHED_DEBUG
void print_rt_stats(struct seq_file *m, int cpu)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */