/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * Polling support by Suren Baghdasaryan <surenb@google.com>
 * Copyright (c) 2018 Google, Inc.
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 * Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 * (Naturally, the FULL state doesn't exist for the CPU resource.)
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
 *
 * Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME then becomes the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_running_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1)             = 0.4%
 *	   FULL = (256 - min(257, 256)) / 256 = 0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1)       = 25%
 *	   FULL = (4 - min(3, 4)) / 4 = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
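 *
 * [ Spelling out that substitution (an editorial illustration of the
 *   claim above): with nr_cpus == 1 and any non-idle tasks present,
 *   threads == 1, so
 *
 *	SOME = min(nr_delayed_tasks / 1, 1)   <=> nr_delayed_tasks != 0
 *	FULL = (1 - min(nr_running_tasks, 1)) <=> nr_running_tasks == 0
 *
 *   and since a non-idle task that isn't running must be delayed,
 *   these match the single-CPU definitions further up. ]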
 *
 * Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	   %SOME = tSOME / period
 *	   %FULL = tFULL / period
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
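 *
 * [ A quick example of the non-idle weighting (an editorial
 *   illustration): if during a 2s period CPU0 was non-idle and fully
 *   stalled the whole time (tSOME[0] = tNONIDLE[0] = 2s) while CPU1
 *   was completely idle (tNONIDLE[1] = 0), then
 *
 *	tSOME = (2s * 2s + 0 * 0) / 2s = 2s,  %SOME = 100%
 *
 *   whereas a naive average over both CPUs would have reported 50%,
 *   understating the stall on the only CPU that had work to do. ]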
 */

#include "../workqueue_internal.h"
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/seqlock.h>
#include <linux/uaccess.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/psi.h>
#include "sched.h"

static int psi_bug __read_mostly;

DEFINE_STATIC_KEY_FALSE(psi_disabled);

#ifdef CONFIG_PSI_DEFAULT_DISABLED
static bool psi_enable;
#else
static bool psi_enable = true;
#endif
static int __init setup_psi(char *str)
{
	return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
#define EXP_60s		1981		/* 1/exp(2s/60s) */
#define EXP_300s	2034		/* 1/exp(2s/300s) */
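/*
 * The EXP_* values above are 1/exp(sampling period / series period),
 * expressed in the FIXED_1 (1 << 11 == 2048) fixed-point format that
 * calc_load() expects. For instance, for the 10s series:
 *
 *	exp(-2s/10s) * 2048 = 0.8187 * 2048 ~= 1677
 *
 * and likewise exp(-2s/60s) * 2048 ~= 1981 and
 * exp(-2s/300s) * 2048 ~= 2034.
 */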
/* PSI trigger definitions */
#define WINDOW_MIN_US 500000	/* Min window size is 500ms */
#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
#define UPDATES_PER_WINDOW 10	/* 10 updates per window */

/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
struct psi_group psi_system = {
	.pcpu = &system_group_pcpu,
};

static void psi_avgs_work(struct work_struct *work);

static void group_init(struct psi_group *group)
{
	int cpu;

	for_each_possible_cpu(cpu)
		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
	group->avg_last_update = sched_clock();
	group->avg_next_update = group->avg_last_update + psi_period;
	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
	mutex_init(&group->avgs_lock);
	/* Init trigger-related members */
	atomic_set(&group->poll_scheduled, 0);
	mutex_init(&group->trigger_lock);
	INIT_LIST_HEAD(&group->triggers);
	memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
	group->poll_states = 0;
	group->poll_min_period = U32_MAX;
	memset(group->polling_total, 0, sizeof(group->polling_total));
	group->polling_next_update = ULLONG_MAX;
	group->polling_until = 0;
	rcu_assign_pointer(group->poll_kworker, NULL);
}

void __init psi_init(void)
{
	if (!psi_enable) {
		static_branch_enable(&psi_disabled);
		return;
	}

	psi_period = jiffies_to_nsecs(PSI_FREQ);
	group_init(&psi_system);
}

static bool test_state(unsigned int *tasks, enum psi_states state)
{
	switch (state) {
	case PSI_IO_SOME:
		return tasks[NR_IOWAIT];
	case PSI_IO_FULL:
		return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
	case PSI_MEM_SOME:
		return tasks[NR_MEMSTALL];
	case PSI_MEM_FULL:
		return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
	case PSI_CPU_SOME:
		return tasks[NR_RUNNING] > tasks[NR_ONCPU];
	case PSI_NONIDLE:
		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
			tasks[NR_RUNNING];
	default:
		return false;
	}
}

static void get_recent_times(struct psi_group *group, int cpu,
			     enum psi_aggregators aggregator, u32 *times,
			     u32 *pchanged_states)
{
	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
	u64 now, state_start;
	enum psi_states s;
	unsigned int seq;
	u32 state_mask;

	*pchanged_states = 0;

	/* Snapshot a coherent view of the CPU state */
	do {
		seq = read_seqcount_begin(&groupc->seq);
		now = cpu_clock(cpu);
		memcpy(times, groupc->times, sizeof(groupc->times));
		state_mask = groupc->state_mask;
		state_start = groupc->state_start;
	} while (read_seqcount_retry(&groupc->seq, seq));

	/* Calculate state time deltas against the previous snapshot */
	for (s = 0; s < NR_PSI_STATES; s++) {
		u32 delta;
		/*
		 * In addition to already concluded states, we also
		 * incorporate currently active states on the CPU,
		 * since states may last for many sampling periods.
		 *
		 * This way we keep our delta sampling buckets small
		 * (u32) and our reported pressure close to what's
		 * actually happening.
		 */
		if (state_mask & (1 << s))
			times[s] += now - state_start;

		delta = times[s] - groupc->times_prev[aggregator][s];
		groupc->times_prev[aggregator][s] = times[s];

		times[s] = delta;
		if (delta)
			*pchanged_states |= (1 << s);
	}
}

static void calc_avgs(unsigned long avg[3], int missed_periods,
		      u64 time, u64 period)
{
	unsigned long pct;

	/* Fill in zeroes for periods of no activity */
	if (missed_periods) {
		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
	}

	/* Sample the most recent active period */
	pct = div_u64(time * 100, period);
	pct *= FIXED_1;
	avg[0] = calc_load(avg[0], EXP_10s, pct);
	avg[1] = calc_load(avg[1], EXP_60s, pct);
	avg[2] = calc_load(avg[2], EXP_300s, pct);
}
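/*
 * A worked update step (an editorial sketch of the arithmetic;
 * calc_load() computes (load * exp + active * (FIXED_1 - exp)) /
 * FIXED_1, modulo a small rounding term): starting from avg[0] == 0
 * under sustained 100% pressure, pct = 100 * FIXED_1 = 204800 and one
 * 2s update gives
 *
 *	avg[0] = (0 * 1677 + 204800 * (2048 - 1677)) / 2048 = 37100
 *
 * which psi_show() renders through LOAD_INT()/LOAD_FRAC() as
 * avg10=18.11. Under constant pressure p the series converges on
 * p * FIXED_1.
 */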
static void collect_percpu_times(struct psi_group *group,
				 enum psi_aggregators aggregator,
				 u32 *pchanged_states)
{
	u64 deltas[NR_PSI_STATES - 1] = { 0, };
	unsigned long nonidle_total = 0;
	u32 changed_states = 0;
	int cpu;
	int s;

	/*
	 * Collect the per-cpu time buckets and average them into a
	 * single time sample that is normalized to wallclock time.
	 *
	 * For averaging, each CPU is weighted by its non-idle time in
	 * the sampling period. This eliminates artifacts from uneven
	 * loading, or even entirely idle CPUs.
	 */
	for_each_possible_cpu(cpu) {
		u32 times[NR_PSI_STATES];
		u32 nonidle;
		u32 cpu_changed_states;

		get_recent_times(group, cpu, aggregator, times,
				 &cpu_changed_states);
		changed_states |= cpu_changed_states;

		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
		nonidle_total += nonidle;

		for (s = 0; s < PSI_NONIDLE; s++)
			deltas[s] += (u64)times[s] * nonidle;
	}

	/*
	 * Integrate the sample into the running statistics that are
	 * reported to userspace: the cumulative stall times and the
	 * decaying averages.
	 *
	 * Pressure percentages are sampled at PSI_FREQ. We might be
	 * called more often when the user polls more frequently than
	 * that; we might be called less often when there is no task
	 * activity, thus no data, and clock ticks are sporadic. The
	 * below handles both.
	 */

	/* total= */
	for (s = 0; s < NR_PSI_STATES - 1; s++)
		group->total[aggregator][s] +=
				div_u64(deltas[s], max(nonidle_total, 1UL));

	if (pchanged_states)
		*pchanged_states = changed_states;
}

static u64 update_averages(struct psi_group *group, u64 now)
{
	unsigned long missed_periods = 0;
	u64 expires, period;
	u64 avg_next_update;
	int s;

	/* avgX= */
	expires = group->avg_next_update;
	if (now - expires >= psi_period)
		missed_periods = div_u64(now - expires, psi_period);

	/*
	 * The periodic clock tick can get delayed for various
	 * reasons, especially on loaded systems. To avoid clock
	 * drift, we schedule the clock in fixed psi_period intervals.
	 * But the deltas we sample out of the per-cpu buckets above
	 * are based on the actual time elapsing between clock ticks.
	 */
	avg_next_update = expires + ((1 + missed_periods) * psi_period);
	period = now - (group->avg_last_update + (missed_periods * psi_period));
	group->avg_last_update = now;

	for (s = 0; s < NR_PSI_STATES - 1; s++) {
		u32 sample;

		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
		/*
		 * Due to the lockless sampling of the time buckets,
		 * recorded time deltas can slip into the next period,
		 * which under full pressure can result in samples in
		 * excess of the period length.
		 *
		 * We don't want to report non-sensical pressures in
		 * excess of 100%, nor do we want to drop such events
		 * on the floor. Instead we punt any overage into the
		 * future until pressure subsides. By doing this we
		 * don't underreport the occurring pressure curve, we
		 * just report it delayed by one period length.
		 *
		 * The error isn't cumulative. As soon as another
		 * delta slips from a period P to P+1, by definition
		 * it frees up its time T in P.
		 */
		if (sample > period)
			sample = period;
		group->avg_total[s] += sample;
		calc_avgs(group->avg[s], missed_periods, sample, period);
	}

	return avg_next_update;
}

static void psi_avgs_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct psi_group *group;
	u32 changed_states;
	bool nonidle;
	u64 now;

	dwork = to_delayed_work(work);
	group = container_of(dwork, struct psi_group, avgs_work);

	mutex_lock(&group->avgs_lock);

	now = sched_clock();

	collect_percpu_times(group, PSI_AVGS, &changed_states);
	nonidle = changed_states & (1 << PSI_NONIDLE);
	/*
	 * If there is task activity, periodically fold the per-cpu
	 * times and feed samples into the running averages. If things
	 * are idle and there is no data to process, stop the clock.
	 * Once restarted, we'll catch up the running averages in one
	 * go - see calc_avgs() and missed_periods.
	 */
	if (now >= group->avg_next_update)
		group->avg_next_update = update_averages(group, now);

	if (nonidle) {
		schedule_delayed_work(dwork, nsecs_to_jiffies(
				group->avg_next_update - now) + 1);
	}

	mutex_unlock(&group->avgs_lock);
}

/* Trigger tracking window manipulations */
static void window_reset(struct psi_window *win, u64 now, u64 value,
			 u64 prev_growth)
{
	win->start_time = now;
	win->start_value = value;
	win->prev_growth = prev_growth;
}

/*
 * PSI growth tracking window update and growth calculation routine.
 *
 * This approximates a sliding tracking window by interpolating
 * partially elapsed windows using historical growth data from the
 * previous intervals. This minimizes memory requirements (by not storing
 * all the intermediate values in the previous window) and simplifies
 * the calculations. It works well because PSI signal changes only in
 * positive direction and over relatively small window sizes the growth
 * is close to linear.
 */
static u64 window_update(struct psi_window *win, u64 now, u64 value)
{
	u64 elapsed;
	u64 growth;

	elapsed = now - win->start_time;
	growth = value - win->start_value;
	/*
	 * After each tracking window passes win->start_value and
	 * win->start_time get reset and win->prev_growth stores
	 * the average per-window growth of the previous window.
	 * win->prev_growth is then used to interpolate additional
	 * growth from the previous window assuming it was linear.
	 */
	if (elapsed > win->size)
		window_reset(win, now, value, growth);
	else {
		u32 remaining;

		remaining = win->size - elapsed;
		growth += div64_u64(win->prev_growth * remaining, win->size);
	}

	return growth;
}
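/*
 * A worked example of the interpolation (an editorial illustration):
 * with a 1s window that saw 100ms of stall growth in the previous
 * interval, a call 250ms into the current window that has accumulated
 * 30ms of new stall reports
 *
 *	growth = 30ms + 100ms * (750ms / 1000ms) = 105ms
 *
 * i.e. the partially elapsed window is topped up with the linearly
 * scaled tail of the previous one.
 */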
static void init_triggers(struct psi_group *group, u64 now)
{
	struct psi_trigger *t;

	list_for_each_entry(t, &group->triggers, node)
		window_reset(&t->win, now,
				group->total[PSI_POLL][t->state], 0);
	memcpy(group->polling_total, group->total[PSI_POLL],
		   sizeof(group->polling_total));
	group->polling_next_update = now + group->poll_min_period;
}

static u64 update_triggers(struct psi_group *group, u64 now)
{
	struct psi_trigger *t;
	bool new_stall = false;
	u64 *total = group->total[PSI_POLL];

	/*
	 * On subsequent updates, calculate growth deltas and let
	 * watchers know when their specified thresholds are exceeded.
	 */
	list_for_each_entry(t, &group->triggers, node) {
		u64 growth;

		/* Check for stall activity */
		if (group->polling_total[t->state] == total[t->state])
			continue;

		/*
		 * Multiple triggers might be looking at the same state,
		 * remember to update group->polling_total[] once we've
		 * been through all of them. Also remember to extend the
		 * polling time if we see new stall activity.
		 */
		new_stall = true;

		/* Calculate growth since last update */
		growth = window_update(&t->win, now, total[t->state]);
		if (growth < t->threshold)
			continue;

		/* Limit event signaling to once per window */
		if (now < t->last_event_time + t->win.size)
			continue;

		/* Generate an event */
		if (cmpxchg(&t->event, 0, 1) == 0)
			wake_up_interruptible(&t->event_wait);
		t->last_event_time = now;
	}

	if (new_stall)
		memcpy(group->polling_total, total,
				sizeof(group->polling_total));

	return now + group->poll_min_period;
}
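/*
 * Concretely (an editorial example): a trigger written as
 * "full 100000 1000000" against the memory group fires when FULL
 * memory stall time grows by more than 100ms within a sliding 1s
 * window, and the once-per-window check above rate-limits it to at
 * most one event per second.
 */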
/*
 * Schedule polling if it's not already scheduled. It's safe to call even from
 * hotpath because even though kthread_queue_delayed_work takes worker->lock
 * spinlock that spinlock is never contended due to poll_scheduled atomic
 * preventing such competition.
 */
static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
{
	struct kthread_worker *kworker;

	/* Do not reschedule if already scheduled */
	if (atomic_cmpxchg(&group->poll_scheduled, 0, 1) != 0)
		return;

	rcu_read_lock();

	kworker = rcu_dereference(group->poll_kworker);
	/*
	 * kworker might be NULL in case psi_trigger_destroy races with
	 * psi_task_change (hotpath) which can't use locks
	 */
	if (likely(kworker))
		kthread_queue_delayed_work(kworker, &group->poll_work, delay);
	else
		atomic_set(&group->poll_scheduled, 0);

	rcu_read_unlock();
}

static void psi_poll_work(struct kthread_work *work)
{
	struct kthread_delayed_work *dwork;
	struct psi_group *group;
	u32 changed_states;
	u64 now;

	dwork = container_of(work, struct kthread_delayed_work, work);
	group = container_of(dwork, struct psi_group, poll_work);

	atomic_set(&group->poll_scheduled, 0);

	mutex_lock(&group->trigger_lock);

	now = sched_clock();

	collect_percpu_times(group, PSI_POLL, &changed_states);

	if (changed_states & group->poll_states) {
		/* Initialize trigger windows when entering polling mode */
		if (now > group->polling_until)
			init_triggers(group, now);

		/*
		 * Keep the monitor active for at least the duration of the
		 * minimum tracking window as long as monitor states are
		 * changing.
		 */
		group->polling_until = now +
			group->poll_min_period * UPDATES_PER_WINDOW;
	}

	if (now > group->polling_until) {
		group->polling_next_update = ULLONG_MAX;
		goto out;
	}

	if (now >= group->polling_next_update)
		group->polling_next_update = update_triggers(group, now);

	psi_schedule_poll_work(group,
		nsecs_to_jiffies(group->polling_next_update - now) + 1);

out:
	mutex_unlock(&group->trigger_lock);
}
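/*
 * Polling lifecycle by example (an editorial sketch): with a single
 * trigger whose window is 1s, poll_min_period is 100ms. The first
 * monitored state change schedules this worker, which initializes the
 * trigger windows and arms polling_until = now + 1s; it then re-runs
 * roughly every 100ms, pushing polling_until out while monitored
 * states keep changing. After a quiet second, polling_next_update is
 * parked at ULLONG_MAX and the worker stays idle until the hotpath
 * schedules it again.
 */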
static void record_times(struct psi_group_cpu *groupc, int cpu,
			 bool memstall_tick)
{
	u32 delta;
	u64 now;

	now = cpu_clock(cpu);
	delta = now - groupc->state_start;
	groupc->state_start = now;

	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
		groupc->times[PSI_IO_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_IO_FULL))
			groupc->times[PSI_IO_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
		groupc->times[PSI_MEM_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_MEM_FULL))
			groupc->times[PSI_MEM_FULL] += delta;
		else if (memstall_tick) {
			u32 sample;
			/*
			 * Since we care about lost potential, a
			 * memstall is FULL when there are no other
			 * working tasks, but also when the CPU is
			 * actively reclaiming and nothing productive
			 * could run even if it were runnable.
			 *
			 * When the timer tick sees a reclaiming CPU,
			 * regardless of runnable tasks, sample a FULL
			 * tick (or less if it hasn't been a full tick
			 * since the last state change).
			 */
			sample = min(delta, (u32)jiffies_to_nsecs(1));
			groupc->times[PSI_MEM_FULL] += sample;
		}
	}

	if (groupc->state_mask & (1 << PSI_CPU_SOME))
		groupc->times[PSI_CPU_SOME] += delta;

	if (groupc->state_mask & (1 << PSI_NONIDLE))
		groupc->times[PSI_NONIDLE] += delta;
}

static void psi_group_change(struct psi_group *group, int cpu,
			     unsigned int clear, unsigned int set,
			     bool wake_clock)
{
	struct psi_group_cpu *groupc;
	u32 state_mask = 0;
	unsigned int t, m;
	enum psi_states s;

	groupc = per_cpu_ptr(group->pcpu, cpu);

	/*
	 * First we assess the aggregate resource states this CPU's
	 * tasks have been in since the last change, and account any
	 * SOME and FULL time these may have resulted in.
	 *
	 * Then we update the task counts according to the state
	 * change requested through the @clear and @set bits.
	 */
	write_seqcount_begin(&groupc->seq);

	record_times(groupc, cpu, false);

	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
		if (!(m & (1 << t)))
			continue;
		if (groupc->tasks[t] == 0 && !psi_bug) {
			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
					cpu, t, groupc->tasks[0],
					groupc->tasks[1], groupc->tasks[2],
					groupc->tasks[3], clear, set);
			psi_bug = 1;
		}
		groupc->tasks[t]--;
	}

	for (t = 0; set; set &= ~(1 << t), t++)
		if (set & (1 << t))
			groupc->tasks[t]++;

	/* Calculate state mask representing active states */
	for (s = 0; s < NR_PSI_STATES; s++) {
		if (test_state(groupc->tasks, s))
			state_mask |= (1 << s);
	}
	groupc->state_mask = state_mask;

	write_seqcount_end(&groupc->seq);

	if (state_mask & group->poll_states)
		psi_schedule_poll_work(group, 1);

	if (wake_clock && !delayed_work_pending(&group->avgs_work))
		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
}

static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
{
#ifdef CONFIG_CGROUPS
	struct cgroup *cgroup = NULL;

	if (!*iter)
		cgroup = task->cgroups->dfl_cgrp;
	else if (*iter == &psi_system)
		return NULL;
	else
		cgroup = cgroup_parent(*iter);

	if (cgroup && cgroup_parent(cgroup)) {
		*iter = cgroup;
		return cgroup_psi(cgroup);
	}
#else
	if (*iter)
		return NULL;
#endif
	*iter = &psi_system;
	return &psi_system;
}
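/*
 * Group iteration order, by example (editorial note): for a task in
 * cgroup /A/B on the default hierarchy, successive calls return B's
 * group, then A's group, then psi_system. The root cgroup itself is
 * skipped - its pressure is what psi_system tracks.
 */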
static void psi_flags_change(struct task_struct *task, int clear, int set)
{
	if (((task->psi_flags & set) ||
	     (task->psi_flags & clear) != clear) &&
	    !psi_bug) {
		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
				task->pid, task->comm, task_cpu(task),
				task->psi_flags, clear, set);
		psi_bug = 1;
	}

	task->psi_flags &= ~clear;
	task->psi_flags |= set;
}

void psi_task_change(struct task_struct *task, int clear, int set)
{
	int cpu = task_cpu(task);
	struct psi_group *group;
	bool wake_clock = true;
	void *iter = NULL;

	if (!task->pid)
		return;

	psi_flags_change(task, clear, set);

	/*
	 * Periodic aggregation shuts off if there is a period of no
	 * task changes, so we wake it back up if necessary. However,
	 * don't do this if the task change is the aggregation worker
	 * itself going to sleep, or we'll ping-pong forever.
	 */
	if (unlikely((clear & TSK_RUNNING) &&
		     (task->flags & PF_WQ_WORKER) &&
		     wq_worker_last_func(task) == psi_avgs_work))
		wake_clock = false;

	while ((group = iterate_groups(task, &iter)))
		psi_group_change(group, cpu, clear, set, wake_clock);
}

void psi_task_switch(struct task_struct *prev, struct task_struct *next,
		     bool sleep)
{
	struct psi_group *group, *common = NULL;
	int cpu = task_cpu(prev);
	void *iter;

	if (next->pid) {
		psi_flags_change(next, 0, TSK_ONCPU);
		/*
		 * When moving state between tasks, the group that
		 * contains them both does not change: we can stop
		 * updating the tree once we reach the first common
		 * ancestor. Iterate @next's ancestors until we
		 * encounter @prev's state.
		 */
		iter = NULL;
		while ((group = iterate_groups(next, &iter))) {
			if (per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) {
				common = group;
				break;
			}

			psi_group_change(group, cpu, 0, TSK_ONCPU, true);
		}
	}

	/*
	 * If this is a voluntary sleep, dequeue will have taken care
	 * of the outgoing TSK_ONCPU alongside TSK_RUNNING already. We
	 * only need to deal with it during preemption.
	 */
	if (sleep)
		return;

	if (prev->pid) {
		psi_flags_change(prev, TSK_ONCPU, 0);

		iter = NULL;
		while ((group = iterate_groups(prev, &iter)) && group != common)
			psi_group_change(group, cpu, TSK_ONCPU, 0, true);
	}
}
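/*
 * To illustrate the common-ancestor cutoff above (editorial sketch):
 * preempting from @prev in /A/B to @next in /A/C, the walk over
 * @next's groups sets TSK_ONCPU in C, finds NR_ONCPU already set in A
 * (from @prev) and stops there with common = A. The cleanup pass then
 * clears TSK_ONCPU in B only, leaving A and psi_system untouched.
 */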
void psi_memstall_tick(struct task_struct *task, int cpu)
{
	struct psi_group *group;
	void *iter = NULL;

	while ((group = iterate_groups(task, &iter))) {
		struct psi_group_cpu *groupc;

		groupc = per_cpu_ptr(group->pcpu, cpu);
		write_seqcount_begin(&groupc->seq);
		record_times(groupc, cpu, true);
		write_seqcount_end(&groupc->seq);
	}
}

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	*flags = current->in_memstall;
	if (*flags)
		return;
	/*
	 * in_memstall setting & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we can
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 1;
	psi_task_change(current, 0, TSK_MEMSTALL);

	rq_unlock_irq(rq, &rf);
}

/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	if (*flags)
		return;
	/*
	 * in_memstall clearing & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we could
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 0;
	psi_task_change(current, TSK_MEMSTALL, 0);

	rq_unlock_irq(rq, &rf);
}

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
	if (static_branch_likely(&psi_disabled))
		return 0;

	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
	if (!cgroup->psi.pcpu)
		return -ENOMEM;
	group_init(&cgroup->psi);
	return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
	if (static_branch_likely(&psi_disabled))
		return;

	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
	free_percpu(cgroup->psi.pcpu);
	/* All triggers must be removed by now */
	WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
}

/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
	unsigned int task_flags = 0;
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled)) {
		/*
		 * Lame to do this here, but the scheduler cannot be locked
		 * from the outside, so we move cgroups from inside sched/.
		 */
		rcu_assign_pointer(task->cgroups, to);
		return;
	}

	rq = task_rq_lock(task, &rf);

	if (task_on_rq_queued(task)) {
		task_flags = TSK_RUNNING;
		if (task_current(rq, task))
			task_flags |= TSK_ONCPU;
	} else if (task->in_iowait)
		task_flags = TSK_IOWAIT;

	if (task->in_memstall)
		task_flags |= TSK_MEMSTALL;

	if (task_flags)
		psi_task_change(task, task_flags, 0);

	/* See comment above */
	rcu_assign_pointer(task->cgroups, to);

	if (task_flags)
		psi_task_change(task, 0, task_flags);

	task_rq_unlock(rq, task, &rf);
}
#endif /* CONFIG_CGROUPS */

int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
	int full;
	u64 now;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	/* Update averages before reporting them */
	mutex_lock(&group->avgs_lock);
	now = sched_clock();
	collect_percpu_times(group, PSI_AVGS, NULL);
	if (now >= group->avg_next_update)
		group->avg_next_update = update_averages(group, now);
	mutex_unlock(&group->avgs_lock);

	for (full = 0; full < 2 - (res == PSI_CPU); full++) {
		unsigned long avg[3];
		u64 total;
		int w;

		for (w = 0; w < 3; w++)
			avg[w] = group->avg[res * 2 + full][w];
		total = div_u64(group->total[PSI_AVGS][res * 2 + full],
				NSEC_PER_USEC);

		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
			   full ? "full" : "some",
			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
			   total);
	}

	return 0;
}

static int psi_io_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_io_show, NULL);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_memory_show, NULL);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_cpu_show, NULL);
}

struct psi_trigger *psi_trigger_create(struct psi_group *group,
			char *buf, size_t nbytes, enum psi_res res)
{
	struct psi_trigger *t;
	enum psi_states state;
	u32 threshold_us;
	u32 window_us;

	if (static_branch_likely(&psi_disabled))
		return ERR_PTR(-EOPNOTSUPP);

	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
		state = PSI_IO_SOME + res * 2;
	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
		state = PSI_IO_FULL + res * 2;
	else
		return ERR_PTR(-EINVAL);

	if (state >= PSI_NONIDLE)
		return ERR_PTR(-EINVAL);

	if (window_us < WINDOW_MIN_US ||
	    window_us > WINDOW_MAX_US)
		return ERR_PTR(-EINVAL);

	/* Check threshold */
	if (threshold_us == 0 || threshold_us > window_us)
		return ERR_PTR(-EINVAL);

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	t->group = group;
	t->state = state;
	t->threshold = threshold_us * NSEC_PER_USEC;
	t->win.size = window_us * NSEC_PER_USEC;
	window_reset(&t->win, 0, 0, 0);

	t->event = 0;
	t->last_event_time = 0;
	init_waitqueue_head(&t->event_wait);
	kref_init(&t->refcount);

	mutex_lock(&group->trigger_lock);

	if (!rcu_access_pointer(group->poll_kworker)) {
		struct sched_param param = {
			.sched_priority = 1,
		};
		struct kthread_worker *kworker;

		kworker = kthread_create_worker(0, "psimon");
		if (IS_ERR(kworker)) {
			kfree(t);
			mutex_unlock(&group->trigger_lock);
			return ERR_CAST(kworker);
		}
		sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
		kthread_init_delayed_work(&group->poll_work,
				psi_poll_work);
		rcu_assign_pointer(group->poll_kworker, kworker);
	}

	list_add(&t->node, &group->triggers);
	group->poll_min_period = min(group->poll_min_period,
		div_u64(t->win.size, UPDATES_PER_WINDOW));
	group->nr_triggers[t->state]++;
	group->poll_states |= (1 << t->state);

	mutex_unlock(&group->trigger_lock);

	return t;
}
static void psi_trigger_destroy(struct kref *ref)
{
	struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
	struct psi_group *group = t->group;
	struct kthread_worker *kworker_to_destroy = NULL;

	if (static_branch_likely(&psi_disabled))
		return;

	/*
	 * Wake up waiters to stop polling. Can happen if cgroup is deleted
	 * from under a polling process.
	 */
	wake_up_interruptible(&t->event_wait);

	mutex_lock(&group->trigger_lock);

	if (!list_empty(&t->node)) {
		struct psi_trigger *tmp;
		u64 period = ULLONG_MAX;

		list_del(&t->node);
		group->nr_triggers[t->state]--;
		if (!group->nr_triggers[t->state])
			group->poll_states &= ~(1 << t->state);
		/* reset min update period for the remaining triggers */
		list_for_each_entry(tmp, &group->triggers, node)
			period = min(period, div_u64(tmp->win.size,
					UPDATES_PER_WINDOW));
		group->poll_min_period = period;
		/* Destroy poll_kworker when the last trigger is destroyed */
		if (group->poll_states == 0) {
			group->polling_until = 0;
			kworker_to_destroy = rcu_dereference_protected(
					group->poll_kworker,
					lockdep_is_held(&group->trigger_lock));
			rcu_assign_pointer(group->poll_kworker, NULL);
		}
	}

	mutex_unlock(&group->trigger_lock);

	/*
	 * Wait for both *trigger_ptr from psi_trigger_replace and
	 * poll_kworker RCUs to complete their read-side critical sections
	 * before destroying the trigger and optionally the poll_kworker.
	 */
	synchronize_rcu();
	/*
	 * Destroy the kworker after releasing trigger_lock to prevent a
	 * deadlock while waiting for psi_poll_work to acquire trigger_lock.
	 */
	if (kworker_to_destroy) {
		/*
		 * After the RCU grace period has expired, the worker
		 * can no longer be found through group->poll_kworker.
		 * But it might have been already scheduled before
		 * that - deschedule it cleanly before destroying it.
		 */
		kthread_cancel_delayed_work_sync(&group->poll_work);
		atomic_set(&group->poll_scheduled, 0);

		kthread_destroy_worker(kworker_to_destroy);
	}
	kfree(t);
}

void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new)
{
	struct psi_trigger *old = *trigger_ptr;

	if (static_branch_likely(&psi_disabled))
		return;

	rcu_assign_pointer(*trigger_ptr, new);
	if (old)
		kref_put(&old->refcount, psi_trigger_destroy);
}

__poll_t psi_trigger_poll(void **trigger_ptr,
				struct file *file, poll_table *wait)
{
	__poll_t ret = DEFAULT_POLLMASK;
	struct psi_trigger *t;

	if (static_branch_likely(&psi_disabled))
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

	rcu_read_lock();

	t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
	if (!t) {
		rcu_read_unlock();
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
	}
	kref_get(&t->refcount);

	rcu_read_unlock();

	poll_wait(file, &t->event_wait, wait);

	if (cmpxchg(&t->event, 1, 0) == 1)
		ret |= EPOLLPRI;

	kref_put(&t->refcount, psi_trigger_destroy);

	return ret;
}

static ssize_t psi_write(struct file *file, const char __user *user_buf,
			 size_t nbytes, enum psi_res res)
{
	char buf[32];
	size_t buf_size;
	struct seq_file *seq;
	struct psi_trigger *new;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	if (!nbytes)
		return -EINVAL;

	buf_size = min(nbytes, sizeof(buf));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size - 1] = '\0';

	new = psi_trigger_create(&psi_system, buf, nbytes, res);
	if (IS_ERR(new))
		return PTR_ERR(new);

	seq = file->private_data;
	/* Take seq->lock to protect seq->private from concurrent writes */
	mutex_lock(&seq->lock);
	psi_trigger_replace(&seq->private, new);
	mutex_unlock(&seq->lock);

	return nbytes;
}
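/*
 * Userspace usage, in brief (an editorial sketch; the authoritative
 * reference is Documentation/accounting/psi.rst): reading
 * /proc/pressure/{io,memory,cpu} returns lines such as
 *
 *	some avg10=0.22 avg60=0.17 avg300=1.11 total=927587258
 *	full avg10=0.00 avg60=0.13 avg300=0.96 total=8270316
 *
 * and writing a trigger description arms the monitor:
 *
 *	const char trig[] = "some 150000 1000000";
 *	int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *
 *	write(fd, trig, strlen(trig) + 1);	// 150ms SOME per 1s window
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	poll(&pfd, 1, -1);			// POLLPRI signals an event
 *
 * Error handling is omitted; POLLERR together with POLLPRI indicates
 * that the trigger went away, e.g. its cgroup was deleted.
 */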
static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
			    size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_IO);
}

static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
				size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_MEM);
}

static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
			     size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_CPU);
}

static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
{
	struct seq_file *seq = file->private_data;

	return psi_trigger_poll(&seq->private, file, wait);
}

static int psi_fop_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	psi_trigger_replace(&seq->private, NULL);
	return single_release(inode, file);
}

static const struct proc_ops psi_io_proc_ops = {
	.proc_open	= psi_io_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_io_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static const struct proc_ops psi_memory_proc_ops = {
	.proc_open	= psi_memory_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_memory_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static const struct proc_ops psi_cpu_proc_ops = {
	.proc_open	= psi_cpu_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_cpu_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static int __init psi_proc_init(void)
{
	if (psi_enable) {
		proc_mkdir("pressure", NULL);
		proc_create("pressure/io", 0, NULL, &psi_io_proc_ops);
		proc_create("pressure/memory", 0, NULL, &psi_memory_proc_ops);
		proc_create("pressure/cpu", 0, NULL, &psi_cpu_proc_ops);
	}
	return 0;
}
module_init(psi_proc_init);