// SPDX-License-Identifier: GPL-2.0
/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * Polling support by Suren Baghdasaryan <surenb@google.com>
 * Copyright (c) 2018 Google, Inc.
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 * Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
 *
 * What it means for a task to be productive is defined differently
 * for each resource. For IO, productive means a running task. For
 * memory, productive means a running task that isn't a reclaimer. For
 * CPU, productive means an oncpu task.
 *
 * Naturally, the FULL state doesn't exist for the CPU resource at the
 * system level, but it does exist at the cgroup level. At the cgroup
 * level, FULL means all non-idle tasks in the cgroup are delayed on
 * the CPU resource, which is being used by others outside of the
 * cgroup or throttled by the cgroup cpu.max configuration.
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
 *
 * Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME becomes then the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_productive_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1)             = 0.4%
 *	   FULL = (256 - min(256, 256)) / 256 = 0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1)               = 25%
 *	   FULL = (4 - min(3, 4)) / 4         = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
 *
 * Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	%SOME = tSOME / period
 *	%FULL = tFULL / period
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
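 *
 * [ Illustrative aggregation example with made-up numbers: if CPU0 was
 *   non-idle for 1000ms of a period and accumulated 400ms of SOME time,
 *   while CPU1 was non-idle for only 200ms and accumulated 200ms of
 *   SOME time, then tSOME = (400 * 1000 + 200 * 200) / (1000 + 200),
 *   or roughly 367ms: the mostly idle CPU1 contributes proportionally
 *   less to the aggregate than the busy CPU0. ]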
 */

static int psi_bug __read_mostly;

DEFINE_STATIC_KEY_FALSE(psi_disabled);
DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);

#ifdef CONFIG_PSI_DEFAULT_DISABLED
static bool psi_enable;
#else
static bool psi_enable = true;
#endif

static int __init setup_psi(char *str)
{
	return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
#define EXP_60s		1981		/* 1/exp(2s/60s) */
#define EXP_300s	2034		/* 1/exp(2s/300s) */

/* PSI trigger definitions */
#define WINDOW_MIN_US 500000	/* Min window size is 500ms */
#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
#define UPDATES_PER_WINDOW 10	/* 10 updates per window */

/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
struct psi_group psi_system = {
	.pcpu = &system_group_pcpu,
};

static void psi_avgs_work(struct work_struct *work);

static void poll_timer_fn(struct timer_list *t);

static void group_init(struct psi_group *group)
{
	int cpu;

	group->enabled = true;
	for_each_possible_cpu(cpu)
		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
	group->avg_last_update = sched_clock();
	group->avg_next_update = group->avg_last_update + psi_period;
	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
	mutex_init(&group->avgs_lock);
	/* Init trigger-related members */
	mutex_init(&group->trigger_lock);
	INIT_LIST_HEAD(&group->triggers);
	group->poll_min_period = U32_MAX;
	group->polling_next_update = ULLONG_MAX;
	init_waitqueue_head(&group->poll_wait);
	timer_setup(&group->poll_timer, poll_timer_fn, 0);
	rcu_assign_pointer(group->poll_task, NULL);
}

void __init psi_init(void)
{
	if (!psi_enable) {
		static_branch_enable(&psi_disabled);
		static_branch_disable(&psi_cgroups_enabled);
		return;
	}

	if (!cgroup_psi_enabled())
		static_branch_disable(&psi_cgroups_enabled);

	psi_period = jiffies_to_nsecs(PSI_FREQ);
	group_init(&psi_system);
}

static bool test_state(unsigned int *tasks, enum psi_states state, bool oncpu)
{
	switch (state) {
	case PSI_IO_SOME:
		return unlikely(tasks[NR_IOWAIT]);
	case PSI_IO_FULL:
		return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
	case PSI_MEM_SOME:
		return unlikely(tasks[NR_MEMSTALL]);
	case PSI_MEM_FULL:
		return unlikely(tasks[NR_MEMSTALL] &&
			tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
	case PSI_CPU_SOME:
		return unlikely(tasks[NR_RUNNING] > oncpu);
	case PSI_CPU_FULL:
		return unlikely(tasks[NR_RUNNING] && !oncpu);
	case PSI_NONIDLE:
		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
			tasks[NR_RUNNING];
	default:
		return false;
	}
}

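/*
 * Illustrative reading of test_state() above, with made-up counts: a
 * CPU whose only task is reclaiming memory has tasks[NR_MEMSTALL] ==
 * tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING] == 1, so it reports
 * both PSI_MEM_SOME and PSI_MEM_FULL, because the CPU is busy with
 * reclaim rather than productive work. Adding a second, unstalled
 * runnable task to that CPU clears PSI_MEM_FULL and leaves only
 * PSI_MEM_SOME set.
 */
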
static void get_recent_times(struct psi_group *group, int cpu,
			     enum psi_aggregators aggregator, u32 *times,
			     u32 *pchanged_states)
{
	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
	u64 now, state_start;
	enum psi_states s;
	unsigned int seq;
	u32 state_mask;

	*pchanged_states = 0;

	/* Snapshot a coherent view of the CPU state */
	do {
		seq = read_seqcount_begin(&groupc->seq);
		now = cpu_clock(cpu);
		memcpy(times, groupc->times, sizeof(groupc->times));
		state_mask = groupc->state_mask;
		state_start = groupc->state_start;
	} while (read_seqcount_retry(&groupc->seq, seq));

	/* Calculate state time deltas against the previous snapshot */
	for (s = 0; s < NR_PSI_STATES; s++) {
		u32 delta;
		/*
		 * In addition to already concluded states, we also
		 * incorporate currently active states on the CPU,
		 * since states may last for many sampling periods.
		 *
		 * This way we keep our delta sampling buckets small
		 * (u32) and our reported pressure close to what's
		 * actually happening.
		 */
		if (state_mask & (1 << s))
			times[s] += now - state_start;

		delta = times[s] - groupc->times_prev[aggregator][s];
		groupc->times_prev[aggregator][s] = times[s];

		times[s] = delta;
		if (delta)
			*pchanged_states |= (1 << s);
	}
}

static void calc_avgs(unsigned long avg[3], int missed_periods,
		      u64 time, u64 period)
{
	unsigned long pct;

	/* Fill in zeroes for periods of no activity */
	if (missed_periods) {
		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
	}

	/* Sample the most recent active period */
	pct = div_u64(time * 100, period);
	pct *= FIXED_1;
	avg[0] = calc_load(avg[0], EXP_10s, pct);
	avg[1] = calc_load(avg[1], EXP_60s, pct);
	avg[2] = calc_load(avg[2], EXP_300s, pct);
}

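/*
 * A rough sense of the EXP_* constants used by calc_avgs() above
 * (illustrative, assuming calc_load() computes
 * load = (load * exp + pct * (FIXED_1 - exp)) / FIXED_1 as the loadavg
 * code does): with FIXED_1 == 2048, EXP_10s == 1677 is the fixed-point
 * encoding of 1/exp(2s/10s). A group that was previously idle
 * (avg10 == 0) and then spends one full 2s period at 50% pressure
 * moves to about 50% * (2048 - 1677) / 2048 ~= 9%, and decays back
 * toward zero at the same rate once the pressure stops.
 */
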
static void collect_percpu_times(struct psi_group *group,
				 enum psi_aggregators aggregator,
				 u32 *pchanged_states)
{
	u64 deltas[NR_PSI_STATES - 1] = { 0, };
	unsigned long nonidle_total = 0;
	u32 changed_states = 0;
	int cpu;
	int s;

	/*
	 * Collect the per-cpu time buckets and average them into a
	 * single time sample that is normalized to wallclock time.
	 *
	 * For averaging, each CPU is weighted by its non-idle time in
	 * the sampling period. This eliminates artifacts from uneven
	 * loading, or even entirely idle CPUs.
	 */
	for_each_possible_cpu(cpu) {
		u32 times[NR_PSI_STATES];
		u32 nonidle;
		u32 cpu_changed_states;

		get_recent_times(group, cpu, aggregator, times,
				 &cpu_changed_states);
		changed_states |= cpu_changed_states;

		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
		nonidle_total += nonidle;

		for (s = 0; s < PSI_NONIDLE; s++)
			deltas[s] += (u64)times[s] * nonidle;
	}

	/*
	 * Integrate the sample into the running statistics that are
	 * reported to userspace: the cumulative stall times and the
	 * decaying averages.
	 *
	 * Pressure percentages are sampled at PSI_FREQ. We might be
	 * called more often when the user polls more frequently than
	 * that; we might be called less often when there is no task
	 * activity, thus no data, and clock ticks are sporadic. The
	 * below handles both.
	 */

	/* total= */
	for (s = 0; s < NR_PSI_STATES - 1; s++)
		group->total[aggregator][s] +=
				div_u64(deltas[s], max(nonidle_total, 1UL));

	if (pchanged_states)
		*pchanged_states = changed_states;
}

static u64 update_averages(struct psi_group *group, u64 now)
{
	unsigned long missed_periods = 0;
	u64 expires, period;
	u64 avg_next_update;
	int s;

	/* avgX= */
	expires = group->avg_next_update;
	if (now - expires >= psi_period)
		missed_periods = div_u64(now - expires, psi_period);

	/*
	 * The periodic clock tick can get delayed for various
	 * reasons, especially on loaded systems. To avoid clock
	 * drift, we schedule the clock in fixed psi_period intervals.
	 * But the deltas we sample out of the per-cpu buckets above
	 * are based on the actual time elapsing between clock ticks.
	 */
	avg_next_update = expires + ((1 + missed_periods) * psi_period);
	period = now - (group->avg_last_update + (missed_periods * psi_period));
	group->avg_last_update = now;

	for (s = 0; s < NR_PSI_STATES - 1; s++) {
		u32 sample;

		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
		/*
		 * Due to the lockless sampling of the time buckets,
		 * recorded time deltas can slip into the next period,
		 * which under full pressure can result in samples in
		 * excess of the period length.
		 *
		 * We don't want to report non-sensical pressures in
		 * excess of 100%, nor do we want to drop such events
		 * on the floor. Instead we punt any overage into the
		 * future until pressure subsides. By doing this we
		 * don't underreport the occurring pressure curve, we
		 * just report it delayed by one period length.
		 *
		 * The error isn't cumulative. As soon as another
		 * delta slips from a period P to P+1, by definition
		 * it frees up its time T in P.
		 */
		if (sample > period)
			sample = period;
		group->avg_total[s] += sample;
		calc_avgs(group->avg[s], missed_periods, sample, period);
	}

	return avg_next_update;
}

static void psi_avgs_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct psi_group *group;
	u32 changed_states;
	bool nonidle;
	u64 now;

	dwork = to_delayed_work(work);
	group = container_of(dwork, struct psi_group, avgs_work);

	mutex_lock(&group->avgs_lock);

	now = sched_clock();

	collect_percpu_times(group, PSI_AVGS, &changed_states);
	nonidle = changed_states & (1 << PSI_NONIDLE);
	/*
	 * If there is task activity, periodically fold the per-cpu
	 * times and feed samples into the running averages. If things
	 * are idle and there is no data to process, stop the clock.
	 * Once restarted, we'll catch up the running averages in one
	 * go - see calc_avgs() and missed_periods.
	 */
	if (now >= group->avg_next_update)
		group->avg_next_update = update_averages(group, now);

	if (nonidle) {
		schedule_delayed_work(dwork, nsecs_to_jiffies(
				group->avg_next_update - now) + 1);
	}

	mutex_unlock(&group->avgs_lock);
}

/* Trigger tracking window manipulations */
static void window_reset(struct psi_window *win, u64 now, u64 value,
			 u64 prev_growth)
{
	win->start_time = now;
	win->start_value = value;
	win->prev_growth = prev_growth;
}

/*
 * PSI growth tracking window update and growth calculation routine.
 *
 * This approximates a sliding tracking window by interpolating
 * partially elapsed windows using historical growth data from the
 * previous intervals. This minimizes memory requirements (by not storing
 * all the intermediate values in the previous window) and simplifies
 * the calculations. It works well because the PSI signal changes only
 * in the positive direction and, over relatively small window sizes,
 * the growth is close to linear.
 */
static u64 window_update(struct psi_window *win, u64 now, u64 value)
{
	u64 elapsed;
	u64 growth;

	elapsed = now - win->start_time;
	growth = value - win->start_value;
	/*
	 * After each tracking window passes win->start_value and
	 * win->start_time get reset and win->prev_growth stores
	 * the average per-window growth of the previous window.
	 * win->prev_growth is then used to interpolate additional
	 * growth from the previous window assuming it was linear.
	 */
	if (elapsed > win->size)
		window_reset(win, now, value, growth);
	else {
		u32 remaining;

		remaining = win->size - elapsed;
		growth += div64_u64(win->prev_growth * remaining, win->size);
	}

	return growth;
}

static void init_triggers(struct psi_group *group, u64 now)
{
	struct psi_trigger *t;

	list_for_each_entry(t, &group->triggers, node)
		window_reset(&t->win, now,
				group->total[PSI_POLL][t->state], 0);
	memcpy(group->polling_total, group->total[PSI_POLL],
		   sizeof(group->polling_total));
	group->polling_next_update = now + group->poll_min_period;
}

static u64 update_triggers(struct psi_group *group, u64 now)
{
	struct psi_trigger *t;
	bool update_total = false;
	u64 *total = group->total[PSI_POLL];

	/*
	 * On subsequent updates, calculate growth deltas and let
	 * watchers know when their specified thresholds are exceeded.
	 */
	list_for_each_entry(t, &group->triggers, node) {
		u64 growth;
		bool new_stall;

		new_stall = group->polling_total[t->state] != total[t->state];

		/* Check for stall activity or a previous threshold breach */
		if (!new_stall && !t->pending_event)
			continue;
		/*
		 * Check for new stall activity, as well as deferred
		 * events that occurred in the last window after the
		 * trigger had already fired (we want to ratelimit
		 * events without dropping any).
		 */
		if (new_stall) {
			/*
			 * Multiple triggers might be looking at the same state,
			 * remember to update group->polling_total[] once we've
			 * been through all of them. Also remember to extend the
			 * polling time if we see new stall activity.
			 */
			update_total = true;

			/* Calculate growth since last update */
			growth = window_update(&t->win, now, total[t->state]);
			if (growth < t->threshold)
				continue;

			t->pending_event = true;
		}
		/* Limit event signaling to once per window */
		if (now < t->last_event_time + t->win.size)
			continue;

		/* Generate an event */
		if (cmpxchg(&t->event, 0, 1) == 0)
			wake_up_interruptible(&t->event_wait);
		t->last_event_time = now;
		/* Reset threshold breach flag once event got generated */
		t->pending_event = false;
	}

	if (update_total)
		memcpy(group->polling_total, total,
				sizeof(group->polling_total));

	return now + group->poll_min_period;
}

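/*
 * Worked example with illustrative numbers: a trigger created from the
 * string "some 150000 1000000" asks for 150ms of SOME stall time within
 * a 1s window. That window contributes win.size / UPDATES_PER_WINDOW,
 * i.e. 100ms, to group->poll_min_period, so while stall activity is
 * seen the group is re-evaluated roughly every 100ms. update_triggers()
 * above then compares the (interpolated) growth within the window
 * against the 150ms threshold and wakes the waiters on event_wait at
 * most once per window.
 */
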
/* Schedule polling if it's not already scheduled. */
static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
{
	struct task_struct *task;

	/*
	 * Do not reschedule if already scheduled.
	 * Possible race with a timer scheduled after this check but before
	 * mod_timer below can be tolerated because group->polling_next_update
	 * will keep updates on schedule.
	 */
	if (timer_pending(&group->poll_timer))
		return;

	rcu_read_lock();

	task = rcu_dereference(group->poll_task);
	/*
	 * kworker might be NULL in case psi_trigger_destroy races with
	 * psi_task_change (hotpath) which can't use locks
	 */
	if (likely(task))
		mod_timer(&group->poll_timer, jiffies + delay);

	rcu_read_unlock();
}

static void psi_poll_work(struct psi_group *group)
{
	u32 changed_states;
	u64 now;

	mutex_lock(&group->trigger_lock);

	now = sched_clock();

	collect_percpu_times(group, PSI_POLL, &changed_states);

	if (changed_states & group->poll_states) {
		/* Initialize trigger windows when entering polling mode */
		if (now > group->polling_until)
			init_triggers(group, now);

		/*
		 * Keep the monitor active for at least the duration of the
		 * minimum tracking window as long as monitor states are
		 * changing.
		 */
		group->polling_until = now +
			group->poll_min_period * UPDATES_PER_WINDOW;
	}

	if (now > group->polling_until) {
		group->polling_next_update = ULLONG_MAX;
		goto out;
	}

	if (now >= group->polling_next_update)
		group->polling_next_update = update_triggers(group, now);

	psi_schedule_poll_work(group,
		nsecs_to_jiffies(group->polling_next_update - now) + 1);

out:
	mutex_unlock(&group->trigger_lock);
}

static int psi_poll_worker(void *data)
{
	struct psi_group *group = (struct psi_group *)data;

	sched_set_fifo_low(current);

	while (true) {
		wait_event_interruptible(group->poll_wait,
				atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
				kthread_should_stop());
		if (kthread_should_stop())
			break;

		psi_poll_work(group);
	}
	return 0;
}

static void poll_timer_fn(struct timer_list *t)
{
	struct psi_group *group = from_timer(group, t, poll_timer);

	atomic_set(&group->poll_wakeup, 1);
	wake_up_interruptible(&group->poll_wait);
}

static void record_times(struct psi_group_cpu *groupc, u64 now)
{
	u32 delta;

	delta = now - groupc->state_start;
	groupc->state_start = now;

	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
		groupc->times[PSI_IO_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_IO_FULL))
			groupc->times[PSI_IO_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
		groupc->times[PSI_MEM_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_MEM_FULL))
			groupc->times[PSI_MEM_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
		groupc->times[PSI_CPU_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_CPU_FULL))
			groupc->times[PSI_CPU_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_NONIDLE))
		groupc->times[PSI_NONIDLE] += delta;
}

static void psi_group_change(struct psi_group *group, int cpu,
			     unsigned int clear, unsigned int set, u64 now,
			     bool wake_clock)
{
	struct psi_group_cpu *groupc;
	unsigned int t, m;
	enum psi_states s;
	u32 state_mask;

	groupc = per_cpu_ptr(group->pcpu, cpu);

	/*
	 * First we update the task counts according to the state
	 * change requested through the @clear and @set bits.
	 *
	 * Then, if cgroup PSI stats accounting is enabled, we assess
	 * the aggregate resource states this CPU's tasks have been in
	 * since the last change, and account any SOME and FULL time
	 * these may have resulted in.
	 */
	write_seqcount_begin(&groupc->seq);

	/*
	 * Start with TSK_ONCPU, which doesn't have a corresponding
	 * task count - it's just a boolean flag directly encoded in
	 * the state mask. Clear, set, or carry the current state if
	 * no changes are requested.
	 */
	if (unlikely(clear & TSK_ONCPU)) {
		state_mask = 0;
		clear &= ~TSK_ONCPU;
	} else if (unlikely(set & TSK_ONCPU)) {
		state_mask = PSI_ONCPU;
		set &= ~TSK_ONCPU;
	} else {
		state_mask = groupc->state_mask & PSI_ONCPU;
	}

	/*
	 * The rest of the state mask is calculated based on the task
	 * counts. Update those first, then construct the mask.
	 */
	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
		if (!(m & (1 << t)))
			continue;
		if (groupc->tasks[t]) {
			groupc->tasks[t]--;
		} else if (!psi_bug) {
			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
					cpu, t, groupc->tasks[0],
					groupc->tasks[1], groupc->tasks[2],
					groupc->tasks[3], clear, set);
			psi_bug = 1;
		}
	}

	for (t = 0; set; set &= ~(1 << t), t++)
		if (set & (1 << t))
			groupc->tasks[t]++;

	if (!group->enabled) {
		/*
		 * On the first group change after disabling PSI, conclude
		 * the current state and flush its time. This is unlikely
		 * to matter to the user, but aggregation (get_recent_times)
		 * may have already incorporated the live state into times_prev;
		 * avoid a delta sample underflow when PSI is later re-enabled.
		 */
		if (unlikely(groupc->state_mask & (1 << PSI_NONIDLE)))
			record_times(groupc, now);

		groupc->state_mask = state_mask;

		write_seqcount_end(&groupc->seq);
		return;
	}

	for (s = 0; s < NR_PSI_STATES; s++) {
		if (test_state(groupc->tasks, s, state_mask & PSI_ONCPU))
			state_mask |= (1 << s);
	}

	/*
	 * Since we care about lost potential, a memstall is FULL
	 * when there are no other working tasks, but also when
	 * the CPU is actively reclaiming and nothing productive
	 * could run even if it were runnable. So when the current
	 * task in a cgroup is in_memstall, the corresponding groupc
	 * on that cpu is in PSI_MEM_FULL state.
	 */
	if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall))
		state_mask |= (1 << PSI_MEM_FULL);

	record_times(groupc, now);

	groupc->state_mask = state_mask;

	write_seqcount_end(&groupc->seq);

	if (state_mask & group->poll_states)
		psi_schedule_poll_work(group, 1);

	if (wake_clock && !delayed_work_pending(&group->avgs_work))
		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
}

static inline struct psi_group *task_psi_group(struct task_struct *task)
{
#ifdef CONFIG_CGROUPS
	if (static_branch_likely(&psi_cgroups_enabled))
		return cgroup_psi(task_dfl_cgroup(task));
#endif
	return &psi_system;
}

static void psi_flags_change(struct task_struct *task, int clear, int set)
{
	if (((task->psi_flags & set) ||
	     (task->psi_flags & clear) != clear) &&
	    !psi_bug) {
		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
				task->pid, task->comm, task_cpu(task),
				task->psi_flags, clear, set);
		psi_bug = 1;
	}

	task->psi_flags &= ~clear;
	task->psi_flags |= set;
}

void psi_task_change(struct task_struct *task, int clear, int set)
{
	int cpu = task_cpu(task);
	struct psi_group *group;
	u64 now;

	if (!task->pid)
		return;

	psi_flags_change(task, clear, set);

	now = cpu_clock(cpu);

	group = task_psi_group(task);
	do {
		psi_group_change(group, cpu, clear, set, now, true);
	} while ((group = group->parent));
}

void psi_task_switch(struct task_struct *prev, struct task_struct *next,
		     bool sleep)
{
	struct psi_group *group, *common = NULL;
	int cpu = task_cpu(prev);
	u64 now = cpu_clock(cpu);

	if (next->pid) {
		psi_flags_change(next, 0, TSK_ONCPU);
		/*
		 * Set TSK_ONCPU on @next's cgroups. If @next shares any
		 * ancestors with @prev, those will already have @prev's
		 * TSK_ONCPU bit set, and we can stop the iteration there.
		 */
		group = task_psi_group(next);
		do {
			if (per_cpu_ptr(group->pcpu, cpu)->state_mask &
			    PSI_ONCPU) {
				common = group;
				break;
			}

			psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
		} while ((group = group->parent));
	}

	if (prev->pid) {
		int clear = TSK_ONCPU, set = 0;
		bool wake_clock = true;

		/*
		 * When we're going to sleep, psi_dequeue() lets us
		 * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
		 * TSK_IOWAIT here, where we can combine it with
		 * TSK_ONCPU and save walking common ancestors twice.
		 */
		if (sleep) {
			clear |= TSK_RUNNING;
			if (prev->in_memstall)
				clear |= TSK_MEMSTALL_RUNNING;
			if (prev->in_iowait)
				set |= TSK_IOWAIT;

			/*
			 * Periodic aggregation shuts off if there is a period of no
			 * task changes, so we wake it back up if necessary. However,
			 * don't do this if the task change is the aggregation worker
			 * itself going to sleep, or we'll ping-pong forever.
			 */
			if (unlikely((prev->flags & PF_WQ_WORKER) &&
				     wq_worker_last_func(prev) == psi_avgs_work))
				wake_clock = false;
		}

		psi_flags_change(prev, clear, set);

		group = task_psi_group(prev);
		do {
			if (group == common)
				break;
			psi_group_change(group, cpu, clear, set, now, wake_clock);
		} while ((group = group->parent));

		/*
		 * TSK_ONCPU is handled up to the common ancestor. If there are
		 * any other differences between the two tasks (e.g. prev goes
		 * to sleep, or only one task is memstall), finish propagating
		 * those differences all the way up to the root.
		 */
		if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
			clear &= ~TSK_ONCPU;
			for (; group; group = group->parent)
				psi_group_change(group, cpu, clear, set, now, wake_clock);
		}
	}
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
void psi_account_irqtime(struct task_struct *task, u32 delta)
{
	int cpu = task_cpu(task);
	struct psi_group *group;
	struct psi_group_cpu *groupc;
	u64 now;

	if (!task->pid)
		return;

	now = cpu_clock(cpu);

	group = task_psi_group(task);
	do {
		if (!group->enabled)
			continue;

		groupc = per_cpu_ptr(group->pcpu, cpu);

		write_seqcount_begin(&groupc->seq);

		record_times(groupc, now);
		groupc->times[PSI_IRQ_FULL] += delta;

		write_seqcount_end(&groupc->seq);

		if (group->poll_states & (1 << PSI_IRQ_FULL))
			psi_schedule_poll_work(group, 1);
	} while ((group = group->parent));
}
#endif

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	*flags = current->in_memstall;
	if (*flags)
		return;
	/*
	 * in_memstall setting & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we can
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 1;
	psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);

	rq_unlock_irq(rq, &rf);
}
EXPORT_SYMBOL_GPL(psi_memstall_enter);

/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	if (*flags)
		return;
	/*
	 * in_memstall clearing & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we could
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 0;
	psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);

	rq_unlock_irq(rq, &rf);
}
EXPORT_SYMBOL_GPL(psi_memstall_leave);

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
	if (!static_branch_likely(&psi_cgroups_enabled))
		return 0;

	cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
	if (!cgroup->psi)
		return -ENOMEM;

	cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu);
	if (!cgroup->psi->pcpu) {
		kfree(cgroup->psi);
		return -ENOMEM;
	}
	group_init(cgroup->psi);
	cgroup->psi->parent = cgroup_psi(cgroup_parent(cgroup));
	return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
	if (!static_branch_likely(&psi_cgroups_enabled))
		return;

	cancel_delayed_work_sync(&cgroup->psi->avgs_work);
	free_percpu(cgroup->psi->pcpu);
	/* All triggers must be removed by now */
	WARN_ONCE(cgroup->psi->poll_states, "psi: trigger leak\n");
	kfree(cgroup->psi);
}

/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
	unsigned int task_flags;
	struct rq_flags rf;
	struct rq *rq;

	if (!static_branch_likely(&psi_cgroups_enabled)) {
		/*
		 * Lame to do this here, but the scheduler cannot be locked
		 * from the outside, so we move cgroups from inside sched/.
		 */
		rcu_assign_pointer(task->cgroups, to);
		return;
	}

	rq = task_rq_lock(task, &rf);

	/*
	 * We may race with schedule() dropping the rq lock between
	 * deactivating prev and switching to next. Because the psi
	 * updates from the deactivation are deferred to the switch
	 * callback to save cgroup tree updates, the task's scheduling
	 * state here is not coherent with its psi state:
	 *
	 * schedule()                   cgroup_move_task()
	 *   rq_lock()
	 *   deactivate_task()
	 *     p->on_rq = 0
	 *     psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
	 *   pick_next_task()
	 *     rq_unlock()
	 *                                rq_lock()
	 *                                psi_task_change() // old cgroup
	 *                                task->cgroups = to
	 *                                psi_task_change() // new cgroup
	 *                                rq_unlock()
	 *     rq_lock()
	 *     psi_sched_switch() // does deferred updates in new cgroup
	 *
	 * Don't rely on the scheduling state. Use psi_flags instead.
	 */
	task_flags = task->psi_flags;

	if (task_flags)
		psi_task_change(task, task_flags, 0);

	/* See comment above */
	rcu_assign_pointer(task->cgroups, to);

	if (task_flags)
		psi_task_change(task, 0, task_flags);

	task_rq_unlock(rq, task, &rf);
}

void psi_cgroup_restart(struct psi_group *group)
{
	int cpu;

	/*
	 * After psi_group->enabled is cleared, we don't actually stop
	 * the per-cpu task accounting in each psi_group_cpu; we only
	 * skip the test_state() loop, record_times() and the averaging
	 * worker, see psi_group_change() for details.
	 *
	 * When cgroup PSI is disabled, this function has nothing to
	 * sync since the cgroup pressure files are hidden and each
	 * percpu psi_group_cpu sees !psi_group->enabled and only does
	 * task accounting.
	 *
	 * When cgroup PSI is re-enabled, this function uses
	 * psi_group_change() to get the correct state mask from the
	 * test_state() loop on tasks[], and restarts groupc->state_start
	 * from now. We use .clear = .set = 0 here since no task status
	 * really changed.
	 */
	if (!group->enabled)
		return;

	for_each_possible_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
		struct rq_flags rf;
		u64 now;

		rq_lock_irq(rq, &rf);
		now = cpu_clock(cpu);
		psi_group_change(group, cpu, 0, 0, now, true);
		rq_unlock_irq(rq, &rf);
	}
}
#endif /* CONFIG_CGROUPS */

int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
	bool only_full = false;
	int full;
	u64 now;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	/* Update averages before reporting them */
	mutex_lock(&group->avgs_lock);
	now = sched_clock();
	collect_percpu_times(group, PSI_AVGS, NULL);
	if (now >= group->avg_next_update)
		group->avg_next_update = update_averages(group, now);
	mutex_unlock(&group->avgs_lock);

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	only_full = res == PSI_IRQ;
#endif

	for (full = 0; full < 2 - only_full; full++) {
		unsigned long avg[3] = { 0, };
		u64 total = 0;
		int w;

		/* CPU FULL is undefined at the system level */
		if (!(group == &psi_system && res == PSI_CPU && full)) {
			for (w = 0; w < 3; w++)
				avg[w] = group->avg[res * 2 + full][w];
			total = div_u64(group->total[PSI_AVGS][res * 2 + full],
					NSEC_PER_USEC);
		}

		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
			   full || only_full ? "full" : "some",
"full" : "some", 1179 LOAD_INT(avg[0]), LOAD_FRAC(avg[0]), 1180 LOAD_INT(avg[1]), LOAD_FRAC(avg[1]), 1181 LOAD_INT(avg[2]), LOAD_FRAC(avg[2]), 1182 total); 1183 } 1184 1185 return 0; 1186 } 1187 1188 struct psi_trigger *psi_trigger_create(struct psi_group *group, 1189 char *buf, enum psi_res res) 1190 { 1191 struct psi_trigger *t; 1192 enum psi_states state; 1193 u32 threshold_us; 1194 u32 window_us; 1195 1196 if (static_branch_likely(&psi_disabled)) 1197 return ERR_PTR(-EOPNOTSUPP); 1198 1199 if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2) 1200 state = PSI_IO_SOME + res * 2; 1201 else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2) 1202 state = PSI_IO_FULL + res * 2; 1203 else 1204 return ERR_PTR(-EINVAL); 1205 1206 #ifdef CONFIG_IRQ_TIME_ACCOUNTING 1207 if (res == PSI_IRQ && --state != PSI_IRQ_FULL) 1208 return ERR_PTR(-EINVAL); 1209 #endif 1210 1211 if (state >= PSI_NONIDLE) 1212 return ERR_PTR(-EINVAL); 1213 1214 if (window_us < WINDOW_MIN_US || 1215 window_us > WINDOW_MAX_US) 1216 return ERR_PTR(-EINVAL); 1217 1218 /* Check threshold */ 1219 if (threshold_us == 0 || threshold_us > window_us) 1220 return ERR_PTR(-EINVAL); 1221 1222 t = kmalloc(sizeof(*t), GFP_KERNEL); 1223 if (!t) 1224 return ERR_PTR(-ENOMEM); 1225 1226 t->group = group; 1227 t->state = state; 1228 t->threshold = threshold_us * NSEC_PER_USEC; 1229 t->win.size = window_us * NSEC_PER_USEC; 1230 window_reset(&t->win, sched_clock(), 1231 group->total[PSI_POLL][t->state], 0); 1232 1233 t->event = 0; 1234 t->last_event_time = 0; 1235 init_waitqueue_head(&t->event_wait); 1236 t->pending_event = false; 1237 1238 mutex_lock(&group->trigger_lock); 1239 1240 if (!rcu_access_pointer(group->poll_task)) { 1241 struct task_struct *task; 1242 1243 task = kthread_create(psi_poll_worker, group, "psimon"); 1244 if (IS_ERR(task)) { 1245 kfree(t); 1246 mutex_unlock(&group->trigger_lock); 1247 return ERR_CAST(task); 1248 } 1249 atomic_set(&group->poll_wakeup, 0); 1250 wake_up_process(task); 1251 rcu_assign_pointer(group->poll_task, task); 1252 } 1253 1254 list_add(&t->node, &group->triggers); 1255 group->poll_min_period = min(group->poll_min_period, 1256 div_u64(t->win.size, UPDATES_PER_WINDOW)); 1257 group->nr_triggers[t->state]++; 1258 group->poll_states |= (1 << t->state); 1259 1260 mutex_unlock(&group->trigger_lock); 1261 1262 return t; 1263 } 1264 1265 void psi_trigger_destroy(struct psi_trigger *t) 1266 { 1267 struct psi_group *group; 1268 struct task_struct *task_to_destroy = NULL; 1269 1270 /* 1271 * We do not check psi_disabled since it might have been disabled after 1272 * the trigger got created. 1273 */ 1274 if (!t) 1275 return; 1276 1277 group = t->group; 1278 /* 1279 * Wakeup waiters to stop polling. Can happen if cgroup is deleted 1280 * from under a polling process. 
	 */
	wake_up_interruptible(&t->event_wait);

	mutex_lock(&group->trigger_lock);

	if (!list_empty(&t->node)) {
		struct psi_trigger *tmp;
		u64 period = ULLONG_MAX;

		list_del(&t->node);
		group->nr_triggers[t->state]--;
		if (!group->nr_triggers[t->state])
			group->poll_states &= ~(1 << t->state);
		/* reset min update period for the remaining triggers */
		list_for_each_entry(tmp, &group->triggers, node)
			period = min(period, div_u64(tmp->win.size,
					UPDATES_PER_WINDOW));
		group->poll_min_period = period;
		/* Destroy poll_task when the last trigger is destroyed */
		if (group->poll_states == 0) {
			group->polling_until = 0;
			task_to_destroy = rcu_dereference_protected(
					group->poll_task,
					lockdep_is_held(&group->trigger_lock));
			rcu_assign_pointer(group->poll_task, NULL);
			del_timer(&group->poll_timer);
		}
	}

	mutex_unlock(&group->trigger_lock);

	/*
	 * Wait for psi_schedule_poll_work RCU to complete its read-side
	 * critical section before destroying the trigger and optionally the
	 * poll_task.
	 */
	synchronize_rcu();
	/*
	 * Stop kthread 'psimon' after releasing trigger_lock to prevent a
	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
	 */
	if (task_to_destroy) {
		/*
		 * After the RCU grace period has expired, the worker
		 * can no longer be found through group->poll_task.
		 */
		kthread_stop(task_to_destroy);
	}
	kfree(t);
}

__poll_t psi_trigger_poll(void **trigger_ptr,
			  struct file *file, poll_table *wait)
{
	__poll_t ret = DEFAULT_POLLMASK;
	struct psi_trigger *t;

	if (static_branch_likely(&psi_disabled))
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

	t = smp_load_acquire(trigger_ptr);
	if (!t)
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

	poll_wait(file, &t->event_wait, wait);

	if (cmpxchg(&t->event, 1, 0) == 1)
		ret |= EPOLLPRI;

	return ret;
}

#ifdef CONFIG_PROC_FS
static int psi_io_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *))
{
	if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	return single_open(file, psi_show, NULL);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
	return psi_open(file, psi_io_show);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
	return psi_open(file, psi_memory_show);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
	return psi_open(file, psi_cpu_show);
}

static ssize_t psi_write(struct file *file, const char __user *user_buf,
			 size_t nbytes, enum psi_res res)
{
	char buf[32];
	size_t buf_size;
	struct seq_file *seq;
	struct psi_trigger *new;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	if (!nbytes)
		return -EINVAL;

	buf_size = min(nbytes, sizeof(buf));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size - 1] = '\0';

	seq = file->private_data;

	/* Take seq->lock to protect seq->private from concurrent writes */
	mutex_lock(&seq->lock);

	/* Allow only one trigger per file descriptor */
	if (seq->private) {
		mutex_unlock(&seq->lock);
		return -EBUSY;
	}

	new = psi_trigger_create(&psi_system, buf, res);
	if (IS_ERR(new)) {
		mutex_unlock(&seq->lock);
		return PTR_ERR(new);
	}

	smp_store_release(&seq->private, new);
	mutex_unlock(&seq->lock);

	return nbytes;
}

static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
			    size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_IO);
}

static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
				size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_MEM);
}

static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
			     size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_CPU);
}

static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
{
	struct seq_file *seq = file->private_data;

	return psi_trigger_poll(&seq->private, file, wait);
}

static int psi_fop_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	psi_trigger_destroy(seq->private);
	return single_release(inode, file);
}

static const struct proc_ops psi_io_proc_ops = {
	.proc_open	= psi_io_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_io_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static const struct proc_ops psi_memory_proc_ops = {
	.proc_open	= psi_memory_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_memory_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static const struct proc_ops psi_cpu_proc_ops = {
	.proc_open	= psi_cpu_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_cpu_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
static int psi_irq_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IRQ);
}

static int psi_irq_open(struct inode *inode, struct file *file)
{
	return psi_open(file, psi_irq_show);
}

static ssize_t psi_irq_write(struct file *file, const char __user *user_buf,
			     size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_IRQ);
}

static const struct proc_ops psi_irq_proc_ops = {
	.proc_open	= psi_irq_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_irq_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};
#endif

static int __init psi_proc_init(void)
{
	if (psi_enable) {
		proc_mkdir("pressure", NULL);
		proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops);
		proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops);
		proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops);
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
		proc_create("pressure/irq", 0666, NULL, &psi_irq_proc_ops);
#endif
	}
	return 0;
}
module_init(psi_proc_init);

#endif /* CONFIG_PROC_FS */
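
/*
 * Illustrative userspace usage of the trigger interface implemented
 * above; a sketch, not part of this file, with error handling omitted.
 * The string written is "<some|full> <threshold_us> <window_us>", and
 * poll() reports EPOLLPRI when the threshold is breached within the
 * window:
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char trig[] = "some 150000 1000000";
 *		struct pollfd fds;
 *
 *		fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *		fds.events = POLLPRI;
 *		write(fds.fd, trig, strlen(trig) + 1);
 *		while (poll(&fds, 1, -1) >= 0) {
 *			if (fds.revents & POLLERR)
 *				break;	// monitor went away
 *			if (fds.revents & POLLPRI)
 *				;	// pressure threshold was breached
 *		}
 *		close(fds.fd);
 *		return 0;
 *	}
 */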