/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 * Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 * (Naturally, the FULL state doesn't exist for the CPU resource.)
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
 *
 * Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads.
 * SOME then becomes the proportion of delayed tasks to possible
 * threads, and FULL is the share of possible threads that are
 * unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_running_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1)             = 0.4%
 *	   FULL = (256 - min(257, 256)) / 256 = 0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1)               = 25%
 *	   FULL = (4 - min(3, 4)) / 4         = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
 *
 * Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	   %SOME = tSOME / period
 *	   %FULL = tFULL / period
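 *
 * As an illustrative sanity check of this weighted aggregation (the
 * numbers are made up): over a 2s period, CPU0 is non-idle for the
 * full 2s with 1s of SOME, while CPU1 is non-idle for only 0.5s, all
 * of it SOME. Then:
 *
 *	tNONIDLE = 2 + 0.5                   = 2.5s
 *	   tSOME = (1 * 2 + 0.5 * 0.5) / 2.5 = 0.9s
 *	   %SOME = 0.9 / 2                   = 45%
 *
 * so the mostly-idle CPU1 contributes far less to the result than
 * the fully loaded CPU0.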
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
 */

#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/seqlock.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/psi.h>
#include "sched.h"

static int psi_bug __read_mostly;

DEFINE_STATIC_KEY_FALSE(psi_disabled);

#ifdef CONFIG_PSI_DEFAULT_DISABLED
bool psi_enable;
#else
bool psi_enable = true;
#endif
static int __init setup_psi(char *str)
{
        return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ        (2*HZ+1)        /* 2 sec intervals */
#define EXP_10s         1677            /* 1/exp(2s/10s) as fixed-point */
#define EXP_60s         1981            /* 1/exp(2s/60s) */
#define EXP_300s        2034            /* 1/exp(2s/300s) */

/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
static struct psi_group psi_system = {
        .pcpu = &system_group_pcpu,
};

static void psi_update_work(struct work_struct *work);

static void group_init(struct psi_group *group)
{
        int cpu;

        for_each_possible_cpu(cpu)
                seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
        group->next_update = sched_clock() + psi_period;
        INIT_DELAYED_WORK(&group->clock_work, psi_update_work);
        mutex_init(&group->stat_lock);
}

void __init psi_init(void)
{
        if (!psi_enable) {
                static_branch_enable(&psi_disabled);
                return;
        }

        psi_period = jiffies_to_nsecs(PSI_FREQ);
        group_init(&psi_system);
}

static bool test_state(unsigned int *tasks, enum psi_states state)
{
        switch (state) {
        case PSI_IO_SOME:
                return tasks[NR_IOWAIT];
        case PSI_IO_FULL:
                return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
        case PSI_MEM_SOME:
                return tasks[NR_MEMSTALL];
        case PSI_MEM_FULL:
                return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
        case PSI_CPU_SOME:
                return tasks[NR_RUNNING] > 1;
        case PSI_NONIDLE:
                return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
                        tasks[NR_RUNNING];
        default:
                return false;
        }
}

static void get_recent_times(struct psi_group *group, int cpu, u32 *times)
{
        struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
        unsigned int tasks[NR_PSI_TASK_COUNTS];
        u64 now, state_start;
        unsigned int seq;
        int s;

        /* Snapshot a coherent view of the CPU state */
        do {
                seq = read_seqcount_begin(&groupc->seq);
                now = cpu_clock(cpu);
                memcpy(times, groupc->times, sizeof(groupc->times));
                memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
                state_start = groupc->state_start;
        } while (read_seqcount_retry(&groupc->seq, seq));

        /* Calculate state time deltas against the previous snapshot */
        for (s = 0; s < NR_PSI_STATES; s++) {
                u32 delta;
                /*
                 * In addition to already concluded states, we also
                 * incorporate currently active states on the CPU,
                 * since states may last for many sampling periods.
                 *
                 * This way we keep our delta sampling buckets small
                 * (u32) and our reported pressure close to what's
                 * actually happening.
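                 *
                 * (For scale: 2^32 nanoseconds is roughly 4.3
                 *  seconds, so with a ~2s sampling period each
                 *  per-period delta fits in a u32 with room to
                 *  spare.)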
                 */
                if (test_state(tasks, s))
                        times[s] += now - state_start;

                delta = times[s] - groupc->times_prev[s];
                groupc->times_prev[s] = times[s];

                times[s] = delta;
        }
}

static void calc_avgs(unsigned long avg[3], int missed_periods,
                      u64 time, u64 period)
{
        unsigned long pct;

        /* Fill in zeroes for periods of no activity */
        if (missed_periods) {
                avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
                avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
                avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
        }

        /* Sample the most recent active period */
        pct = div_u64(time * 100, period);
        pct *= FIXED_1;
        avg[0] = calc_load(avg[0], EXP_10s, pct);
        avg[1] = calc_load(avg[1], EXP_60s, pct);
        avg[2] = calc_load(avg[2], EXP_300s, pct);
}

static bool update_stats(struct psi_group *group)
{
        u64 deltas[NR_PSI_STATES - 1] = { 0, };
        unsigned long missed_periods = 0;
        unsigned long nonidle_total = 0;
        u64 now, expires, period;
        int cpu;
        int s;

        mutex_lock(&group->stat_lock);

        /*
         * Collect the per-cpu time buckets and average them into a
         * single time sample that is normalized to wallclock time.
         *
         * For averaging, each CPU is weighted by its non-idle time in
         * the sampling period. This eliminates artifacts from uneven
         * loading, or even entirely idle CPUs.
         */
        for_each_possible_cpu(cpu) {
                u32 times[NR_PSI_STATES];
                u32 nonidle;

                get_recent_times(group, cpu, times);

                nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
                nonidle_total += nonidle;

                for (s = 0; s < PSI_NONIDLE; s++)
                        deltas[s] += (u64)times[s] * nonidle;
        }

        /*
         * Integrate the sample into the running statistics that are
         * reported to userspace: the cumulative stall times and the
         * decaying averages.
         *
         * Pressure percentages are sampled at PSI_FREQ. We might be
         * called more often when the user polls more frequently than
         * that; we might be called less often when there is no task
         * activity, thus no data, and clock ticks are sporadic. The
         * below handles both.
         */

        /* total= */
        for (s = 0; s < NR_PSI_STATES - 1; s++)
                group->total[s] += div_u64(deltas[s], max(nonidle_total, 1UL));

        /* avgX= */
        now = sched_clock();
        expires = group->next_update;
        if (now < expires)
                goto out;
        if (now - expires > psi_period)
                missed_periods = div_u64(now - expires, psi_period);

        /*
         * The periodic clock tick can get delayed for various
         * reasons, especially on loaded systems. To avoid clock
         * drift, we schedule the clock in fixed psi_period intervals.
         * But the deltas we sample out of the per-cpu buckets above
         * are based on the actual time elapsing between clock ticks.
         */
        group->next_update = expires + ((1 + missed_periods) * psi_period);
        period = now - (group->last_update + (missed_periods * psi_period));
        group->last_update = now;

        for (s = 0; s < NR_PSI_STATES - 1; s++) {
                u32 sample;

                sample = group->total[s] - group->total_prev[s];
                /*
                 * Due to the lockless sampling of the time buckets,
                 * recorded time deltas can slip into the next period,
                 * which under full pressure can result in samples in
                 * excess of the period length.
                 *
                 * We don't want to report nonsensical pressures in
                 * excess of 100%, nor do we want to drop such events
                 * on the floor. Instead we punt any overage into the
                 * future until pressure subsides. By doing this we
                 * don't underreport the occurring pressure curve, we
                 * just report it delayed by one period length.
                 *
                 * The error isn't cumulative. As soon as another
                 * delta slips from a period P to P+1, by definition
                 * it frees up its time T in P.
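                 *
                 * As a made-up illustration: if 2.4s worth of SOME
                 * time lands in a 2s period, the sample is clamped
                 * to 2s (100%) and total_prev[] only advances by
                 * that clamped amount, so the remaining 0.4s shows
                 * up in the following period's sample instead.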
                 */
                if (sample > period)
                        sample = period;
                group->total_prev[s] += sample;
                calc_avgs(group->avg[s], missed_periods, sample, period);
        }
out:
        mutex_unlock(&group->stat_lock);
        return nonidle_total;
}

static void psi_update_work(struct work_struct *work)
{
        struct delayed_work *dwork;
        struct psi_group *group;
        bool nonidle;

        dwork = to_delayed_work(work);
        group = container_of(dwork, struct psi_group, clock_work);

        /*
         * If there is task activity, periodically fold the per-cpu
         * times and feed samples into the running averages. If things
         * are idle and there is no data to process, stop the clock.
         * Once restarted, we'll catch up the running averages in one
         * go - see calc_avgs() and missed_periods.
         */

        nonidle = update_stats(group);

        if (nonidle) {
                unsigned long delay = 0;
                u64 now;

                now = sched_clock();
                if (group->next_update > now)
                        delay = nsecs_to_jiffies(group->next_update - now) + 1;
                schedule_delayed_work(dwork, delay);
        }
}

static void record_times(struct psi_group_cpu *groupc, int cpu,
                         bool memstall_tick)
{
        u32 delta;
        u64 now;

        now = cpu_clock(cpu);
        delta = now - groupc->state_start;
        groupc->state_start = now;

        if (test_state(groupc->tasks, PSI_IO_SOME)) {
                groupc->times[PSI_IO_SOME] += delta;
                if (test_state(groupc->tasks, PSI_IO_FULL))
                        groupc->times[PSI_IO_FULL] += delta;
        }

        if (test_state(groupc->tasks, PSI_MEM_SOME)) {
                groupc->times[PSI_MEM_SOME] += delta;
                if (test_state(groupc->tasks, PSI_MEM_FULL))
                        groupc->times[PSI_MEM_FULL] += delta;
                else if (memstall_tick) {
                        u32 sample;
                        /*
                         * Since we care about lost potential, a
                         * memstall is FULL when there are no other
                         * working tasks, but also when the CPU is
                         * actively reclaiming and nothing productive
                         * could run even if it were runnable.
                         *
                         * When the timer tick sees a reclaiming CPU,
                         * regardless of runnable tasks, sample a FULL
                         * tick (or less if it hasn't been a full tick
                         * since the last state change).
                         */
                        sample = min(delta, (u32)jiffies_to_nsecs(1));
                        groupc->times[PSI_MEM_FULL] += sample;
                }
        }

        if (test_state(groupc->tasks, PSI_CPU_SOME))
                groupc->times[PSI_CPU_SOME] += delta;

        if (test_state(groupc->tasks, PSI_NONIDLE))
                groupc->times[PSI_NONIDLE] += delta;
}

static void psi_group_change(struct psi_group *group, int cpu,
                             unsigned int clear, unsigned int set)
{
        struct psi_group_cpu *groupc;
        unsigned int t, m;

        groupc = per_cpu_ptr(group->pcpu, cpu);

        /*
         * First we assess the aggregate resource states this CPU's
         * tasks have been in since the last change, and account any
         * SOME and FULL time these may have resulted in.
         *
         * Then we update the task counts according to the state
         * change requested through the @clear and @set bits.
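         *
         * The write_seqcount_begin/end() pair below is what allows
         * get_recent_times() on the aggregation side to take a
         * lockless yet coherent snapshot of tasks[] and times[]
         * while we update them here.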
         */
        write_seqcount_begin(&groupc->seq);

        record_times(groupc, cpu, false);

        for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
                if (!(m & (1 << t)))
                        continue;
                if (groupc->tasks[t] == 0 && !psi_bug) {
                        printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n",
                                        cpu, t, groupc->tasks[0],
                                        groupc->tasks[1], groupc->tasks[2],
                                        clear, set);
                        psi_bug = 1;
                }
                groupc->tasks[t]--;
        }

        for (t = 0; set; set &= ~(1 << t), t++)
                if (set & (1 << t))
                        groupc->tasks[t]++;

        write_seqcount_end(&groupc->seq);

        if (!delayed_work_pending(&group->clock_work))
                schedule_delayed_work(&group->clock_work, PSI_FREQ);
}

static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
{
#ifdef CONFIG_CGROUPS
        struct cgroup *cgroup = NULL;

        if (!*iter)
                cgroup = task->cgroups->dfl_cgrp;
        else if (*iter == &psi_system)
                return NULL;
        else
                cgroup = cgroup_parent(*iter);

        if (cgroup && cgroup_parent(cgroup)) {
                *iter = cgroup;
                return cgroup_psi(cgroup);
        }
#else
        if (*iter)
                return NULL;
#endif
        *iter = &psi_system;
        return &psi_system;
}

void psi_task_change(struct task_struct *task, int clear, int set)
{
        int cpu = task_cpu(task);
        struct psi_group *group;
        void *iter = NULL;

        if (!task->pid)
                return;

        if (((task->psi_flags & set) ||
             (task->psi_flags & clear) != clear) &&
            !psi_bug) {
                printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
                                task->pid, task->comm, cpu,
                                task->psi_flags, clear, set);
                psi_bug = 1;
        }

        task->psi_flags &= ~clear;
        task->psi_flags |= set;

        while ((group = iterate_groups(task, &iter)))
                psi_group_change(group, cpu, clear, set);
}

void psi_memstall_tick(struct task_struct *task, int cpu)
{
        struct psi_group *group;
        void *iter = NULL;

        while ((group = iterate_groups(task, &iter))) {
                struct psi_group_cpu *groupc;

                groupc = per_cpu_ptr(group->pcpu, cpu);
                write_seqcount_begin(&groupc->seq);
                record_times(groupc, cpu, true);
                write_seqcount_end(&groupc->seq);
        }
}

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled))
                return;

        *flags = current->flags & PF_MEMSTALL;
        if (*flags)
                return;
        /*
         * PF_MEMSTALL setting & accounting needs to be atomic wrt
         * changes to the task's scheduling state, otherwise we can
         * race with CPU migration.
         */
        rq = this_rq_lock_irq(&rf);

        current->flags |= PF_MEMSTALL;
        psi_task_change(current, 0, TSK_MEMSTALL);

        rq_unlock_irq(rq, &rf);
}
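
/*
 * Typical usage (an illustrative sketch; the real call sites live in
 * the memory management code, not here):
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	... perform reclaim or wait for a refaulting page ...
 *	psi_memstall_leave(&pflags);
 *
 * The flags cookie makes nesting safe: only the outermost pair
 * actually toggles PF_MEMSTALL and the accounting.
 */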
/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled))
                return;

        if (*flags)
                return;
        /*
         * PF_MEMSTALL clearing & accounting needs to be atomic wrt
         * changes to the task's scheduling state, otherwise we could
         * race with CPU migration.
         */
        rq = this_rq_lock_irq(&rf);

        current->flags &= ~PF_MEMSTALL;
        psi_task_change(current, TSK_MEMSTALL, 0);

        rq_unlock_irq(rq, &rf);
}

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
        if (static_branch_likely(&psi_disabled))
                return 0;

        cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
        if (!cgroup->psi.pcpu)
                return -ENOMEM;
        group_init(&cgroup->psi);
        return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
        if (static_branch_likely(&psi_disabled))
                return;

        cancel_delayed_work_sync(&cgroup->psi.clock_work);
        free_percpu(cgroup->psi.pcpu);
}

/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
        unsigned int task_flags = 0;
        struct rq_flags rf;
        struct rq *rq;

        if (static_branch_likely(&psi_disabled)) {
                /*
                 * Lame to do this here, but the scheduler cannot be locked
                 * from the outside, so we move cgroups from inside sched/.
                 */
                rcu_assign_pointer(task->cgroups, to);
                return;
        }

        rq = task_rq_lock(task, &rf);

        if (task_on_rq_queued(task))
                task_flags = TSK_RUNNING;
        else if (task->in_iowait)
                task_flags = TSK_IOWAIT;

        if (task->flags & PF_MEMSTALL)
                task_flags |= TSK_MEMSTALL;

        if (task_flags)
                psi_task_change(task, task_flags, 0);

        /* See comment above */
        rcu_assign_pointer(task->cgroups, to);

        if (task_flags)
                psi_task_change(task, 0, task_flags);

        task_rq_unlock(rq, task, &rf);
}
#endif /* CONFIG_CGROUPS */
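
/*
 * Format of the per-resource pressure files (the numbers below are
 * illustrative only):
 *
 *	some avg10=0.22 avg60=0.17 avg300=1.11 total=58761459
 *	full avg10=0.00 avg60=0.13 avg300=0.96 total=8843511
 *
 * The avgN= fields are the decaying pressure percentages over the
 * last 10, 60 and 300 seconds; total= is the absolute stall time in
 * microseconds. The CPU resource reports only the "some" line.
 */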
"full" : "some", 704 LOAD_INT(avg[0]), LOAD_FRAC(avg[0]), 705 LOAD_INT(avg[1]), LOAD_FRAC(avg[1]), 706 LOAD_INT(avg[2]), LOAD_FRAC(avg[2]), 707 total); 708 } 709 710 return 0; 711 } 712 713 static int psi_io_show(struct seq_file *m, void *v) 714 { 715 return psi_show(m, &psi_system, PSI_IO); 716 } 717 718 static int psi_memory_show(struct seq_file *m, void *v) 719 { 720 return psi_show(m, &psi_system, PSI_MEM); 721 } 722 723 static int psi_cpu_show(struct seq_file *m, void *v) 724 { 725 return psi_show(m, &psi_system, PSI_CPU); 726 } 727 728 static int psi_io_open(struct inode *inode, struct file *file) 729 { 730 return single_open(file, psi_io_show, NULL); 731 } 732 733 static int psi_memory_open(struct inode *inode, struct file *file) 734 { 735 return single_open(file, psi_memory_show, NULL); 736 } 737 738 static int psi_cpu_open(struct inode *inode, struct file *file) 739 { 740 return single_open(file, psi_cpu_show, NULL); 741 } 742 743 static const struct file_operations psi_io_fops = { 744 .open = psi_io_open, 745 .read = seq_read, 746 .llseek = seq_lseek, 747 .release = single_release, 748 }; 749 750 static const struct file_operations psi_memory_fops = { 751 .open = psi_memory_open, 752 .read = seq_read, 753 .llseek = seq_lseek, 754 .release = single_release, 755 }; 756 757 static const struct file_operations psi_cpu_fops = { 758 .open = psi_cpu_open, 759 .read = seq_read, 760 .llseek = seq_lseek, 761 .release = single_release, 762 }; 763 764 static int __init psi_proc_init(void) 765 { 766 proc_mkdir("pressure", NULL); 767 proc_create("pressure/io", 0, NULL, &psi_io_fops); 768 proc_create("pressure/memory", 0, NULL, &psi_memory_fops); 769 proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops); 770 return 0; 771 } 772 module_init(psi_proc_init); 773