// SPDX-License-Identifier: GPL-2.0
/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * Polling support by Suren Baghdasaryan <surenb@google.com>
 * Copyright (c) 2018 Google, Inc.
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 * Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
 *
 * What it means for a task to be productive is defined differently
 * for each resource. For IO, productive means a running task. For
 * memory, productive means a running task that isn't a reclaimer. For
 * CPU, productive means an oncpu task.
 *
 * Naturally, the FULL state doesn't exist for the CPU resource at the
 * system level, but exists at the cgroup level. At the cgroup level,
 * FULL means all non-idle tasks in the cgroup are delayed on the CPU
 * resource which is being used by others outside of the cgroup or
 * throttled by the cgroup cpu.max configuration.
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
 *
 * Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME becomes then the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_productive_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1)              = 0.4%
 *	   FULL = (256 - min(256, 256)) / 256  = 0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1)                = 25%
 *	   FULL = (4 - min(3, 4)) / 4          = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
 *
 * Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	tSOME    = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	tFULL    = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	%SOME = tSOME / period
 *	%FULL = tFULL / period
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
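 *
 * [ A quick worked example of the weighting above, with invented
 *   numbers: if CPU0 is non-idle for a whole period and stalled for
 *   half of it, while CPU1 is non-idle for only a quarter of the
 *   period and never stalled, then tNONIDLE = 1.25 periods and
 *   tSOME = (0.5 * 1.0 + 0 * 0.25) / 1.25 = 0.4, i.e. %SOME = 40%,
 *   rather than the 25% an unweighted average of the two CPUs would
 *   suggest. ]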
 */

#include "../workqueue_internal.h"
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/seqlock.h>
#include <linux/uaccess.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/psi.h>
#include "sched.h"

static int psi_bug __read_mostly;

DEFINE_STATIC_KEY_FALSE(psi_disabled);
DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);

#ifdef CONFIG_PSI_DEFAULT_DISABLED
static bool psi_enable;
#else
static bool psi_enable = true;
#endif
static int __init setup_psi(char *str)
{
	return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
#define EXP_60s		1981		/* 1/exp(2s/60s) */
#define EXP_300s	2034		/* 1/exp(2s/300s) */

/* PSI trigger definitions */
#define WINDOW_MIN_US 500000	/* Min window size is 500ms */
#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
#define UPDATES_PER_WINDOW 10	/* 10 updates per window */

/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
struct psi_group psi_system = {
	.pcpu = &system_group_pcpu,
};

static void psi_avgs_work(struct work_struct *work);

static void poll_timer_fn(struct timer_list *t);

static void group_init(struct psi_group *group)
{
	int cpu;

	for_each_possible_cpu(cpu)
		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
	group->avg_last_update = sched_clock();
	group->avg_next_update = group->avg_last_update + psi_period;
	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
	mutex_init(&group->avgs_lock);
	/* Init trigger-related members */
	mutex_init(&group->trigger_lock);
	INIT_LIST_HEAD(&group->triggers);
	memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
	group->poll_states = 0;
	group->poll_min_period = U32_MAX;
	memset(group->polling_total, 0, sizeof(group->polling_total));
	group->polling_next_update = ULLONG_MAX;
	group->polling_until = 0;
	init_waitqueue_head(&group->poll_wait);
	timer_setup(&group->poll_timer, poll_timer_fn, 0);
	rcu_assign_pointer(group->poll_task, NULL);
}

void __init psi_init(void)
{
	if (!psi_enable) {
		static_branch_enable(&psi_disabled);
		return;
	}

	if (!cgroup_psi_enabled())
		static_branch_disable(&psi_cgroups_enabled);

	psi_period = jiffies_to_nsecs(PSI_FREQ);
	group_init(&psi_system);
}
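
/*
 * Map the current per-cpu task counts to the SOME/FULL/NONIDLE states
 * defined in the model description at the top of this file.
 */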
static bool test_state(unsigned int *tasks, enum psi_states state)
{
	switch (state) {
	case PSI_IO_SOME:
		return unlikely(tasks[NR_IOWAIT]);
	case PSI_IO_FULL:
		return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
	case PSI_MEM_SOME:
		return unlikely(tasks[NR_MEMSTALL]);
	case PSI_MEM_FULL:
		return unlikely(tasks[NR_MEMSTALL] &&
			tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
	case PSI_CPU_SOME:
		return unlikely(tasks[NR_RUNNING] > tasks[NR_ONCPU]);
	case PSI_CPU_FULL:
		return unlikely(tasks[NR_RUNNING] && !tasks[NR_ONCPU]);
	case PSI_NONIDLE:
		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
			tasks[NR_RUNNING];
	default:
		return false;
	}
}

static void get_recent_times(struct psi_group *group, int cpu,
			     enum psi_aggregators aggregator, u32 *times,
			     u32 *pchanged_states)
{
	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
	u64 now, state_start;
	enum psi_states s;
	unsigned int seq;
	u32 state_mask;

	*pchanged_states = 0;

	/* Snapshot a coherent view of the CPU state */
	do {
		seq = read_seqcount_begin(&groupc->seq);
		now = cpu_clock(cpu);
		memcpy(times, groupc->times, sizeof(groupc->times));
		state_mask = groupc->state_mask;
		state_start = groupc->state_start;
	} while (read_seqcount_retry(&groupc->seq, seq));

	/* Calculate state time deltas against the previous snapshot */
	for (s = 0; s < NR_PSI_STATES; s++) {
		u32 delta;
		/*
		 * In addition to already concluded states, we also
		 * incorporate currently active states on the CPU,
		 * since states may last for many sampling periods.
		 *
		 * This way we keep our delta sampling buckets small
		 * (u32) and our reported pressure close to what's
		 * actually happening.
		 */
		if (state_mask & (1 << s))
			times[s] += now - state_start;

		delta = times[s] - groupc->times_prev[aggregator][s];
		groupc->times_prev[aggregator][s] = times[s];

		times[s] = delta;
		if (delta)
			*pchanged_states |= (1 << s);
	}
}

static void calc_avgs(unsigned long avg[3], int missed_periods,
		      u64 time, u64 period)
{
	unsigned long pct;

	/* Fill in zeroes for periods of no activity */
	if (missed_periods) {
		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
	}

	/* Sample the most recent active period */
	pct = div_u64(time * 100, period);
	pct *= FIXED_1;
	avg[0] = calc_load(avg[0], EXP_10s, pct);
	avg[1] = calc_load(avg[1], EXP_60s, pct);
	avg[2] = calc_load(avg[2], EXP_300s, pct);
}

static void collect_percpu_times(struct psi_group *group,
				 enum psi_aggregators aggregator,
				 u32 *pchanged_states)
{
	u64 deltas[NR_PSI_STATES - 1] = { 0, };
	unsigned long nonidle_total = 0;
	u32 changed_states = 0;
	int cpu;
	int s;

	/*
	 * Collect the per-cpu time buckets and average them into a
	 * single time sample that is normalized to wallclock time.
	 *
	 * For averaging, each CPU is weighted by its non-idle time in
	 * the sampling period. This eliminates artifacts from uneven
	 * loading, or even entirely idle CPUs.
	 */
	for_each_possible_cpu(cpu) {
		u32 times[NR_PSI_STATES];
		u32 nonidle;
		u32 cpu_changed_states;

		get_recent_times(group, cpu, aggregator, times,
				 &cpu_changed_states);
		changed_states |= cpu_changed_states;

		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
		nonidle_total += nonidle;

		for (s = 0; s < PSI_NONIDLE; s++)
			deltas[s] += (u64)times[s] * nonidle;
	}

	/*
	 * Integrate the sample into the running statistics that are
	 * reported to userspace: the cumulative stall times and the
	 * decaying averages.
	 *
	 * Pressure percentages are sampled at PSI_FREQ. We might be
	 * called more often when the user polls more frequently than
	 * that; we might be called less often when there is no task
	 * activity, thus no data, and clock ticks are sporadic. The
	 * below handles both.
	 */

	/* total= */
	for (s = 0; s < NR_PSI_STATES - 1; s++)
		group->total[aggregator][s] +=
				div_u64(deltas[s], max(nonidle_total, 1UL));

	if (pchanged_states)
		*pchanged_states = changed_states;
}

static u64 update_averages(struct psi_group *group, u64 now)
{
	unsigned long missed_periods = 0;
	u64 expires, period;
	u64 avg_next_update;
	int s;

	/* avgX= */
	expires = group->avg_next_update;
	if (now - expires >= psi_period)
		missed_periods = div_u64(now - expires, psi_period);

	/*
	 * The periodic clock tick can get delayed for various
	 * reasons, especially on loaded systems. To avoid clock
	 * drift, we schedule the clock in fixed psi_period intervals.
	 * But the deltas we sample out of the per-cpu buckets above
	 * are based on the actual time elapsing between clock ticks.
	 */
	avg_next_update = expires + ((1 + missed_periods) * psi_period);
	period = now - (group->avg_last_update + (missed_periods * psi_period));
	group->avg_last_update = now;

	for (s = 0; s < NR_PSI_STATES - 1; s++) {
		u32 sample;

		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
		/*
		 * Due to the lockless sampling of the time buckets,
		 * recorded time deltas can slip into the next period,
		 * which under full pressure can result in samples in
		 * excess of the period length.
		 *
		 * We don't want to report non-sensical pressures in
		 * excess of 100%, nor do we want to drop such events
		 * on the floor. Instead we punt any overage into the
		 * future until pressure subsides. By doing this we
		 * don't underreport the occurring pressure curve, we
		 * just report it delayed by one period length.
		 *
		 * The error isn't cumulative. As soon as another
		 * delta slips from a period P to P+1, by definition
		 * it frees up its time T in P.
		 */
		if (sample > period)
			sample = period;
		group->avg_total[s] += sample;
		calc_avgs(group->avg[s], missed_periods, sample, period);
	}

	return avg_next_update;
}

static void psi_avgs_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct psi_group *group;
	u32 changed_states;
	bool nonidle;
	u64 now;

	dwork = to_delayed_work(work);
	group = container_of(dwork, struct psi_group, avgs_work);

	mutex_lock(&group->avgs_lock);

	now = sched_clock();

	collect_percpu_times(group, PSI_AVGS, &changed_states);
	nonidle = changed_states & (1 << PSI_NONIDLE);
	/*
	 * If there is task activity, periodically fold the per-cpu
	 * times and feed samples into the running averages. If things
	 * are idle and there is no data to process, stop the clock.
	 * Once restarted, we'll catch up the running averages in one
	 * go - see calc_avgs() and missed_periods.
	 */
	if (now >= group->avg_next_update)
		group->avg_next_update = update_averages(group, now);

	if (nonidle) {
		schedule_delayed_work(dwork, nsecs_to_jiffies(
				group->avg_next_update - now) + 1);
	}

	mutex_unlock(&group->avgs_lock);
}

/* Trigger tracking window manipulations */
static void window_reset(struct psi_window *win, u64 now, u64 value,
			 u64 prev_growth)
{
	win->start_time = now;
	win->start_value = value;
	win->prev_growth = prev_growth;
}

/*
 * PSI growth tracking window update and growth calculation routine.
 *
 * This approximates a sliding tracking window by interpolating
 * partially elapsed windows using historical growth data from the
 * previous intervals. This minimizes memory requirements (by not storing
 * all the intermediate values in the previous window) and simplifies
 * the calculations. It works well because the PSI signal changes only
 * in the positive direction, and over relatively small window sizes
 * the growth is close to linear.
 */
static u64 window_update(struct psi_window *win, u64 now, u64 value)
{
	u64 elapsed;
	u64 growth;

	elapsed = now - win->start_time;
	growth = value - win->start_value;
	/*
	 * After each tracking window passes win->start_value and
	 * win->start_time get reset and win->prev_growth stores
	 * the average per-window growth of the previous window.
	 * win->prev_growth is then used to interpolate additional
	 * growth from the previous window assuming it was linear.
	 */
	if (elapsed > win->size)
		window_reset(win, now, value, growth);
	else {
		u32 remaining;

		remaining = win->size - elapsed;
		growth += div64_u64(win->prev_growth * remaining, win->size);
	}

	return growth;
}

static void init_triggers(struct psi_group *group, u64 now)
{
	struct psi_trigger *t;

	list_for_each_entry(t, &group->triggers, node)
		window_reset(&t->win, now,
				group->total[PSI_POLL][t->state], 0);
	memcpy(group->polling_total, group->total[PSI_POLL],
		   sizeof(group->polling_total));
	group->polling_next_update = now + group->poll_min_period;
}

static u64 update_triggers(struct psi_group *group, u64 now)
{
	struct psi_trigger *t;
	bool new_stall = false;
	u64 *total = group->total[PSI_POLL];

	/*
	 * On subsequent updates, calculate growth deltas and let
	 * watchers know when their specified thresholds are exceeded.
	 */
	list_for_each_entry(t, &group->triggers, node) {
		u64 growth;

		/* Check for stall activity */
		if (group->polling_total[t->state] == total[t->state])
			continue;

		/*
		 * Multiple triggers might be looking at the same state,
		 * remember to update group->polling_total[] once we've
		 * been through all of them. Also remember to extend the
		 * polling time if we see new stall activity.
		 */
		new_stall = true;

		/* Calculate growth since last update */
		growth = window_update(&t->win, now, total[t->state]);
		if (growth < t->threshold)
			continue;

		/* Limit event signaling to once per window */
		if (now < t->last_event_time + t->win.size)
			continue;

		/* Generate an event */
		if (cmpxchg(&t->event, 0, 1) == 0)
			wake_up_interruptible(&t->event_wait);
		t->last_event_time = now;
	}

	if (new_stall)
		memcpy(group->polling_total, total,
				sizeof(group->polling_total));

	return now + group->poll_min_period;
}

/* Schedule polling if it's not already scheduled. */
static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
{
	struct task_struct *task;

	/*
	 * Do not reschedule if already scheduled.
	 * Possible race with a timer scheduled after this check but before
	 * mod_timer below can be tolerated because group->polling_next_update
	 * will keep updates on schedule.
	 */
	if (timer_pending(&group->poll_timer))
		return;

	rcu_read_lock();

	task = rcu_dereference(group->poll_task);
	/*
	 * kworker might be NULL in case psi_trigger_destroy races with
	 * psi_task_change (hotpath) which can't use locks
	 */
	if (likely(task))
		mod_timer(&group->poll_timer, jiffies + delay);

	rcu_read_unlock();
}

static void psi_poll_work(struct psi_group *group)
{
	u32 changed_states;
	u64 now;

	mutex_lock(&group->trigger_lock);

	now = sched_clock();

	collect_percpu_times(group, PSI_POLL, &changed_states);

	if (changed_states & group->poll_states) {
		/* Initialize trigger windows when entering polling mode */
		if (now > group->polling_until)
			init_triggers(group, now);

		/*
		 * Keep the monitor active for at least the duration of the
		 * minimum tracking window as long as monitor states are
		 * changing.
		 */
		group->polling_until = now +
			group->poll_min_period * UPDATES_PER_WINDOW;
	}

	if (now > group->polling_until) {
		group->polling_next_update = ULLONG_MAX;
		goto out;
	}

	if (now >= group->polling_next_update)
		group->polling_next_update = update_triggers(group, now);

	psi_schedule_poll_work(group,
		nsecs_to_jiffies(group->polling_next_update - now) + 1);

out:
	mutex_unlock(&group->trigger_lock);
}

static int psi_poll_worker(void *data)
{
	struct psi_group *group = (struct psi_group *)data;

	sched_set_fifo_low(current);

	while (true) {
		wait_event_interruptible(group->poll_wait,
				atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
				kthread_should_stop());
		if (kthread_should_stop())
			break;

		psi_poll_work(group);
	}
	return 0;
}

static void poll_timer_fn(struct timer_list *t)
{
	struct psi_group *group = from_timer(group, t, poll_timer);

	atomic_set(&group->poll_wakeup, 1);
	wake_up_interruptible(&group->poll_wait);
}

static void record_times(struct psi_group_cpu *groupc, u64 now)
{
	u32 delta;

	delta = now - groupc->state_start;
	groupc->state_start = now;

	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
		groupc->times[PSI_IO_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_IO_FULL))
			groupc->times[PSI_IO_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
		groupc->times[PSI_MEM_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_MEM_FULL))
			groupc->times[PSI_MEM_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
		groupc->times[PSI_CPU_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_CPU_FULL))
			groupc->times[PSI_CPU_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_NONIDLE))
		groupc->times[PSI_NONIDLE] += delta;
}

static void psi_group_change(struct psi_group *group, int cpu,
			     unsigned int clear, unsigned int set, u64 now,
			     bool wake_clock)
{
	struct psi_group_cpu *groupc;
	u32 state_mask = 0;
	unsigned int t, m;
	enum psi_states s;

	groupc = per_cpu_ptr(group->pcpu, cpu);

	/*
	 * First we assess the aggregate resource states this CPU's
	 * tasks have been in since the last change, and account any
	 * SOME and FULL time these may have resulted in.
	 *
	 * Then we update the task counts according to the state
	 * change requested through the @clear and @set bits.
	 */
	write_seqcount_begin(&groupc->seq);

	record_times(groupc, now);

	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
		if (!(m & (1 << t)))
			continue;
		if (groupc->tasks[t]) {
			groupc->tasks[t]--;
		} else if (!psi_bug) {
			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u %u] clear=%x set=%x\n",
					cpu, t, groupc->tasks[0],
					groupc->tasks[1], groupc->tasks[2],
					groupc->tasks[3], groupc->tasks[4],
					clear, set);
			psi_bug = 1;
		}
	}

	for (t = 0; set; set &= ~(1 << t), t++)
		if (set & (1 << t))
			groupc->tasks[t]++;

	/* Calculate state mask representing active states */
	for (s = 0; s < NR_PSI_STATES; s++) {
		if (test_state(groupc->tasks, s))
			state_mask |= (1 << s);
	}

	/*
	 * Since we care about lost potential, a memstall is FULL
	 * when there are no other working tasks, but also when
	 * the CPU is actively reclaiming and nothing productive
	 * could run even if it were runnable. So when the current
	 * task in a cgroup is in_memstall, the corresponding groupc
	 * on that cpu is in PSI_MEM_FULL state.
	 */
	if (unlikely(groupc->tasks[NR_ONCPU] && cpu_curr(cpu)->in_memstall))
		state_mask |= (1 << PSI_MEM_FULL);

	groupc->state_mask = state_mask;

	write_seqcount_end(&groupc->seq);

	if (state_mask & group->poll_states)
		psi_schedule_poll_work(group, 1);

	if (wake_clock && !delayed_work_pending(&group->avgs_work))
		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
}

static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
{
	if (*iter == &psi_system)
		return NULL;

#ifdef CONFIG_CGROUPS
	if (static_branch_likely(&psi_cgroups_enabled)) {
		struct cgroup *cgroup = NULL;

		if (!*iter)
			cgroup = task->cgroups->dfl_cgrp;
		else
			cgroup = cgroup_parent(*iter);

		if (cgroup && cgroup_parent(cgroup)) {
			*iter = cgroup;
			return cgroup_psi(cgroup);
		}
	}
#endif
	*iter = &psi_system;
	return &psi_system;
}

static void psi_flags_change(struct task_struct *task, int clear, int set)
{
	if (((task->psi_flags & set) ||
	     (task->psi_flags & clear) != clear) &&
	    !psi_bug) {
		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
				task->pid, task->comm, task_cpu(task),
				task->psi_flags, clear, set);
		psi_bug = 1;
	}

	task->psi_flags &= ~clear;
	task->psi_flags |= set;
}

void psi_task_change(struct task_struct *task, int clear, int set)
{
	int cpu = task_cpu(task);
	struct psi_group *group;
	bool wake_clock = true;
	void *iter = NULL;
	u64 now;

	if (!task->pid)
		return;

	psi_flags_change(task, clear, set);

	now = cpu_clock(cpu);
	/*
	 * Periodic aggregation shuts off if there is a period of no
	 * task changes, so we wake it back up if necessary. However,
	 * don't do this if the task change is the aggregation worker
	 * itself going to sleep, or we'll ping-pong forever.
	 */
	if (unlikely((clear & TSK_RUNNING) &&
		     (task->flags & PF_WQ_WORKER) &&
		     wq_worker_last_func(task) == psi_avgs_work))
		wake_clock = false;

	while ((group = iterate_groups(task, &iter)))
		psi_group_change(group, cpu, clear, set, now, wake_clock);
}

void psi_task_switch(struct task_struct *prev, struct task_struct *next,
		     bool sleep)
{
	struct psi_group *group, *common = NULL;
	int cpu = task_cpu(prev);
	void *iter;
	u64 now = cpu_clock(cpu);

	if (next->pid) {
		bool identical_state;

		psi_flags_change(next, 0, TSK_ONCPU);
		/*
		 * When switching between tasks that have an identical
		 * runtime state, the cgroups that contain both tasks
		 * see no state change once we reach the first common
		 * ancestor. Iterate @next's ancestors only until we
		 * encounter @prev's ONCPU.
		 */
		identical_state = prev->psi_flags == next->psi_flags;
		iter = NULL;
		while ((group = iterate_groups(next, &iter))) {
			if (identical_state &&
			    per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) {
				common = group;
				break;
			}

			psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
		}
	}

	if (prev->pid) {
		int clear = TSK_ONCPU, set = 0;

		/*
		 * When we're going to sleep, psi_dequeue() lets us
		 * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
		 * TSK_IOWAIT here, where we can combine it with
		 * TSK_ONCPU and save walking common ancestors twice.
		 */
		if (sleep) {
			clear |= TSK_RUNNING;
			if (prev->in_memstall)
				clear |= TSK_MEMSTALL_RUNNING;
			if (prev->in_iowait)
				set |= TSK_IOWAIT;
		}

		psi_flags_change(prev, clear, set);

		iter = NULL;
		while ((group = iterate_groups(prev, &iter)) && group != common)
			psi_group_change(group, cpu, clear, set, now, true);

		/*
		 * TSK_ONCPU is handled up to the common ancestor. If we're tasked
		 * with dequeuing too, finish that for the rest of the hierarchy.
		 */
		if (sleep) {
			clear &= ~TSK_ONCPU;
			for (; group; group = iterate_groups(prev, &iter))
				psi_group_change(group, cpu, clear, set, now, true);
		}
	}
}

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	*flags = current->in_memstall;
	if (*flags)
		return;
	/*
	 * in_memstall setting & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we can
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 1;
	psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);

	rq_unlock_irq(rq, &rf);
}

/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	if (*flags)
		return;
	/*
	 * in_memstall clearing & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we could
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 0;
	psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);

	rq_unlock_irq(rq, &rf);
}

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
	if (static_branch_likely(&psi_disabled))
		return 0;

	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
	if (!cgroup->psi.pcpu)
		return -ENOMEM;
	group_init(&cgroup->psi);
	return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
	if (static_branch_likely(&psi_disabled))
		return;

	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
	free_percpu(cgroup->psi.pcpu);
	/* All triggers must be removed by now */
	WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
}

/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
	unsigned int task_flags;
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled)) {
		/*
		 * Lame to do this here, but the scheduler cannot be locked
		 * from the outside, so we move cgroups from inside sched/.
		 */
		rcu_assign_pointer(task->cgroups, to);
		return;
	}

	rq = task_rq_lock(task, &rf);

	/*
	 * We may race with schedule() dropping the rq lock between
	 * deactivating prev and switching to next. Because the psi
	 * updates from the deactivation are deferred to the switch
	 * callback to save cgroup tree updates, the task's scheduling
	 * state here is not coherent with its psi state:
	 *
	 * schedule()                   cgroup_move_task()
	 *   rq_lock()
	 *   deactivate_task()
	 *     p->on_rq = 0
	 *     psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
	 *   pick_next_task()
	 *     rq_unlock()
	 *                                rq_lock()
	 *                                psi_task_change() // old cgroup
	 *                                task->cgroups = to
	 *                                psi_task_change() // new cgroup
	 *                                rq_unlock()
	 *     rq_lock()
	 *     psi_sched_switch() // does deferred updates in new cgroup
	 *
	 * Don't rely on the scheduling state. Use psi_flags instead.
	 */
	task_flags = task->psi_flags;

	if (task_flags)
		psi_task_change(task, task_flags, 0);

	/* See comment above */
	rcu_assign_pointer(task->cgroups, to);

	if (task_flags)
		psi_task_change(task, 0, task_flags);

	task_rq_unlock(rq, task, &rf);
}
#endif /* CONFIG_CGROUPS */

int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
	int full;
	u64 now;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	/* Update averages before reporting them */
	mutex_lock(&group->avgs_lock);
	now = sched_clock();
	collect_percpu_times(group, PSI_AVGS, NULL);
	if (now >= group->avg_next_update)
		group->avg_next_update = update_averages(group, now);
	mutex_unlock(&group->avgs_lock);

	for (full = 0; full < 2; full++) {
		unsigned long avg[3];
		u64 total;
		int w;

		for (w = 0; w < 3; w++)
			avg[w] = group->avg[res * 2 + full][w];
		total = div_u64(group->total[PSI_AVGS][res * 2 + full],
				NSEC_PER_USEC);

		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
			   full ? "full" : "some",
			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
			   total);
	}

	return 0;
}

struct psi_trigger *psi_trigger_create(struct psi_group *group,
			char *buf, size_t nbytes, enum psi_res res)
{
	struct psi_trigger *t;
	enum psi_states state;
	u32 threshold_us;
	u32 window_us;

	if (static_branch_likely(&psi_disabled))
		return ERR_PTR(-EOPNOTSUPP);

	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
		state = PSI_IO_SOME + res * 2;
	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
		state = PSI_IO_FULL + res * 2;
	else
		return ERR_PTR(-EINVAL);

	if (state >= PSI_NONIDLE)
		return ERR_PTR(-EINVAL);

	if (window_us < WINDOW_MIN_US ||
		window_us > WINDOW_MAX_US)
		return ERR_PTR(-EINVAL);

	/* Check threshold */
	if (threshold_us == 0 || threshold_us > window_us)
		return ERR_PTR(-EINVAL);

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	t->group = group;
	t->state = state;
	t->threshold = threshold_us * NSEC_PER_USEC;
	t->win.size = window_us * NSEC_PER_USEC;
	window_reset(&t->win, 0, 0, 0);

	t->event = 0;
	t->last_event_time = 0;
	init_waitqueue_head(&t->event_wait);

	mutex_lock(&group->trigger_lock);

	if (!rcu_access_pointer(group->poll_task)) {
		struct task_struct *task;

		task = kthread_create(psi_poll_worker, group, "psimon");
		if (IS_ERR(task)) {
			kfree(t);
			mutex_unlock(&group->trigger_lock);
			return ERR_CAST(task);
		}
		atomic_set(&group->poll_wakeup, 0);
		wake_up_process(task);
		rcu_assign_pointer(group->poll_task, task);
	}

	list_add(&t->node, &group->triggers);
	group->poll_min_period = min(group->poll_min_period,
		div_u64(t->win.size, UPDATES_PER_WINDOW));
	group->nr_triggers[t->state]++;
	group->poll_states |= (1 << t->state);

	mutex_unlock(&group->trigger_lock);

	return t;
}

void psi_trigger_destroy(struct psi_trigger *t)
{
	struct psi_group *group;
	struct task_struct *task_to_destroy = NULL;

	/*
	 * We do not check psi_disabled since it might have been disabled after
	 * the trigger got created.
	 */
	if (!t)
		return;

	group = t->group;
	/*
	 * Wakeup waiters to stop polling. Can happen if cgroup is deleted
	 * from under a polling process.
	 */
	wake_up_interruptible(&t->event_wait);

	mutex_lock(&group->trigger_lock);

	if (!list_empty(&t->node)) {
		struct psi_trigger *tmp;
		u64 period = ULLONG_MAX;

		list_del(&t->node);
		group->nr_triggers[t->state]--;
		if (!group->nr_triggers[t->state])
			group->poll_states &= ~(1 << t->state);
		/* reset min update period for the remaining triggers */
		list_for_each_entry(tmp, &group->triggers, node)
			period = min(period, div_u64(tmp->win.size,
					UPDATES_PER_WINDOW));
		group->poll_min_period = period;
		/* Destroy poll_task when the last trigger is destroyed */
		if (group->poll_states == 0) {
			group->polling_until = 0;
			task_to_destroy = rcu_dereference_protected(
					group->poll_task,
					lockdep_is_held(&group->trigger_lock));
			rcu_assign_pointer(group->poll_task, NULL);
			del_timer(&group->poll_timer);
		}
	}

	mutex_unlock(&group->trigger_lock);

	/*
	 * Wait for psi_schedule_poll_work RCU to complete its read-side
	 * critical section before destroying the trigger and optionally the
	 * poll_task.
	 */
	synchronize_rcu();
	/*
	 * Stop kthread 'psimon' after releasing trigger_lock to prevent a
	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
	 */
	if (task_to_destroy) {
		/*
		 * After the RCU grace period has expired, the worker
		 * can no longer be found through group->poll_task.
		 */
		kthread_stop(task_to_destroy);
	}
	kfree(t);
}

__poll_t psi_trigger_poll(void **trigger_ptr,
				struct file *file, poll_table *wait)
{
	__poll_t ret = DEFAULT_POLLMASK;
	struct psi_trigger *t;

	if (static_branch_likely(&psi_disabled))
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

	t = smp_load_acquire(trigger_ptr);
	if (!t)
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

	poll_wait(file, &t->event_wait, wait);

	if (cmpxchg(&t->event, 1, 0) == 1)
		ret |= EPOLLPRI;

	return ret;
}

#ifdef CONFIG_PROC_FS
static int psi_io_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *))
{
	if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	return single_open(file, psi_show, NULL);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
	return psi_open(file, psi_io_show);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
	return psi_open(file, psi_memory_show);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
	return psi_open(file, psi_cpu_show);
}

static ssize_t psi_write(struct file *file, const char __user *user_buf,
			 size_t nbytes, enum psi_res res)
{
	char buf[32];
	size_t buf_size;
	struct seq_file *seq;
	struct psi_trigger *new;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	if (!nbytes)
		return -EINVAL;

	buf_size = min(nbytes, sizeof(buf));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size - 1] = '\0';

	seq = file->private_data;

	/* Take seq->lock to protect seq->private from concurrent writes */
	mutex_lock(&seq->lock);

	/* Allow only one trigger per file descriptor */
	if (seq->private) {
		mutex_unlock(&seq->lock);
		return -EBUSY;
	}

	new = psi_trigger_create(&psi_system, buf, nbytes, res);
	if (IS_ERR(new)) {
		mutex_unlock(&seq->lock);
		return PTR_ERR(new);
	}

	smp_store_release(&seq->private, new);
	mutex_unlock(&seq->lock);

	return nbytes;
}

static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
			    size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_IO);
}

static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
				size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_MEM);
}

static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
			     size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_CPU);
}

static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
{
	struct seq_file *seq = file->private_data;

	return psi_trigger_poll(&seq->private, file, wait);
}

static int psi_fop_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	psi_trigger_destroy(seq->private);
	return single_release(inode, file);
}

static const struct proc_ops psi_io_proc_ops = {
	.proc_open	= psi_io_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_io_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static const struct proc_ops psi_memory_proc_ops = {
	.proc_open	= psi_memory_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_memory_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static const struct proc_ops psi_cpu_proc_ops = {
	.proc_open	= psi_cpu_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_cpu_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static int __init psi_proc_init(void)
{
	if (psi_enable) {
		proc_mkdir("pressure", NULL);
		proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops);
		proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops);
		proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops);
	}
	return 0;
}
module_init(psi_proc_init);

#endif /* CONFIG_PROC_FS */
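
/*
 * Usage sketch (userspace side, illustrative only, not part of this
 * file): the trigger interface implemented above is driven by writing
 * "some"/"full" plus a stall threshold and a tracking window (both in
 * usecs) to a /proc/pressure file, then polling that fd for POLLPRI
 * events. The 150ms threshold over a 1s window below is an arbitrary
 * example; opening the file for writing requires CAP_SYS_RESOURCE.
 *
 *	const char trig[] = "some 150000 1000000";
 *	struct pollfd fds;
 *
 *	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *	fds.events = POLLPRI;
 *	write(fds.fd, trig, strlen(trig) + 1);
 *
 *	while (poll(&fds, 1, -1) > 0) {
 *		if (fds.revents & POLLPRI)
 *			printf("memory pressure event\n");
 *	}
 */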