// SPDX-License-Identifier: GPL-2.0
/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * Polling support by Suren Baghdasaryan <surenb@google.com>
 * Copyright (c) 2018 Google, Inc.
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 * Model
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
 *
 * What it means for a task to be productive is defined differently
 * for each resource. For IO, productive means a running task. For
 * memory, productive means a running task that isn't a reclaimer. For
 * CPU, productive means an oncpu task.
 *
 * Naturally, the FULL state doesn't exist for the CPU resource at the
 * system level, but it does exist at the cgroup level. At the cgroup
 * level, FULL means all non-idle tasks in the cgroup are delayed on
 * the CPU resource, which is either being used by others outside of
 * the cgroup or throttled by the cgroup's cpu.max configuration.
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
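 *
 * As a worked example (illustrative numbers, not from the code): if
 * tasks were delayed on a resource for a total of 500ms within a 2s
 * period while the CPU kept executing other work, then for that
 * period %SOME = 0.5s / 2s = 25% and %FULL = 0%.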
 *
 * Multiple CPUs
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME becomes then the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_productive_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1)             =  0.4%
 *	   FULL = (256 - min(256, 256)) / 256 =  0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1)               = 25%
 *	   FULL = (4 - min(3, 4)) / 4         = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
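 *
 * To make the arithmetic concrete, here is a minimal sketch of the
 * two formulas as plain C (a hypothetical helper for illustration
 * only - the implementation below tracks *time* spent in these
 * states rather than evaluating ratios on demand - and it assumes
 * at least one non-idle task, so threads > 0):
 *
 *	static void psi_ratios(unsigned int nonidle, unsigned int cpus,
 *			       unsigned int delayed, unsigned int productive,
 *			       double *some, double *full)
 *	{
 *		unsigned int threads = nonidle < cpus ? nonidle : cpus;
 *		unsigned int prod = productive < threads ? productive : threads;
 *
 *		*some = delayed >= threads ? 1.0 : (double)delayed / threads;
 *		*full = (double)(threads - prod) / threads;
 *	}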
 *
 * Implementation
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	   %SOME = tSOME / period
 *	   %FULL = tFULL / period
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
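 *
 * As a sketch of that aggregation step (illustrative only; the
 * per-cpu arrays stand in for the psi_group_cpu time buckets, and
 * the real code in collect_percpu_times() below additionally
 * snapshots them under a seqcount and works on u32 deltas):
 *
 *	u64 tnonidle = 0, tsome = 0, tfull = 0;
 *	int cpu;
 *
 *	for (cpu = 0; cpu < nr_cpus; cpu++) {
 *		tnonidle += tNONIDLE[cpu];
 *		tsome    += tSOME[cpu] * tNONIDLE[cpu];
 *		tfull    += tFULL[cpu] * tNONIDLE[cpu];
 *	}
 *	if (tnonidle) {
 *		tsome /= tnonidle;
 *		tfull /= tnonidle;
 *	}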
 */

static int psi_bug __read_mostly;

DEFINE_STATIC_KEY_FALSE(psi_disabled);
static DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);

#ifdef CONFIG_PSI_DEFAULT_DISABLED
static bool psi_enable;
#else
static bool psi_enable = true;
#endif
static int __init setup_psi(char *str)
{
	return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);

/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
#define EXP_60s		1981		/* 1/exp(2s/60s) */
#define EXP_300s	2034		/* 1/exp(2s/300s) */
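
/*
 * A note on the constants above (a derivation, not from the original
 * source): the decay factors are encoded in the 11-bit fixed-point
 * format used by calc_load(), where FIXED_1 == 2048 represents 1.0.
 * For the 10s horizon sampled every 2s:
 *
 *	exp(-2s/10s) * 2048 = 0.8187... * 2048 ~= 1677
 *
 * and likewise exp(-2/60) * 2048 ~= 1981, exp(-2/300) * 2048 ~= 2034.
 */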

/* PSI trigger definitions */
#define WINDOW_MAX_US 10000000	/* Max window size is 10s */
#define UPDATES_PER_WINDOW 10	/* 10 updates per window */

/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
struct psi_group psi_system = {
	.pcpu = &system_group_pcpu,
};

static void psi_avgs_work(struct work_struct *work);

static void poll_timer_fn(struct timer_list *t);

static void group_init(struct psi_group *group)
{
	int cpu;

	group->enabled = true;
	for_each_possible_cpu(cpu)
		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
	group->avg_last_update = sched_clock();
	group->avg_next_update = group->avg_last_update + psi_period;
	mutex_init(&group->avgs_lock);

	/* Init avg trigger-related members */
	INIT_LIST_HEAD(&group->avg_triggers);
	memset(group->avg_nr_triggers, 0, sizeof(group->avg_nr_triggers));
	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);

	/* Init rtpoll trigger-related members */
	atomic_set(&group->rtpoll_scheduled, 0);
	mutex_init(&group->rtpoll_trigger_lock);
	INIT_LIST_HEAD(&group->rtpoll_triggers);
	group->rtpoll_min_period = U32_MAX;
	group->rtpoll_next_update = ULLONG_MAX;
	init_waitqueue_head(&group->rtpoll_wait);
	timer_setup(&group->rtpoll_timer, poll_timer_fn, 0);
	rcu_assign_pointer(group->rtpoll_task, NULL);
}

void __init psi_init(void)
{
	if (!psi_enable) {
		static_branch_enable(&psi_disabled);
		static_branch_disable(&psi_cgroups_enabled);
		return;
	}

	if (!cgroup_psi_enabled())
		static_branch_disable(&psi_cgroups_enabled);

	psi_period = jiffies_to_nsecs(PSI_FREQ);
	group_init(&psi_system);
}

static bool test_state(unsigned int *tasks, enum psi_states state, bool oncpu)
{
	switch (state) {
	case PSI_IO_SOME:
		return unlikely(tasks[NR_IOWAIT]);
	case PSI_IO_FULL:
		return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
	case PSI_MEM_SOME:
		return unlikely(tasks[NR_MEMSTALL]);
	case PSI_MEM_FULL:
		return unlikely(tasks[NR_MEMSTALL] &&
			tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING]);
	case PSI_CPU_SOME:
		return unlikely(tasks[NR_RUNNING] > oncpu);
	case PSI_CPU_FULL:
		return unlikely(tasks[NR_RUNNING] && !oncpu);
	case PSI_NONIDLE:
		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
			tasks[NR_RUNNING];
	default:
		return false;
	}
}
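
/*
 * For instance (an illustrative reading of the table above, not
 * additional logic): with tasks[NR_IOWAIT] == 1 and
 * tasks[NR_RUNNING] == 0, test_state() reports both PSI_IO_SOME and
 * PSI_IO_FULL - a task is delayed on IO and nothing productive is
 * running. Once another task becomes runnable, only PSI_IO_SOME
 * remains true.
 */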

static void get_recent_times(struct psi_group *group, int cpu,
			     enum psi_aggregators aggregator, u32 *times,
			     u32 *pchanged_states)
{
	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
	int current_cpu = raw_smp_processor_id();
	unsigned int tasks[NR_PSI_TASK_COUNTS];
	u64 now, state_start;
	enum psi_states s;
	unsigned int seq;
	u32 state_mask;

	*pchanged_states = 0;

	/* Snapshot a coherent view of the CPU state */
	do {
		seq = read_seqcount_begin(&groupc->seq);
		now = cpu_clock(cpu);
		memcpy(times, groupc->times, sizeof(groupc->times));
		state_mask = groupc->state_mask;
		state_start = groupc->state_start;
		if (cpu == current_cpu)
			memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
	} while (read_seqcount_retry(&groupc->seq, seq));

	/* Calculate state time deltas against the previous snapshot */
	for (s = 0; s < NR_PSI_STATES; s++) {
		u32 delta;
		/*
		 * In addition to already concluded states, we also
		 * incorporate currently active states on the CPU,
		 * since states may last for many sampling periods.
		 *
		 * This way we keep our delta sampling buckets small
		 * (u32) and our reported pressure close to what's
		 * actually happening.
		 */
		if (state_mask & (1 << s))
			times[s] += now - state_start;

		delta = times[s] - groupc->times_prev[aggregator][s];
		groupc->times_prev[aggregator][s] = times[s];

		times[s] = delta;
		if (delta)
			*pchanged_states |= (1 << s);
	}
	/*
	 * When collect_percpu_times() runs from the avgs_work, we don't
	 * want to re-arm avgs_work when all CPUs are IDLE. But the CPU
	 * currently running this avgs_work is never IDLE, because
	 * avgs_work itself can't be shut off. So for the current CPU we
	 * only re-arm avgs_work when
	 * (NR_RUNNING > 1 || NR_IOWAIT > 0 || NR_MEMSTALL > 0); for the
	 * other CPUs checking the PSI_NONIDLE delta is sufficient.
	 */
	if (current_work() == &group->avgs_work.work) {
		bool reschedule;

		if (cpu == current_cpu)
			reschedule = tasks[NR_RUNNING] +
				     tasks[NR_IOWAIT] +
				     tasks[NR_MEMSTALL] > 1;
		else
			reschedule = *pchanged_states & (1 << PSI_NONIDLE);

		if (reschedule)
			*pchanged_states |= PSI_STATE_RESCHEDULE;
	}
}

static void calc_avgs(unsigned long avg[3], int missed_periods,
		      u64 time, u64 period)
{
	unsigned long pct;

	/* Fill in zeroes for periods of no activity */
	if (missed_periods) {
		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
	}

	/* Sample the most recent active period */
	pct = div_u64(time * 100, period);
	pct *= FIXED_1;
	avg[0] = calc_load(avg[0], EXP_10s, pct);
	avg[1] = calc_load(avg[1], EXP_60s, pct);
	avg[2] = calc_load(avg[2], EXP_300s, pct);
}
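
/*
 * A worked example of the decay (derived here, not from the original
 * source): starting from avg10 == 0, one period of 100% stall yields
 *
 *	avg10' = (avg10 * EXP_10s + pct * (FIXED_1 - EXP_10s)) / FIXED_1
 *	       = (0 * 1677 + (100 * 2048) * (2048 - 1677)) / 2048
 *	       = 37100 fixed-point, i.e. ~18.1%
 *
 * so a single fully-stalled 2s window moves the 10s average to about
 * 18%, and sustained stall asymptotically approaches 100%.
 */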

static void collect_percpu_times(struct psi_group *group,
				 enum psi_aggregators aggregator,
				 u32 *pchanged_states)
{
	u64 deltas[NR_PSI_STATES - 1] = { 0, };
	unsigned long nonidle_total = 0;
	u32 changed_states = 0;
	int cpu;
	int s;

	/*
	 * Collect the per-cpu time buckets and average them into a
	 * single time sample that is normalized to wallclock time.
	 *
	 * For averaging, each CPU is weighted by its non-idle time in
	 * the sampling period. This eliminates artifacts from uneven
	 * loading, or even entirely idle CPUs.
	 */
	for_each_possible_cpu(cpu) {
		u32 times[NR_PSI_STATES];
		u32 nonidle;
		u32 cpu_changed_states;

		get_recent_times(group, cpu, aggregator, times,
				 &cpu_changed_states);
		changed_states |= cpu_changed_states;

		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
		nonidle_total += nonidle;

		for (s = 0; s < PSI_NONIDLE; s++)
			deltas[s] += (u64)times[s] * nonidle;
	}

	/*
	 * Integrate the sample into the running statistics that are
	 * reported to userspace: the cumulative stall times and the
	 * decaying averages.
	 *
	 * Pressure percentages are sampled at PSI_FREQ. We might be
	 * called more often when the user polls more frequently than
	 * that; we might be called less often when there is no task
	 * activity, thus no data, and clock ticks are sporadic. The
	 * below handles both.
	 */

	/* total= */
	for (s = 0; s < NR_PSI_STATES - 1; s++)
		group->total[aggregator][s] +=
				div_u64(deltas[s], max(nonidle_total, 1UL));

	if (pchanged_states)
		*pchanged_states = changed_states;
}

/* Trigger tracking window manipulations */
static void window_reset(struct psi_window *win, u64 now, u64 value,
			 u64 prev_growth)
{
	win->start_time = now;
	win->start_value = value;
	win->prev_growth = prev_growth;
}

/*
 * PSI growth tracking window update and growth calculation routine.
 *
 * This approximates a sliding tracking window by interpolating
 * partially elapsed windows using historical growth data from the
 * previous intervals. This minimizes memory requirements (by not storing
 * all the intermediate values in the previous window) and simplifies
 * the calculations. It works well because PSI signal changes only in
 * positive direction and over relatively small window sizes the growth
 * is close to linear.
 */
static u64 window_update(struct psi_window *win, u64 now, u64 value)
{
	u64 elapsed;
	u64 growth;

	elapsed = now - win->start_time;
	growth = value - win->start_value;
	/*
	 * After each tracking window passes win->start_value and
	 * win->start_time get reset and win->prev_growth stores
	 * the average per-window growth of the previous window.
	 * win->prev_growth is then used to interpolate additional
	 * growth from the previous window assuming it was linear.
	 */
	if (elapsed > win->size)
		window_reset(win, now, value, growth);
	else {
		u32 remaining;

		remaining = win->size - elapsed;
		growth += div64_u64(win->prev_growth * remaining, win->size);
	}

	return growth;
}
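
/*
 * Worked interpolation example (derived from the math above, not part
 * of the original source): with a 1s window, 600ms into the current
 * window with 30ms of new stall, and 100ms of growth in the previous
 * window, the reported sliding-window growth is
 *
 *	growth = 30ms + 100ms * (1s - 600ms) / 1s = 70ms
 *
 * i.e. the remaining 40% of the previous window is assumed to have
 * grown linearly and is carried over.
 */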

static u64 update_triggers(struct psi_group *group, u64 now, bool *update_total,
			   enum psi_aggregators aggregator)
{
	struct psi_trigger *t;
	u64 *total = group->total[aggregator];
	struct list_head *triggers;
	u64 *aggregator_total;
	*update_total = false;

	if (aggregator == PSI_AVGS) {
		triggers = &group->avg_triggers;
		aggregator_total = group->avg_total;
	} else {
		triggers = &group->rtpoll_triggers;
		aggregator_total = group->rtpoll_total;
	}

	/*
	 * On subsequent updates, calculate growth deltas and let
	 * watchers know when their specified thresholds are exceeded.
	 */
	list_for_each_entry(t, triggers, node) {
		u64 growth;
		bool new_stall;

		new_stall = aggregator_total[t->state] != total[t->state];

		/* Check for stall activity or a previous threshold breach */
		if (!new_stall && !t->pending_event)
			continue;
		/*
		 * Check for new stall activity, as well as deferred
		 * events that occurred in the last window after the
		 * trigger had already fired (we want to ratelimit
		 * events without dropping any).
		 */
		if (new_stall) {
			/*
			 * Multiple triggers might be looking at the same state,
			 * remember to update the aggregator's total (avg_total[]
			 * or rtpoll_total[]) once we've been through all of
			 * them. Also remember to extend the polling time if we
			 * see new stall activity.
			 */
			*update_total = true;

			/* Calculate growth since last update */
			growth = window_update(&t->win, now, total[t->state]);
			if (!t->pending_event) {
				if (growth < t->threshold)
					continue;

				t->pending_event = true;
			}
		}
		/* Limit event signaling to once per window */
		if (now < t->last_event_time + t->win.size)
			continue;

		/* Generate an event */
		if (cmpxchg(&t->event, 0, 1) == 0) {
			if (t->of)
				kernfs_notify(t->of->kn);
			else
				wake_up_interruptible(&t->event_wait);
		}
		t->last_event_time = now;
		/* Reset threshold breach flag once event got generated */
		t->pending_event = false;
	}

	return now + group->rtpoll_min_period;
}

static u64 update_averages(struct psi_group *group, u64 now)
{
	unsigned long missed_periods = 0;
	u64 expires, period;
	u64 avg_next_update;
	int s;

	/* avgX= */
	expires = group->avg_next_update;
	if (now - expires >= psi_period)
		missed_periods = div_u64(now - expires, psi_period);

	/*
	 * The periodic clock tick can get delayed for various
	 * reasons, especially on loaded systems. To avoid clock
	 * drift, we schedule the clock in fixed psi_period intervals.
	 * But the deltas we sample out of the per-cpu buckets above
	 * are based on the actual time elapsing between clock ticks.
	 */
	avg_next_update = expires + ((1 + missed_periods) * psi_period);
	period = now - (group->avg_last_update + (missed_periods * psi_period));
	group->avg_last_update = now;

	for (s = 0; s < NR_PSI_STATES - 1; s++) {
		u32 sample;

		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
		/*
		 * Due to the lockless sampling of the time buckets,
		 * recorded time deltas can slip into the next period,
		 * which under full pressure can result in samples in
		 * excess of the period length.
		 *
		 * We don't want to report non-sensical pressures in
		 * excess of 100%, nor do we want to drop such events
		 * on the floor. Instead we punt any overage into the
		 * future until pressure subsides. By doing this we
		 * don't underreport the occurring pressure curve, we
		 * just report it delayed by one period length.
		 *
		 * The error isn't cumulative. As soon as another
		 * delta slips from a period P to P+1, by definition
		 * it frees up its time T in P.
		 */
		if (sample > period)
			sample = period;
		group->avg_total[s] += sample;
		calc_avgs(group->avg[s], missed_periods, sample, period);
	}

	return avg_next_update;
}

static void psi_avgs_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct psi_group *group;
	u32 changed_states;
	bool update_total;
	u64 now;

	dwork = to_delayed_work(work);
	group = container_of(dwork, struct psi_group, avgs_work);

	mutex_lock(&group->avgs_lock);

	now = sched_clock();

	collect_percpu_times(group, PSI_AVGS, &changed_states);
	/*
	 * If there is task activity, periodically fold the per-cpu
	 * times and feed samples into the running averages. If things
	 * are idle and there is no data to process, stop the clock.
	 * Once restarted, we'll catch up the running averages in one
	 * go - see calc_avgs() and missed_periods.
	 */
	if (now >= group->avg_next_update) {
		update_triggers(group, now, &update_total, PSI_AVGS);
		group->avg_next_update = update_averages(group, now);
	}

	if (changed_states & PSI_STATE_RESCHEDULE) {
		schedule_delayed_work(dwork, nsecs_to_jiffies(
				group->avg_next_update - now) + 1);
	}

	mutex_unlock(&group->avgs_lock);
}

static void init_rtpoll_triggers(struct psi_group *group, u64 now)
{
	struct psi_trigger *t;

	list_for_each_entry(t, &group->rtpoll_triggers, node)
		window_reset(&t->win, now,
				group->total[PSI_POLL][t->state], 0);
	memcpy(group->rtpoll_total, group->total[PSI_POLL],
		   sizeof(group->rtpoll_total));
	group->rtpoll_next_update = now + group->rtpoll_min_period;
}

/* Schedule polling if it's not already scheduled or forced. */
static void psi_schedule_rtpoll_work(struct psi_group *group, unsigned long delay,
				     bool force)
{
	struct task_struct *task;

	/*
	 * atomic_xchg should be called even when !force to provide a
	 * full memory barrier (see the comment inside psi_rtpoll_work).
	 */
	if (atomic_xchg(&group->rtpoll_scheduled, 1) && !force)
		return;

	rcu_read_lock();

	task = rcu_dereference(group->rtpoll_task);
	/*
	 * kworker might be NULL in case psi_trigger_destroy races with
	 * psi_task_change (hotpath) which can't use locks
	 */
	if (likely(task))
		mod_timer(&group->rtpoll_timer, jiffies + delay);
	else
		atomic_set(&group->rtpoll_scheduled, 0);

	rcu_read_unlock();
}

static void psi_rtpoll_work(struct psi_group *group)
{
	bool force_reschedule = false;
	u32 changed_states;
	bool update_total;
	u64 now;

	mutex_lock(&group->rtpoll_trigger_lock);

	now = sched_clock();

	if (now > group->rtpoll_until) {
		/*
		 * We are either about to start or might stop polling if no
		 * state change was recorded. Resetting poll_scheduled leaves
		 * a small window for psi_group_change to sneak in and schedule
		 * an immediate poll_work before we get to rescheduling. One
		 * potential extra wakeup at the end of the polling window
		 * should be negligible and polling_next_update still keeps
		 * updates correctly on schedule.
		 */
		atomic_set(&group->rtpoll_scheduled, 0);
		/*
		 * A task change can race with the poll worker that is supposed to
		 * report on it. To avoid missing events, ensure ordering between
		 * poll_scheduled and the task state accesses, such that if the poll
		 * worker misses the state update, the task change is guaranteed to
		 * reschedule the poll worker:
		 *
		 * poll worker:
		 *   atomic_set(poll_scheduled, 0)
		 *   smp_mb()
		 *   LOAD states
		 *
		 * task change:
		 *   STORE states
		 *   if atomic_xchg(poll_scheduled, 1) == 0:
		 *     schedule poll worker
		 *
		 * The atomic_xchg() implies a full barrier.
		 */
		smp_mb();
	} else {
		/* Polling window is not over, keep rescheduling */
		force_reschedule = true;
	}

	collect_percpu_times(group, PSI_POLL, &changed_states);

	if (changed_states & group->rtpoll_states) {
		/* Initialize trigger windows when entering polling mode */
		if (now > group->rtpoll_until)
			init_rtpoll_triggers(group, now);

		/*
		 * Keep the monitor active for at least the duration of the
		 * minimum tracking window as long as monitor states are
		 * changing.
		 */
		group->rtpoll_until = now +
			group->rtpoll_min_period * UPDATES_PER_WINDOW;
	}

	if (now > group->rtpoll_until) {
		group->rtpoll_next_update = ULLONG_MAX;
		goto out;
	}

	if (now >= group->rtpoll_next_update) {
		group->rtpoll_next_update = update_triggers(group, now, &update_total, PSI_POLL);
		if (update_total)
			memcpy(group->rtpoll_total, group->total[PSI_POLL],
				   sizeof(group->rtpoll_total));
	}

	psi_schedule_rtpoll_work(group,
		nsecs_to_jiffies(group->rtpoll_next_update - now) + 1,
		force_reschedule);

out:
	mutex_unlock(&group->rtpoll_trigger_lock);
}

static int psi_rtpoll_worker(void *data)
{
	struct psi_group *group = (struct psi_group *)data;

	sched_set_fifo_low(current);

	while (true) {
		wait_event_interruptible(group->rtpoll_wait,
				atomic_cmpxchg(&group->rtpoll_wakeup, 1, 0) ||
				kthread_should_stop());
		if (kthread_should_stop())
			break;

		psi_rtpoll_work(group);
	}
	return 0;
}

static void poll_timer_fn(struct timer_list *t)
{
	struct psi_group *group = from_timer(group, t, rtpoll_timer);

	atomic_set(&group->rtpoll_wakeup, 1);
	wake_up_interruptible(&group->rtpoll_wait);
}

static void record_times(struct psi_group_cpu *groupc, u64 now)
{
	u32 delta;

	delta = now - groupc->state_start;
	groupc->state_start = now;

	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
		groupc->times[PSI_IO_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_IO_FULL))
			groupc->times[PSI_IO_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
		groupc->times[PSI_MEM_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_MEM_FULL))
			groupc->times[PSI_MEM_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
		groupc->times[PSI_CPU_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_CPU_FULL))
			groupc->times[PSI_CPU_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_NONIDLE))
		groupc->times[PSI_NONIDLE] += delta;
}

static void psi_group_change(struct psi_group *group, int cpu,
			     unsigned int clear, unsigned int set,
			     bool wake_clock)
{
	struct psi_group_cpu *groupc;
	unsigned int t, m;
	enum psi_states s;
	u32 state_mask;
	u64 now;

	lockdep_assert_rq_held(cpu_rq(cpu));
	groupc = per_cpu_ptr(group->pcpu, cpu);

	/*
	 * First we update the task counts according to the state
	 * change requested through the @clear and @set bits.
	 *
	 * Then, if cgroup PSI stats accounting is enabled, we assess
	 * the aggregate resource states this CPU's tasks have been in
	 * since the last change, and account any SOME and FULL time
	 * these may have resulted in.
	 */
	write_seqcount_begin(&groupc->seq);
	now = cpu_clock(cpu);

	/*
	 * Start with TSK_ONCPU, which doesn't have a corresponding
	 * task count - it's just a boolean flag directly encoded in
	 * the state mask. Clear, set, or carry the current state if
	 * no changes are requested.
	 */
	if (unlikely(clear & TSK_ONCPU)) {
		state_mask = 0;
		clear &= ~TSK_ONCPU;
	} else if (unlikely(set & TSK_ONCPU)) {
		state_mask = PSI_ONCPU;
		set &= ~TSK_ONCPU;
	} else {
		state_mask = groupc->state_mask & PSI_ONCPU;
	}

	/*
	 * The rest of the state mask is calculated based on the task
	 * counts. Update those first, then construct the mask.
	 */
	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
		if (!(m & (1 << t)))
			continue;
		if (groupc->tasks[t]) {
			groupc->tasks[t]--;
		} else if (!psi_bug) {
			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
					cpu, t, groupc->tasks[0],
					groupc->tasks[1], groupc->tasks[2],
					groupc->tasks[3], clear, set);
			psi_bug = 1;
		}
	}

	for (t = 0; set; set &= ~(1 << t), t++)
		if (set & (1 << t))
			groupc->tasks[t]++;

	if (!group->enabled) {
		/*
		 * On the first group change after disabling PSI, conclude
		 * the current state and flush its time. This is unlikely
		 * to matter to the user, but aggregation (get_recent_times)
		 * may have already incorporated the live state into times_prev;
		 * avoid a delta sample underflow when PSI is later re-enabled.
		 */
		if (unlikely(groupc->state_mask & (1 << PSI_NONIDLE)))
			record_times(groupc, now);

		groupc->state_mask = state_mask;

		write_seqcount_end(&groupc->seq);
		return;
	}

	for (s = 0; s < NR_PSI_STATES; s++) {
		if (test_state(groupc->tasks, s, state_mask & PSI_ONCPU))
			state_mask |= (1 << s);
	}

	/*
	 * Since we care about lost potential, a memstall is FULL
	 * when there are no other working tasks, but also when
	 * the CPU is actively reclaiming and nothing productive
	 * could run even if it were runnable. So when the current
	 * task in a cgroup is in_memstall, the corresponding groupc
	 * on that cpu is in PSI_MEM_FULL state.
	 */
	if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall))
		state_mask |= (1 << PSI_MEM_FULL);

	record_times(groupc, now);

	groupc->state_mask = state_mask;

	write_seqcount_end(&groupc->seq);

	if (state_mask & group->rtpoll_states)
		psi_schedule_rtpoll_work(group, 1, false);

	if (wake_clock && !delayed_work_pending(&group->avgs_work))
		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
}

static inline struct psi_group *task_psi_group(struct task_struct *task)
{
#ifdef CONFIG_CGROUPS
	if (static_branch_likely(&psi_cgroups_enabled))
		return cgroup_psi(task_dfl_cgroup(task));
#endif
	return &psi_system;
}

static void psi_flags_change(struct task_struct *task, int clear, int set)
{
	if (((task->psi_flags & set) ||
	     (task->psi_flags & clear) != clear) &&
	    !psi_bug) {
		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
				task->pid, task->comm, task_cpu(task),
				task->psi_flags, clear, set);
		psi_bug = 1;
	}

	task->psi_flags &= ~clear;
	task->psi_flags |= set;
}

void psi_task_change(struct task_struct *task, int clear, int set)
{
	int cpu = task_cpu(task);
	struct psi_group *group;

	if (!task->pid)
		return;

	psi_flags_change(task, clear, set);

	group = task_psi_group(task);
	do {
		psi_group_change(group, cpu, clear, set, true);
	} while ((group = group->parent));
}

void psi_task_switch(struct task_struct *prev, struct task_struct *next,
		     bool sleep)
{
	struct psi_group *group, *common = NULL;
	int cpu = task_cpu(prev);

	if (next->pid) {
		psi_flags_change(next, 0, TSK_ONCPU);
		/*
		 * Set TSK_ONCPU on @next's cgroups. If @next shares any
		 * ancestors with @prev, those will already have @prev's
		 * TSK_ONCPU bit set, and we can stop the iteration there.
		 */
		group = task_psi_group(next);
		do {
			if (per_cpu_ptr(group->pcpu, cpu)->state_mask &
			    PSI_ONCPU) {
				common = group;
				break;
			}

			psi_group_change(group, cpu, 0, TSK_ONCPU, true);
		} while ((group = group->parent));
	}

	if (prev->pid) {
		int clear = TSK_ONCPU, set = 0;
		bool wake_clock = true;

		/*
		 * When we're going to sleep, psi_dequeue() lets us
		 * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
		 * TSK_IOWAIT here, where we can combine it with
		 * TSK_ONCPU and save walking common ancestors twice.
		 */
		if (sleep) {
			clear |= TSK_RUNNING;
			if (prev->in_memstall)
				clear |= TSK_MEMSTALL_RUNNING;
			if (prev->in_iowait)
				set |= TSK_IOWAIT;

			/*
			 * Periodic aggregation shuts off if there is a period of no
			 * task changes, so we wake it back up if necessary. However,
			 * don't do this if the task change is the aggregation worker
			 * itself going to sleep, or we'll ping-pong forever.
			 */
			if (unlikely((prev->flags & PF_WQ_WORKER) &&
				     wq_worker_last_func(prev) == psi_avgs_work))
				wake_clock = false;
		}

		psi_flags_change(prev, clear, set);

		group = task_psi_group(prev);
		do {
			if (group == common)
				break;
			psi_group_change(group, cpu, clear, set, wake_clock);
		} while ((group = group->parent));

		/*
		 * TSK_ONCPU is handled up to the common ancestor. If there are
		 * any other differences between the two tasks (e.g. prev goes
		 * to sleep, or only one task is memstall), finish propagating
		 * those differences all the way up to the root.
		 */
		if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
			clear &= ~TSK_ONCPU;
			for (; group; group = group->parent)
				psi_group_change(group, cpu, clear, set, wake_clock);
		}
	}
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev)
{
	int cpu = task_cpu(curr);
	struct psi_group *group;
	struct psi_group_cpu *groupc;
	s64 delta;
	u64 irq;

	if (!curr->pid)
		return;

	lockdep_assert_rq_held(rq);
	group = task_psi_group(curr);
	if (prev && task_psi_group(prev) == group)
		return;

	irq = irq_time_read(cpu);
	delta = (s64)(irq - rq->psi_irq_time);
	if (delta < 0)
		return;
	rq->psi_irq_time = irq;

	do {
		u64 now;

		if (!group->enabled)
			continue;

		groupc = per_cpu_ptr(group->pcpu, cpu);

		write_seqcount_begin(&groupc->seq);
		now = cpu_clock(cpu);

		record_times(groupc, now);
		groupc->times[PSI_IRQ_FULL] += delta;

		write_seqcount_end(&groupc->seq);

		if (group->rtpoll_states & (1 << PSI_IRQ_FULL))
			psi_schedule_rtpoll_work(group, 1, false);
	} while ((group = group->parent));
}
#endif

/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	*flags = current->in_memstall;
	if (*flags)
		return;
	/*
	 * in_memstall setting & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we can
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 1;
	psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);

	rq_unlock_irq(rq, &rf);
}
EXPORT_SYMBOL_GPL(psi_memstall_enter);

/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	if (*flags)
		return;
	/*
	 * in_memstall clearing & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we could
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->in_memstall = 0;
	psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);

	rq_unlock_irq(rq, &rf);
}
EXPORT_SYMBOL_GPL(psi_memstall_leave);
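
/*
 * A typical caller pairs the two exports around a blocking operation
 * caused by memory shortage; a minimal sketch (the surrounding caller
 * is hypothetical):
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	...wait for reclaim, compaction or a refaulting page...
 *	psi_memstall_leave(&pflags);
 *
 * The flags word makes nested sections idempotent: only the outermost
 * pair toggles the task's in_memstall state.
 */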

#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
	if (!static_branch_likely(&psi_cgroups_enabled))
		return 0;

	cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
	if (!cgroup->psi)
		return -ENOMEM;

	cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu);
	if (!cgroup->psi->pcpu) {
		kfree(cgroup->psi);
		return -ENOMEM;
	}
	group_init(cgroup->psi);
	cgroup->psi->parent = cgroup_psi(cgroup_parent(cgroup));
	return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
	if (!static_branch_likely(&psi_cgroups_enabled))
		return;

	cancel_delayed_work_sync(&cgroup->psi->avgs_work);
	free_percpu(cgroup->psi->pcpu);
	/* All triggers must be removed by now */
	WARN_ONCE(cgroup->psi->rtpoll_states, "psi: trigger leak\n");
	kfree(cgroup->psi);
}

/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
	unsigned int task_flags;
	struct rq_flags rf;
	struct rq *rq;

	if (!static_branch_likely(&psi_cgroups_enabled)) {
		/*
		 * Lame to do this here, but the scheduler cannot be locked
		 * from the outside, so we move cgroups from inside sched/.
		 */
		rcu_assign_pointer(task->cgroups, to);
		return;
	}

	rq = task_rq_lock(task, &rf);

	/*
	 * We may race with schedule() dropping the rq lock between
	 * deactivating prev and switching to next. Because the psi
	 * updates from the deactivation are deferred to the switch
	 * callback to save cgroup tree updates, the task's scheduling
	 * state here is not coherent with its psi state:
	 *
	 * schedule()                   cgroup_move_task()
	 *   rq_lock()
	 *   deactivate_task()
	 *     p->on_rq = 0
	 *     psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
	 *   pick_next_task()
	 *     rq_unlock()
	 *                                rq_lock()
	 *                                psi_task_change() // old cgroup
	 *                                task->cgroups = to
	 *                                psi_task_change() // new cgroup
	 *                                rq_unlock()
	 *     rq_lock()
	 *     psi_sched_switch() // does deferred updates in new cgroup
	 *
	 * Don't rely on the scheduling state. Use psi_flags instead.
	 */
	task_flags = task->psi_flags;

	if (task_flags)
		psi_task_change(task, task_flags, 0);

	/* See comment above */
	rcu_assign_pointer(task->cgroups, to);

	if (task_flags)
		psi_task_change(task, 0, task_flags);

	task_rq_unlock(rq, task, &rf);
}

void psi_cgroup_restart(struct psi_group *group)
{
	int cpu;

	/*
	 * After psi_group->enabled is cleared, we don't actually stop
	 * the per-cpu task accounting in each psi_group_cpu; we only
	 * skip the test_state() loop, record_times() and the averaging
	 * worker, see psi_group_change() for details.
	 *
	 * When cgroup PSI is disabled, this function has nothing to
	 * sync, since the cgroup pressure files are hidden and each
	 * per-cpu psi_group_cpu sees !psi_group->enabled and only does
	 * task accounting.
	 *
	 * When cgroup PSI is re-enabled, this function uses
	 * psi_group_change() to derive the correct state mask from the
	 * test_state() loop on tasks[], and restarts
	 * groupc->state_start from now. We use .clear = .set = 0 here
	 * since no task state really changed.
	 */
	if (!group->enabled)
		return;

	for_each_possible_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
		struct rq_flags rf;

		rq_lock_irq(rq, &rf);
		psi_group_change(group, cpu, 0, 0, true);
		rq_unlock_irq(rq, &rf);
	}
}
#endif /* CONFIG_CGROUPS */

int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
	bool only_full = false;
	int full;
	u64 now;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	/* Update averages before reporting them */
	mutex_lock(&group->avgs_lock);
	now = sched_clock();
	collect_percpu_times(group, PSI_AVGS, NULL);
	if (now >= group->avg_next_update)
		group->avg_next_update = update_averages(group, now);
	mutex_unlock(&group->avgs_lock);

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	only_full = res == PSI_IRQ;
#endif

	for (full = 0; full < 2 - only_full; full++) {
		unsigned long avg[3] = { 0, };
		u64 total = 0;
		int w;

		/* CPU FULL is undefined at the system level */
		if (!(group == &psi_system && res == PSI_CPU && full)) {
			for (w = 0; w < 3; w++)
				avg[w] = group->avg[res * 2 + full][w];
			total = div_u64(group->total[PSI_AVGS][res * 2 + full],
					NSEC_PER_USEC);
		}

		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
			   full || only_full ? "full" : "some",
			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
			   total);
	}

	return 0;
}

struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
				       enum psi_res res, struct file *file,
				       struct kernfs_open_file *of)
{
	struct psi_trigger *t;
	enum psi_states state;
	u32 threshold_us;
	bool privileged;
	u32 window_us;

	if (static_branch_likely(&psi_disabled))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * Checking the privilege here on file->f_cred implies that a privileged user
	 * could open the file and delegate the write to an unprivileged one.
	 */
	privileged = cap_raised(file->f_cred->cap_effective, CAP_SYS_RESOURCE);

	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
		state = PSI_IO_SOME + res * 2;
	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
		state = PSI_IO_FULL + res * 2;
	else
		return ERR_PTR(-EINVAL);

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	if (res == PSI_IRQ && --state != PSI_IRQ_FULL)
		return ERR_PTR(-EINVAL);
#endif

	if (state >= PSI_NONIDLE)
		return ERR_PTR(-EINVAL);

	if (window_us == 0 || window_us > WINDOW_MAX_US)
		return ERR_PTR(-EINVAL);

	/*
	 * Unprivileged users can only use 2s windows so that the averages
	 * aggregation work is used, and no RT threads need to be spawned.
	 */
	if (!privileged && window_us % 2000000)
		return ERR_PTR(-EINVAL);

	/* Check threshold */
	if (threshold_us == 0 || threshold_us > window_us)
		return ERR_PTR(-EINVAL);

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);

	t->group = group;
	t->state = state;
	t->threshold = threshold_us * NSEC_PER_USEC;
	t->win.size = window_us * NSEC_PER_USEC;
	window_reset(&t->win, sched_clock(),
			group->total[PSI_POLL][t->state], 0);

	t->event = 0;
	t->last_event_time = 0;
	t->of = of;
	if (!of)
		init_waitqueue_head(&t->event_wait);
	t->pending_event = false;
	t->aggregator = privileged ? PSI_POLL : PSI_AVGS;

	if (privileged) {
		mutex_lock(&group->rtpoll_trigger_lock);

		if (!rcu_access_pointer(group->rtpoll_task)) {
			struct task_struct *task;

			task = kthread_create(psi_rtpoll_worker, group, "psimon");
			if (IS_ERR(task)) {
				kfree(t);
				mutex_unlock(&group->rtpoll_trigger_lock);
				return ERR_CAST(task);
			}
			atomic_set(&group->rtpoll_wakeup, 0);
			wake_up_process(task);
			rcu_assign_pointer(group->rtpoll_task, task);
		}

		list_add(&t->node, &group->rtpoll_triggers);
		group->rtpoll_min_period = min(group->rtpoll_min_period,
			div_u64(t->win.size, UPDATES_PER_WINDOW));
		group->rtpoll_nr_triggers[t->state]++;
		group->rtpoll_states |= (1 << t->state);

		mutex_unlock(&group->rtpoll_trigger_lock);
	} else {
		mutex_lock(&group->avgs_lock);

		list_add(&t->node, &group->avg_triggers);
		group->avg_nr_triggers[t->state]++;

		mutex_unlock(&group->avgs_lock);
	}
	return t;
}

void psi_trigger_destroy(struct psi_trigger *t)
{
	struct psi_group *group;
	struct task_struct *task_to_destroy = NULL;

	/*
	 * We do not check psi_disabled since it might have been disabled after
	 * the trigger got created.
	 */
	if (!t)
		return;

	group = t->group;
	/*
	 * Wakeup waiters to stop polling and clear the queue to prevent it from
	 * being accessed later. Can happen if cgroup is deleted from under a
	 * polling process.
	 */
	if (t->of)
		kernfs_notify(t->of->kn);
	else
		wake_up_interruptible(&t->event_wait);

	if (t->aggregator == PSI_AVGS) {
		mutex_lock(&group->avgs_lock);
		if (!list_empty(&t->node)) {
			list_del(&t->node);
			group->avg_nr_triggers[t->state]--;
		}
		mutex_unlock(&group->avgs_lock);
	} else {
		mutex_lock(&group->rtpoll_trigger_lock);
		if (!list_empty(&t->node)) {
			struct psi_trigger *tmp;
			u64 period = ULLONG_MAX;

			list_del(&t->node);
			group->rtpoll_nr_triggers[t->state]--;
			if (!group->rtpoll_nr_triggers[t->state])
				group->rtpoll_states &= ~(1 << t->state);
			/*
			 * Reset min update period for the remaining triggers
			 * iff the destroying trigger had the min window size.
			 */
			if (group->rtpoll_min_period == div_u64(t->win.size, UPDATES_PER_WINDOW)) {
				list_for_each_entry(tmp, &group->rtpoll_triggers, node)
					period = min(period, div_u64(tmp->win.size,
							UPDATES_PER_WINDOW));
				group->rtpoll_min_period = period;
			}
			/* Destroy rtpoll_task when the last trigger is destroyed */
			if (group->rtpoll_states == 0) {
				group->rtpoll_until = 0;
				task_to_destroy = rcu_dereference_protected(
						group->rtpoll_task,
						lockdep_is_held(&group->rtpoll_trigger_lock));
				rcu_assign_pointer(group->rtpoll_task, NULL);
				del_timer(&group->rtpoll_timer);
			}
		}
		mutex_unlock(&group->rtpoll_trigger_lock);
	}

	/*
	 * Wait for psi_schedule_rtpoll_work RCU to complete its read-side
	 * critical section before destroying the trigger and optionally the
	 * rtpoll_task.
	 */
	synchronize_rcu();
	/*
	 * Stop kthread 'psimon' after releasing rtpoll_trigger_lock to prevent
	 * a deadlock while waiting for psi_rtpoll_work to acquire
	 * rtpoll_trigger_lock
	 */
	if (task_to_destroy) {
		/*
		 * After the RCU grace period has expired, the worker
		 * can no longer be found through group->rtpoll_task.
		 */
		kthread_stop(task_to_destroy);
		atomic_set(&group->rtpoll_scheduled, 0);
	}
	kfree(t);
}

__poll_t psi_trigger_poll(void **trigger_ptr,
			  struct file *file, poll_table *wait)
{
	__poll_t ret = DEFAULT_POLLMASK;
	struct psi_trigger *t;

	if (static_branch_likely(&psi_disabled))
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

	t = smp_load_acquire(trigger_ptr);
	if (!t)
		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;

	if (t->of)
		kernfs_generic_poll(t->of, wait);
	else
		poll_wait(file, &t->event_wait, wait);

	if (cmpxchg(&t->event, 1, 0) == 1)
		ret |= EPOLLPRI;

	return ret;
}

#ifdef CONFIG_PROC_FS
static int psi_io_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_io_show, NULL);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_memory_show, NULL);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_cpu_show, NULL);
}

static ssize_t psi_write(struct file *file, const char __user *user_buf,
			 size_t nbytes, enum psi_res res)
{
	char buf[32];
	size_t buf_size;
	struct seq_file *seq;
	struct psi_trigger *new;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	if (!nbytes)
		return -EINVAL;

	buf_size = min(nbytes, sizeof(buf));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size - 1] = '\0';

	seq = file->private_data;

	/* Take seq->lock to protect seq->private from concurrent writes */
	mutex_lock(&seq->lock);

	/* Allow only one trigger per file descriptor */
	if (seq->private) {
		mutex_unlock(&seq->lock);
		return -EBUSY;
	}

	new = psi_trigger_create(&psi_system, buf, res, file, NULL);
	if (IS_ERR(new)) {
		mutex_unlock(&seq->lock);
		return PTR_ERR(new);
	}

	smp_store_release(&seq->private, new);
	mutex_unlock(&seq->lock);

	return nbytes;
}
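
/*
 * For reference, a minimal userspace consumer of this interface (a
 * hedged sketch along the lines of Documentation/accounting/psi.rst,
 * not part of this file): it arms a trigger for 150ms of "some"
 * memory stall per 1s window and blocks until the kernel signals it
 * via POLLPRI.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char trig[] = "some 150000 1000000";
 *		struct pollfd fds;
 *
 *		fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *		if (fds.fd < 0)
 *			return 1;
 *		if (write(fds.fd, trig, strlen(trig) + 1) < 0)
 *			return 1;
 *		fds.events = POLLPRI;
 *		while (poll(&fds, 1, -1) >= 0)
 *			if (fds.revents & POLLPRI)
 *				printf("memory pressure event\n");
 *		return 0;
 *	}
 */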

static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
			    size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_IO);
}

static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
				size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_MEM);
}

static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
			     size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_CPU);
}

static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
{
	struct seq_file *seq = file->private_data;

	return psi_trigger_poll(&seq->private, file, wait);
}

static int psi_fop_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	psi_trigger_destroy(seq->private);
	return single_release(inode, file);
}

static const struct proc_ops psi_io_proc_ops = {
	.proc_open	= psi_io_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_io_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static const struct proc_ops psi_memory_proc_ops = {
	.proc_open	= psi_memory_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_memory_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

static const struct proc_ops psi_cpu_proc_ops = {
	.proc_open	= psi_cpu_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_cpu_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
static int psi_irq_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IRQ);
}

static int psi_irq_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_irq_show, NULL);
}

static ssize_t psi_irq_write(struct file *file, const char __user *user_buf,
			     size_t nbytes, loff_t *ppos)
{
	return psi_write(file, user_buf, nbytes, PSI_IRQ);
}

static const struct proc_ops psi_irq_proc_ops = {
	.proc_open	= psi_irq_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= psi_irq_write,
	.proc_poll	= psi_fop_poll,
	.proc_release	= psi_fop_release,
};
#endif

static int __init psi_proc_init(void)
{
	if (psi_enable) {
		proc_mkdir("pressure", NULL);
		proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops);
		proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops);
		proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops);
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
		proc_create("pressure/irq", 0666, NULL, &psi_irq_proc_ops);
#endif
	}
	return 0;
}
module_init(psi_proc_init);

#endif /* CONFIG_PROC_FS */