xref: /openbmc/linux/kernel/sched/psi.c (revision 19b438592238b3b40c3f945bb5f9c4ca971c0c45)
1 /*
2  * Pressure stall information for CPU, memory and IO
3  *
4  * Copyright (c) 2018 Facebook, Inc.
5  * Author: Johannes Weiner <hannes@cmpxchg.org>
6  *
7  * Polling support by Suren Baghdasaryan <surenb@google.com>
8  * Copyright (c) 2018 Google, Inc.
9  *
10  * When CPU, memory and IO are contended, tasks experience delays that
11  * reduce throughput and introduce latencies into the workload. Memory
12  * and IO contention, in addition, can cause a full loss of forward
13  * progress in which the CPU goes idle.
14  *
15  * This code aggregates individual task delays into resource pressure
16  * metrics that indicate problems with both workload health and
17  * resource utilization.
18  *
19  *			Model
20  *
21  * The time in which a task can execute on a CPU is our baseline for
22  * productivity. Pressure expresses the amount of time in which this
23  * potential cannot be realized due to resource contention.
24  *
25  * This concept of productivity has two components: the workload and
26  * the CPU. To measure the impact of pressure on both, we define two
27  * contention states for a resource: SOME and FULL.
28  *
29  * In the SOME state of a given resource, one or more tasks are
30  * delayed on that resource. This affects the workload's ability to
31  * perform work, but the CPU may still be executing other tasks.
32  *
33  * In the FULL state of a given resource, all non-idle tasks are
34  * delayed on that resource such that nobody is advancing and the CPU
35  * goes idle. This leaves both workload and CPU unproductive.
36  *
37  * Naturally, the FULL state doesn't exist for the CPU resource at the
38  * system level, but it does exist at the cgroup level, where it means
39  * all non-idle tasks in a cgroup are delayed on CPU time that is used
40  * by tasks outside the cgroup or throttled by the cgroup's cpu.max limit.
41  *
42  *	SOME = nr_delayed_tasks != 0
43  *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
44  *
45  * The percentage of wallclock time spent in those compound stall
46  * states gives pressure numbers between 0 and 100 for each resource,
47  * where the SOME percentage indicates workload slowdowns and the FULL
48  * percentage indicates reduced CPU utilization:
49  *
50  *	%SOME = time(SOME) / period
51  *	%FULL = time(FULL) / period
52  *
53  *			Multiple CPUs
54  *
55  * The more tasks and available CPUs there are, the more work can be
56  * performed concurrently. This means that the potential that can go
57  * unrealized due to resource contention *also* scales with non-idle
58  * tasks and CPUs.
59  *
60  * Consider a scenario where 257 number crunching tasks are trying to
61  * run concurrently on 256 CPUs. If we simply aggregated the task
62  * states, we would have to conclude a CPU SOME pressure number of
63  * 100%, since *somebody* is waiting on a runqueue at all
64  * times. However, that is clearly not the amount of contention the
65  * workload is experiencing: only one out of 256 possible execution
66  * threads will be contended at any given time, or about 0.4%.
67  *
68  * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
69  * given time *one* of the tasks is delayed due to a lack of memory.
70  * Again, looking purely at the task state would yield a memory FULL
71  * pressure number of 0%, since *somebody* is always making forward
72  * progress. But again this wouldn't capture the amount of execution
73  * potential lost, which is 1 out of 4 CPUs, or 25%.
74  *
75  * To calculate wasted potential (pressure) with multiple processors,
76  * we have to base our calculation on the number of non-idle tasks in
77  * conjunction with the number of available CPUs, which is the number
78  * of potential execution threads. SOME becomes then the proportion of
79  * delayed tasks to possible threads, and FULL is the share of possible
80  * threads that are unproductive due to delays:
81  *
82  *	threads = min(nr_nonidle_tasks, nr_cpus)
83  *	   SOME = min(nr_delayed_tasks / threads, 1)
84  *	   FULL = (threads - min(nr_running_tasks, threads)) / threads
85  *
86  * For the 257 number crunchers on 256 CPUs, this yields:
87  *
88  *	threads = min(257, 256)
89  *	   SOME = min(1 / 256, 1)             = 0.4%
90  *	   FULL = (256 - min(257, 256)) / 256 = 0%
91  *
92  * For the 1 out of 4 memory-delayed tasks, this yields:
93  *
94  *	threads = min(4, 4)
95  *	   SOME = min(1 / 4, 1)               = 25%
96  *	   FULL = (4 - min(3, 4)) / 4         = 25%
97  *
98  * [ Substitute nr_cpus with 1, and you can see that it's a natural
99  *   extension of the single-CPU model. ]
100  *
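 * [ Spelling the substitution out: with nr_cpus = 1 and any non-idle
 *   task present, threads = 1, so SOME = min(nr_delayed_tasks, 1) and
 *   FULL = 1 - min(nr_running_tasks, 1) - i.e. SOME whenever a task
 *   is delayed, and FULL whenever tasks are delayed but none are
 *   running, exactly the definitions given at the top of this file. ]
 *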
101  *			Implementation
102  *
103  * To assess the precise time spent in each such state, we would have
104  * to freeze the system on task changes and start/stop the state
105  * clocks accordingly. Obviously that doesn't scale in practice.
106  *
107  * Because the scheduler aims to distribute the compute load evenly
108  * among the available CPUs, we can track task state locally to each
109  * CPU and, at much lower frequency, extrapolate the global state for
110  * the cumulative stall times and the running averages.
111  *
112  * For each runqueue, we track:
113  *
114  *	   tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
115  *	   tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
116  *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
117  *
118  * and then periodically aggregate:
119  *
120  *	tNONIDLE = sum(tNONIDLE[i])
121  *
122  *	   tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
123  *	   tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
124  *
125  *	   %SOME = tSOME / period
126  *	   %FULL = tFULL / period
127  *
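 * [ Hypothetical example of the non-idle weighting: if CPU0 is
 *   non-idle for the whole period p with tSOME[0] at 50% of it, while
 *   CPU1 is non-idle for only 10% of the period with tSOME[1] = 0,
 *   then tSOME = (0.5p * p + 0 * 0.1p) / 1.1p ~= 0.45p, i.e. ~45%
 *   SOME, rather than the 25% a naive unweighted average would say. ]
 *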
128  * This gives us an approximation of pressure that is practical
129  * cost-wise, yet way more sensitive and accurate than periodic
130  * sampling of the aggregate task states would be.
131  */
132 
133 #include "../workqueue_internal.h"
134 #include <linux/sched/loadavg.h>
135 #include <linux/seq_file.h>
136 #include <linux/proc_fs.h>
137 #include <linux/seqlock.h>
138 #include <linux/uaccess.h>
139 #include <linux/cgroup.h>
140 #include <linux/module.h>
141 #include <linux/sched.h>
142 #include <linux/ctype.h>
143 #include <linux/file.h>
144 #include <linux/poll.h>
145 #include <linux/psi.h>
146 #include "sched.h"
147 
148 static int psi_bug __read_mostly;
149 
150 DEFINE_STATIC_KEY_FALSE(psi_disabled);
151 
152 #ifdef CONFIG_PSI_DEFAULT_DISABLED
153 static bool psi_enable;
154 #else
155 static bool psi_enable = true;
156 #endif
157 static int __init setup_psi(char *str)
158 {
159 	return kstrtobool(str, &psi_enable) == 0;
160 }
161 __setup("psi=", setup_psi);
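/*
 * For example, booting with "psi=1" enables accounting on kernels built
 * with CONFIG_PSI_DEFAULT_DISABLED, and "psi=0" disables it otherwise.
 */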
162 
163 /* Running averages - we need to be higher-res than loadavg */
164 #define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
165 #define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
166 #define EXP_60s		1981		/* 1/exp(2s/60s) */
167 #define EXP_300s	2034		/* 1/exp(2s/300s) */
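/*
 * These use the loadavg fixed-point convention (FIXED_1 = 1 << 11 from
 * <linux/sched/loadavg.h>): e.g. EXP_10s = 2048 / exp(2s/10s) ~= 1677.
 */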
168 
169 /* PSI trigger definitions */
170 #define WINDOW_MIN_US 500000	/* Min window size is 500ms */
171 #define WINDOW_MAX_US 10000000	/* Max window size is 10s */
172 #define UPDATES_PER_WINDOW 10	/* 10 updates per window */
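/*
 * A trigger is thus re-evaluated every win.size / UPDATES_PER_WINDOW
 * nanoseconds, e.g. every 100ms for a 1s window; see psi_trigger_create().
 */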
173 
174 /* Sampling frequency in nanoseconds */
175 static u64 psi_period __read_mostly;
176 
177 /* System-level pressure and stall tracking */
178 static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
179 struct psi_group psi_system = {
180 	.pcpu = &system_group_pcpu,
181 };
182 
183 static void psi_avgs_work(struct work_struct *work);
184 
185 static void poll_timer_fn(struct timer_list *t);
186 
187 static void group_init(struct psi_group *group)
188 {
189 	int cpu;
190 
191 	for_each_possible_cpu(cpu)
192 		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
193 	group->avg_last_update = sched_clock();
194 	group->avg_next_update = group->avg_last_update + psi_period;
195 	INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
196 	mutex_init(&group->avgs_lock);
197 	/* Init trigger-related members */
198 	mutex_init(&group->trigger_lock);
199 	INIT_LIST_HEAD(&group->triggers);
200 	memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
201 	group->poll_states = 0;
202 	group->poll_min_period = U32_MAX;
203 	memset(group->polling_total, 0, sizeof(group->polling_total));
204 	group->polling_next_update = ULLONG_MAX;
205 	group->polling_until = 0;
206 	init_waitqueue_head(&group->poll_wait);
207 	timer_setup(&group->poll_timer, poll_timer_fn, 0);
208 	rcu_assign_pointer(group->poll_task, NULL);
209 }
210 
211 void __init psi_init(void)
212 {
213 	if (!psi_enable) {
214 		static_branch_enable(&psi_disabled);
215 		return;
216 	}
217 
218 	psi_period = jiffies_to_nsecs(PSI_FREQ);
219 	group_init(&psi_system);
220 }
221 
222 static bool test_state(unsigned int *tasks, enum psi_states state)
223 {
224 	switch (state) {
225 	case PSI_IO_SOME:
226 		return unlikely(tasks[NR_IOWAIT]);
227 	case PSI_IO_FULL:
228 		return unlikely(tasks[NR_IOWAIT] && !tasks[NR_RUNNING]);
229 	case PSI_MEM_SOME:
230 		return unlikely(tasks[NR_MEMSTALL]);
231 	case PSI_MEM_FULL:
232 		return unlikely(tasks[NR_MEMSTALL] && !tasks[NR_RUNNING]);
233 	case PSI_CPU_SOME:
234 		return unlikely(tasks[NR_RUNNING] > tasks[NR_ONCPU]);
235 	case PSI_CPU_FULL:
236 		return unlikely(tasks[NR_RUNNING] && !tasks[NR_ONCPU]);
237 	case PSI_NONIDLE:
238 		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
239 			tasks[NR_RUNNING];
240 	default:
241 		return false;
242 	}
243 }
244 
245 static void get_recent_times(struct psi_group *group, int cpu,
246 			     enum psi_aggregators aggregator, u32 *times,
247 			     u32 *pchanged_states)
248 {
249 	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
250 	u64 now, state_start;
251 	enum psi_states s;
252 	unsigned int seq;
253 	u32 state_mask;
254 
255 	*pchanged_states = 0;
256 
257 	/* Snapshot a coherent view of the CPU state */
258 	do {
259 		seq = read_seqcount_begin(&groupc->seq);
260 		now = cpu_clock(cpu);
261 		memcpy(times, groupc->times, sizeof(groupc->times));
262 		state_mask = groupc->state_mask;
263 		state_start = groupc->state_start;
264 	} while (read_seqcount_retry(&groupc->seq, seq));
265 
266 	/* Calculate state time deltas against the previous snapshot */
267 	for (s = 0; s < NR_PSI_STATES; s++) {
268 		u32 delta;
269 		/*
270 		 * In addition to already concluded states, we also
271 		 * incorporate currently active states on the CPU,
272 		 * since states may last for many sampling periods.
273 		 *
274 		 * This way we keep our delta sampling buckets small
275 		 * (u32) and our reported pressure close to what's
276 		 * actually happening.
277 		 */
278 		if (state_mask & (1 << s))
279 			times[s] += now - state_start;
280 
281 		delta = times[s] - groupc->times_prev[aggregator][s];
282 		groupc->times_prev[aggregator][s] = times[s];
283 
284 		times[s] = delta;
285 		if (delta)
286 			*pchanged_states |= (1 << s);
287 	}
288 }
289 
290 static void calc_avgs(unsigned long avg[3], int missed_periods,
291 		      u64 time, u64 period)
292 {
293 	unsigned long pct;
294 
295 	/* Fill in zeroes for periods of no activity */
296 	if (missed_periods) {
297 		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
298 		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
299 		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
300 	}
301 
302 	/* Sample the most recent active period */
303 	pct = div_u64(time * 100, period);
304 	pct *= FIXED_1;
305 	avg[0] = calc_load(avg[0], EXP_10s, pct);
306 	avg[1] = calc_load(avg[1], EXP_60s, pct);
307 	avg[2] = calc_load(avg[2], EXP_300s, pct);
308 }
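/*
 * calc_load() above is loadavg's fixed-point exponential moving average,
 * roughly avg' = (avg * exp + pct * (FIXED_1 - exp)) / FIXED_1: under a
 * sustained 50% stall, for example, avg10 converges on a value that
 * LOAD_INT()/LOAD_FRAC() print as "50.00".
 */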
309 
310 static void collect_percpu_times(struct psi_group *group,
311 				 enum psi_aggregators aggregator,
312 				 u32 *pchanged_states)
313 {
314 	u64 deltas[NR_PSI_STATES - 1] = { 0, };
315 	unsigned long nonidle_total = 0;
316 	u32 changed_states = 0;
317 	int cpu;
318 	int s;
319 
320 	/*
321 	 * Collect the per-cpu time buckets and average them into a
322 	 * single time sample that is normalized to wallclock time.
323 	 *
324 	 * For averaging, each CPU is weighted by its non-idle time in
325 	 * the sampling period. This eliminates artifacts from uneven
326 	 * loading, or even entirely idle CPUs.
327 	 */
328 	for_each_possible_cpu(cpu) {
329 		u32 times[NR_PSI_STATES];
330 		u32 nonidle;
331 		u32 cpu_changed_states;
332 
333 		get_recent_times(group, cpu, aggregator, times,
334 				&cpu_changed_states);
335 		changed_states |= cpu_changed_states;
336 
337 		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
338 		nonidle_total += nonidle;
339 
340 		for (s = 0; s < PSI_NONIDLE; s++)
341 			deltas[s] += (u64)times[s] * nonidle;
342 	}
343 
344 	/*
345 	 * Integrate the sample into the running statistics that are
346 	 * reported to userspace: the cumulative stall times and the
347 	 * decaying averages.
348 	 *
349 	 * Pressure percentages are sampled at PSI_FREQ. We might be
350 	 * called more often when the user polls more frequently than
351 	 * that; we might be called less often when there is no task
352 	 * activity, thus no data, and clock ticks are sporadic. The
353 	 * below handles both.
354 	 */
355 
356 	/* total= */
357 	for (s = 0; s < NR_PSI_STATES - 1; s++)
358 		group->total[aggregator][s] +=
359 				div_u64(deltas[s], max(nonidle_total, 1UL));
360 
361 	if (pchanged_states)
362 		*pchanged_states = changed_states;
363 }
364 
365 static u64 update_averages(struct psi_group *group, u64 now)
366 {
367 	unsigned long missed_periods = 0;
368 	u64 expires, period;
369 	u64 avg_next_update;
370 	int s;
371 
372 	/* avgX= */
373 	expires = group->avg_next_update;
374 	if (now - expires >= psi_period)
375 		missed_periods = div_u64(now - expires, psi_period);
376 
377 	/*
378 	 * The periodic clock tick can get delayed for various
379 	 * reasons, especially on loaded systems. To avoid clock
380 	 * drift, we schedule the clock in fixed psi_period intervals.
381 	 * But the deltas we sample out of the per-cpu buckets above
382 	 * are based on the actual time elapsing between clock ticks.
383 	 */
384 	avg_next_update = expires + ((1 + missed_periods) * psi_period);
385 	period = now - (group->avg_last_update + (missed_periods * psi_period));
386 	group->avg_last_update = now;
387 
388 	for (s = 0; s < NR_PSI_STATES - 1; s++) {
389 		u32 sample;
390 
391 		sample = group->total[PSI_AVGS][s] - group->avg_total[s];
392 		/*
393 		 * Due to the lockless sampling of the time buckets,
394 		 * recorded time deltas can slip into the next period,
395 		 * which under full pressure can result in samples in
396 		 * excess of the period length.
397 		 *
398 		 * We don't want to report non-sensical pressures in
399 		 * excess of 100%, nor do we want to drop such events
400 		 * on the floor. Instead we punt any overage into the
401 		 * future until pressure subsides. By doing this we
402 		 * don't underreport the occurring pressure curve, we
403 		 * just report it delayed by one period length.
404 		 *
405 		 * The error isn't cumulative. As soon as another
406 		 * delta slips from a period P to P+1, by definition
407 		 * it frees up its time T in P.
408 		 */
409 		if (sample > period)
410 			sample = period;
411 		group->avg_total[s] += sample;
412 		calc_avgs(group->avg[s], missed_periods, sample, period);
413 	}
414 
415 	return avg_next_update;
416 }
417 
418 static void psi_avgs_work(struct work_struct *work)
419 {
420 	struct delayed_work *dwork;
421 	struct psi_group *group;
422 	u32 changed_states;
423 	bool nonidle;
424 	u64 now;
425 
426 	dwork = to_delayed_work(work);
427 	group = container_of(dwork, struct psi_group, avgs_work);
428 
429 	mutex_lock(&group->avgs_lock);
430 
431 	now = sched_clock();
432 
433 	collect_percpu_times(group, PSI_AVGS, &changed_states);
434 	nonidle = changed_states & (1 << PSI_NONIDLE);
435 	/*
436 	 * If there is task activity, periodically fold the per-cpu
437 	 * times and feed samples into the running averages. If things
438 	 * are idle and there is no data to process, stop the clock.
439 	 * Once restarted, we'll catch up the running averages in one
440 	 * go - see calc_avgs() and missed_periods.
441 	 */
442 	if (now >= group->avg_next_update)
443 		group->avg_next_update = update_averages(group, now);
444 
445 	if (nonidle) {
446 		schedule_delayed_work(dwork, nsecs_to_jiffies(
447 				group->avg_next_update - now) + 1);
448 	}
449 
450 	mutex_unlock(&group->avgs_lock);
451 }
452 
453 /* Trigger tracking window manipulations */
454 static void window_reset(struct psi_window *win, u64 now, u64 value,
455 			 u64 prev_growth)
456 {
457 	win->start_time = now;
458 	win->start_value = value;
459 	win->prev_growth = prev_growth;
460 }
461 
462 /*
463  * PSI growth tracking window update and growth calculation routine.
464  *
465  * This approximates a sliding tracking window by interpolating
466  * partially elapsed windows using historical growth data from the
467  * previous intervals. This minimizes memory requirements (by not storing
468  * all the intermediate values in the previous window) and simplifies
469  * the calculations. It works well because PSI signal changes only in
470  * positive direction and over relatively small window sizes the growth
471  * is close to linear.
472  */
473 static u64 window_update(struct psi_window *win, u64 now, u64 value)
474 {
475 	u64 elapsed;
476 	u64 growth;
477 
478 	elapsed = now - win->start_time;
479 	growth = value - win->start_value;
480 	/*
481 	 * After each tracking window passes win->start_value and
482 	 * win->start_time get reset and win->prev_growth stores
483 	 * the average per-window growth of the previous window.
484 	 * win->prev_growth is then used to interpolate additional
485 	 * growth from the previous window assuming it was linear.
486 	 */
487 	if (elapsed > win->size)
488 		window_reset(win, now, value, growth);
489 	else {
490 		u32 remaining;
491 
492 		remaining = win->size - elapsed;
493 		growth += div64_u64(win->prev_growth * remaining, win->size);
494 	}
495 
496 	return growth;
497 }
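/*
 * Worked example with hypothetical numbers: for a 1s window that grew
 * by 100ms of stall in the previous interval, being 400ms into the
 * current window with 60ms of new growth reports
 * 60ms + 100ms * 600ms/1s = 120ms over the trailing window.
 */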
498 
499 static void init_triggers(struct psi_group *group, u64 now)
500 {
501 	struct psi_trigger *t;
502 
503 	list_for_each_entry(t, &group->triggers, node)
504 		window_reset(&t->win, now,
505 				group->total[PSI_POLL][t->state], 0);
506 	memcpy(group->polling_total, group->total[PSI_POLL],
507 		   sizeof(group->polling_total));
508 	group->polling_next_update = now + group->poll_min_period;
509 }
510 
511 static u64 update_triggers(struct psi_group *group, u64 now)
512 {
513 	struct psi_trigger *t;
514 	bool new_stall = false;
515 	u64 *total = group->total[PSI_POLL];
516 
517 	/*
518 	 * On subsequent updates, calculate growth deltas and let
519 	 * watchers know when their specified thresholds are exceeded.
520 	 */
521 	list_for_each_entry(t, &group->triggers, node) {
522 		u64 growth;
523 
524 		/* Check for stall activity */
525 		if (group->polling_total[t->state] == total[t->state])
526 			continue;
527 
528 		/*
529 		 * Multiple triggers might be looking at the same state,
530 		 * remember to update group->polling_total[] once we've
531 		 * been through all of them. Also remember to extend the
532 		 * polling time if we see new stall activity.
533 		 */
534 		new_stall = true;
535 
536 		/* Calculate growth since last update */
537 		growth = window_update(&t->win, now, total[t->state]);
538 		if (growth < t->threshold)
539 			continue;
540 
541 		/* Limit event signaling to once per window */
542 		if (now < t->last_event_time + t->win.size)
543 			continue;
544 
545 		/* Generate an event */
546 		if (cmpxchg(&t->event, 0, 1) == 0)
547 			wake_up_interruptible(&t->event_wait);
548 		t->last_event_time = now;
549 	}
550 
551 	if (new_stall)
552 		memcpy(group->polling_total, total,
553 				sizeof(group->polling_total));
554 
555 	return now + group->poll_min_period;
556 }
557 
558 /* Schedule polling if it's not already scheduled. */
559 static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
560 {
561 	struct task_struct *task;
562 
563 	/*
564 	 * Do not reschedule if already scheduled.
565 	 * Possible race with a timer scheduled after this check but before
566 	 * mod_timer below can be tolerated because group->polling_next_update
567 	 * will keep updates on schedule.
568 	 */
569 	if (timer_pending(&group->poll_timer))
570 		return;
571 
572 	rcu_read_lock();
573 
574 	task = rcu_dereference(group->poll_task);
575 	/*
576 	 * kworker might be NULL in case psi_trigger_destroy races with
577 	 * psi_task_change (hotpath) which can't use locks
578 	 */
579 	if (likely(task))
580 		mod_timer(&group->poll_timer, jiffies + delay);
581 
582 	rcu_read_unlock();
583 }
584 
585 static void psi_poll_work(struct psi_group *group)
586 {
587 	u32 changed_states;
588 	u64 now;
589 
590 	mutex_lock(&group->trigger_lock);
591 
592 	now = sched_clock();
593 
594 	collect_percpu_times(group, PSI_POLL, &changed_states);
595 
596 	if (changed_states & group->poll_states) {
597 		/* Initialize trigger windows when entering polling mode */
598 		if (now > group->polling_until)
599 			init_triggers(group, now);
600 
601 		/*
602 		 * Keep the monitor active for at least the duration of the
603 		 * minimum tracking window as long as monitor states are
604 		 * changing.
605 		 */
606 		group->polling_until = now +
607 			group->poll_min_period * UPDATES_PER_WINDOW;
608 	}
609 
610 	if (now > group->polling_until) {
611 		group->polling_next_update = ULLONG_MAX;
612 		goto out;
613 	}
614 
615 	if (now >= group->polling_next_update)
616 		group->polling_next_update = update_triggers(group, now);
617 
618 	psi_schedule_poll_work(group,
619 		nsecs_to_jiffies(group->polling_next_update - now) + 1);
620 
621 out:
622 	mutex_unlock(&group->trigger_lock);
623 }
624 
625 static int psi_poll_worker(void *data)
626 {
627 	struct psi_group *group = (struct psi_group *)data;
628 
629 	sched_set_fifo_low(current);
630 
631 	while (true) {
632 		wait_event_interruptible(group->poll_wait,
633 				atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
634 				kthread_should_stop());
635 		if (kthread_should_stop())
636 			break;
637 
638 		psi_poll_work(group);
639 	}
640 	return 0;
641 }
642 
643 static void poll_timer_fn(struct timer_list *t)
644 {
645 	struct psi_group *group = from_timer(group, t, poll_timer);
646 
647 	atomic_set(&group->poll_wakeup, 1);
648 	wake_up_interruptible(&group->poll_wait);
649 }
650 
651 static void record_times(struct psi_group_cpu *groupc, u64 now)
652 {
653 	u32 delta;
654 
655 	delta = now - groupc->state_start;
656 	groupc->state_start = now;
657 
658 	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
659 		groupc->times[PSI_IO_SOME] += delta;
660 		if (groupc->state_mask & (1 << PSI_IO_FULL))
661 			groupc->times[PSI_IO_FULL] += delta;
662 	}
663 
664 	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
665 		groupc->times[PSI_MEM_SOME] += delta;
666 		if (groupc->state_mask & (1 << PSI_MEM_FULL))
667 			groupc->times[PSI_MEM_FULL] += delta;
668 	}
669 
670 	if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
671 		groupc->times[PSI_CPU_SOME] += delta;
672 		if (groupc->state_mask & (1 << PSI_CPU_FULL))
673 			groupc->times[PSI_CPU_FULL] += delta;
674 	}
675 
676 	if (groupc->state_mask & (1 << PSI_NONIDLE))
677 		groupc->times[PSI_NONIDLE] += delta;
678 }
679 
680 static void psi_group_change(struct psi_group *group, int cpu,
681 			     unsigned int clear, unsigned int set, u64 now,
682 			     bool wake_clock)
683 {
684 	struct psi_group_cpu *groupc;
685 	u32 state_mask = 0;
686 	unsigned int t, m;
687 	enum psi_states s;
688 
689 	groupc = per_cpu_ptr(group->pcpu, cpu);
690 
691 	/*
692 	 * First we assess the aggregate resource states this CPU's
693 	 * tasks have been in since the last change, and account any
694 	 * SOME and FULL time these may have resulted in.
695 	 *
696 	 * Then we update the task counts according to the state
697 	 * change requested through the @clear and @set bits.
698 	 */
699 	write_seqcount_begin(&groupc->seq);
700 
701 	record_times(groupc, now);
702 
703 	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
704 		if (!(m & (1 << t)))
705 			continue;
706 		if (groupc->tasks[t]) {
707 			groupc->tasks[t]--;
708 		} else if (!psi_bug) {
709 			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
710 					cpu, t, groupc->tasks[0],
711 					groupc->tasks[1], groupc->tasks[2],
712 					groupc->tasks[3], clear, set);
713 			psi_bug = 1;
714 		}
715 	}
716 
717 	for (t = 0; set; set &= ~(1 << t), t++)
718 		if (set & (1 << t))
719 			groupc->tasks[t]++;
720 
721 	/* Calculate state mask representing active states */
722 	for (s = 0; s < NR_PSI_STATES; s++) {
723 		if (test_state(groupc->tasks, s))
724 			state_mask |= (1 << s);
725 	}
726 
727 	/*
728 	 * Since we care about lost potential, a memstall is FULL
729 	 * when there are no other working tasks, but also when
730 	 * the CPU is actively reclaiming and nothing productive
731 	 * could run even if it were runnable. So when the current
732 	 * task in a cgroup is in_memstall, the corresponding groupc
733 	 * on that cpu is in PSI_MEM_FULL state.
734 	 */
735 	if (unlikely(groupc->tasks[NR_ONCPU] && cpu_curr(cpu)->in_memstall))
736 		state_mask |= (1 << PSI_MEM_FULL);
737 
738 	groupc->state_mask = state_mask;
739 
740 	write_seqcount_end(&groupc->seq);
741 
742 	if (state_mask & group->poll_states)
743 		psi_schedule_poll_work(group, 1);
744 
745 	if (wake_clock && !delayed_work_pending(&group->avgs_work))
746 		schedule_delayed_work(&group->avgs_work, PSI_FREQ);
747 }
748 
749 static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
750 {
751 #ifdef CONFIG_CGROUPS
752 	struct cgroup *cgroup = NULL;
753 
754 	if (!*iter)
755 		cgroup = task->cgroups->dfl_cgrp;
756 	else if (*iter == &psi_system)
757 		return NULL;
758 	else
759 		cgroup = cgroup_parent(*iter);
760 
761 	if (cgroup && cgroup_parent(cgroup)) {
762 		*iter = cgroup;
763 		return cgroup_psi(cgroup);
764 	}
765 #else
766 	if (*iter)
767 		return NULL;
768 #endif
769 	*iter = &psi_system;
770 	return &psi_system;
771 }
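/*
 * Example: for a task in cgroup /A/B, successive calls yield B's psi
 * group, then A's, then &psi_system; the root cgroup itself is skipped,
 * since the system-level state already accounts for every task.
 */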
772 
773 static void psi_flags_change(struct task_struct *task, int clear, int set)
774 {
775 	if (((task->psi_flags & set) ||
776 	     (task->psi_flags & clear) != clear) &&
777 	    !psi_bug) {
778 		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
779 				task->pid, task->comm, task_cpu(task),
780 				task->psi_flags, clear, set);
781 		psi_bug = 1;
782 	}
783 
784 	task->psi_flags &= ~clear;
785 	task->psi_flags |= set;
786 }
787 
788 void psi_task_change(struct task_struct *task, int clear, int set)
789 {
790 	int cpu = task_cpu(task);
791 	struct psi_group *group;
792 	bool wake_clock = true;
793 	void *iter = NULL;
794 	u64 now;
795 
796 	if (!task->pid)
797 		return;
798 
799 	psi_flags_change(task, clear, set);
800 
801 	now = cpu_clock(cpu);
802 	/*
803 	 * Periodic aggregation shuts off if there is a period of no
804 	 * task changes, so we wake it back up if necessary. However,
805 	 * don't do this if the task change is the aggregation worker
806 	 * itself going to sleep, or we'll ping-pong forever.
807 	 */
808 	if (unlikely((clear & TSK_RUNNING) &&
809 		     (task->flags & PF_WQ_WORKER) &&
810 		     wq_worker_last_func(task) == psi_avgs_work))
811 		wake_clock = false;
812 
813 	while ((group = iterate_groups(task, &iter)))
814 		psi_group_change(group, cpu, clear, set, now, wake_clock);
815 }
816 
817 void psi_task_switch(struct task_struct *prev, struct task_struct *next,
818 		     bool sleep)
819 {
820 	struct psi_group *group, *common = NULL;
821 	int cpu = task_cpu(prev);
822 	void *iter;
823 	u64 now = cpu_clock(cpu);
824 
825 	if (next->pid) {
826 		bool identical_state;
827 
828 		psi_flags_change(next, 0, TSK_ONCPU);
829 		/*
830 		 * When switching between tasks that have an identical
831 		 * runtime state, the cgroup that contains both tasks
832 		 * does not change: we can stop the iteration once
833 		 * we reach the first common ancestor. Iterate @next's
834 		 * ancestors only until we encounter @prev's ONCPU.
835 		 */
836 		identical_state = prev->psi_flags == next->psi_flags;
837 		iter = NULL;
838 		while ((group = iterate_groups(next, &iter))) {
839 			if (identical_state &&
840 			    per_cpu_ptr(group->pcpu, cpu)->tasks[NR_ONCPU]) {
841 				common = group;
842 				break;
843 			}
844 
845 			psi_group_change(group, cpu, 0, TSK_ONCPU, now, true);
846 		}
847 	}
848 
849 	if (prev->pid) {
850 		int clear = TSK_ONCPU, set = 0;
851 
852 		/*
853 		 * When we're going to sleep, psi_dequeue() lets us handle
854 		 * TSK_RUNNING and TSK_IOWAIT here, where we can combine it
855 		 * with TSK_ONCPU and save walking common ancestors twice.
856 		 */
857 		if (sleep) {
858 			clear |= TSK_RUNNING;
859 			if (prev->in_iowait)
860 				set |= TSK_IOWAIT;
861 		}
862 
863 		psi_flags_change(prev, clear, set);
864 
865 		iter = NULL;
866 		while ((group = iterate_groups(prev, &iter)) && group != common)
867 			psi_group_change(group, cpu, clear, set, now, true);
868 
869 		/*
870 		 * TSK_ONCPU is handled up to the common ancestor. If we're tasked
871 		 * with dequeuing too, finish that for the rest of the hierarchy.
872 		 */
873 		if (sleep) {
874 			clear &= ~TSK_ONCPU;
875 			for (; group; group = iterate_groups(prev, &iter))
876 				psi_group_change(group, cpu, clear, set, now, true);
877 		}
878 	}
879 }
880 
881 /**
882  * psi_memstall_enter - mark the beginning of a memory stall section
883  * @flags: flags to handle nested sections
884  *
885  * Marks the calling task as being stalled due to a lack of memory,
886  * such as waiting for a refault or performing reclaim.
887  */
888 void psi_memstall_enter(unsigned long *flags)
889 {
890 	struct rq_flags rf;
891 	struct rq *rq;
892 
893 	if (static_branch_likely(&psi_disabled))
894 		return;
895 
896 	*flags = current->in_memstall;
897 	if (*flags)
898 		return;
899 	/*
900 	 * in_memstall setting & accounting needs to be atomic wrt
901 	 * changes to the task's scheduling state, otherwise we can
902 	 * race with CPU migration.
903 	 */
904 	rq = this_rq_lock_irq(&rf);
905 
906 	current->in_memstall = 1;
907 	psi_task_change(current, 0, TSK_MEMSTALL);
908 
909 	rq_unlock_irq(rq, &rf);
910 }
911 
912 /**
913  * psi_memstall_leave - mark the end of a memory stall section
914  * @flags: flags to handle nested memdelay sections
915  *
916  * Marks the calling task as no longer stalled due to lack of memory.
917  */
918 void psi_memstall_leave(unsigned long *flags)
919 {
920 	struct rq_flags rf;
921 	struct rq *rq;
922 
923 	if (static_branch_likely(&psi_disabled))
924 		return;
925 
926 	if (*flags)
927 		return;
928 	/*
929 	 * in_memstall clearing & accounting needs to be atomic wrt
930 	 * changes to the task's scheduling state, otherwise we could
931 	 * race with CPU migration.
932 	 */
933 	rq = this_rq_lock_irq(&rf);
934 
935 	current->in_memstall = 0;
936 	psi_task_change(current, TSK_MEMSTALL, 0);
937 
938 	rq_unlock_irq(rq, &rf);
939 }
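/*
 * A minimal sketch of the intended call pattern (the stall site here is
 * hypothetical; real callers include the reclaim and refault paths):
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	... stall on memory: reclaim, compaction, thrashing wait ...
 *	psi_memstall_leave(&pflags);
 */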
940 
941 #ifdef CONFIG_CGROUPS
942 int psi_cgroup_alloc(struct cgroup *cgroup)
943 {
944 	if (static_branch_likely(&psi_disabled))
945 		return 0;
946 
947 	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
948 	if (!cgroup->psi.pcpu)
949 		return -ENOMEM;
950 	group_init(&cgroup->psi);
951 	return 0;
952 }
953 
954 void psi_cgroup_free(struct cgroup *cgroup)
955 {
956 	if (static_branch_likely(&psi_disabled))
957 		return;
958 
959 	cancel_delayed_work_sync(&cgroup->psi.avgs_work);
960 	free_percpu(cgroup->psi.pcpu);
961 	/* All triggers must be removed by now */
962 	WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
963 }
964 
965 /**
966  * cgroup_move_task - move task to a different cgroup
967  * @task: the task
968  * @to: the target css_set
969  *
970  * Move task to a new cgroup and safely migrate its associated stall
971  * state between the different groups.
972  *
973  * This function acquires the task's rq lock to lock out concurrent
974  * changes to the task's scheduling state and - in case the task is
975  * running - concurrent changes to its stall state.
976  */
977 void cgroup_move_task(struct task_struct *task, struct css_set *to)
978 {
979 	unsigned int task_flags;
980 	struct rq_flags rf;
981 	struct rq *rq;
982 
983 	if (static_branch_likely(&psi_disabled)) {
984 		/*
985 		 * Lame to do this here, but the scheduler cannot be locked
986 		 * from the outside, so we move cgroups from inside sched/.
987 		 */
988 		rcu_assign_pointer(task->cgroups, to);
989 		return;
990 	}
991 
992 	rq = task_rq_lock(task, &rf);
993 
994 	/*
995 	 * We may race with schedule() dropping the rq lock between
996 	 * deactivating prev and switching to next. Because the psi
997 	 * updates from the deactivation are deferred to the switch
998 	 * callback to save cgroup tree updates, the task's scheduling
999 	 * state here is not coherent with its psi state:
1000 	 *
1001 	 * schedule()                   cgroup_move_task()
1002 	 *   rq_lock()
1003 	 *   deactivate_task()
1004 	 *     p->on_rq = 0
1005 	 *     psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
1006 	 *   pick_next_task()
1007 	 *     rq_unlock()
1008 	 *                                rq_lock()
1009 	 *                                psi_task_change() // old cgroup
1010 	 *                                task->cgroups = to
1011 	 *                                psi_task_change() // new cgroup
1012 	 *                                rq_unlock()
1013 	 *     rq_lock()
1014 	 *   psi_sched_switch() // does deferred updates in new cgroup
1015 	 *
1016 	 * Don't rely on the scheduling state. Use psi_flags instead.
1017 	 */
1018 	task_flags = task->psi_flags;
1019 
1020 	if (task_flags)
1021 		psi_task_change(task, task_flags, 0);
1022 
1023 	/* See comment above */
1024 	rcu_assign_pointer(task->cgroups, to);
1025 
1026 	if (task_flags)
1027 		psi_task_change(task, 0, task_flags);
1028 
1029 	task_rq_unlock(rq, task, &rf);
1030 }
1031 #endif /* CONFIG_CGROUPS */
1032 
1033 int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
1034 {
1035 	int full;
1036 	u64 now;
1037 
1038 	if (static_branch_likely(&psi_disabled))
1039 		return -EOPNOTSUPP;
1040 
1041 	/* Update averages before reporting them */
1042 	mutex_lock(&group->avgs_lock);
1043 	now = sched_clock();
1044 	collect_percpu_times(group, PSI_AVGS, NULL);
1045 	if (now >= group->avg_next_update)
1046 		group->avg_next_update = update_averages(group, now);
1047 	mutex_unlock(&group->avgs_lock);
1048 
1049 	for (full = 0; full < 2; full++) {
1050 		unsigned long avg[3];
1051 		u64 total;
1052 		int w;
1053 
1054 		for (w = 0; w < 3; w++)
1055 			avg[w] = group->avg[res * 2 + full][w];
1056 		total = div_u64(group->total[PSI_AVGS][res * 2 + full],
1057 				NSEC_PER_USEC);
1058 
1059 		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
1060 			   full ? "full" : "some",
1061 			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
1062 			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
1063 			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
1064 			   total);
1065 	}
1066 
1067 	return 0;
1068 }
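/*
 * Example output as rendered above, e.g. in /proc/pressure/memory
 * (values are illustrative):
 *
 *	some avg10=2.04 avg60=0.75 avg300=0.40 total=157656722
 *	full avg10=1.02 avg60=0.30 avg300=0.12 total=85154757
 */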
1069 
1070 static int psi_io_show(struct seq_file *m, void *v)
1071 {
1072 	return psi_show(m, &psi_system, PSI_IO);
1073 }
1074 
1075 static int psi_memory_show(struct seq_file *m, void *v)
1076 {
1077 	return psi_show(m, &psi_system, PSI_MEM);
1078 }
1079 
1080 static int psi_cpu_show(struct seq_file *m, void *v)
1081 {
1082 	return psi_show(m, &psi_system, PSI_CPU);
1083 }
1084 
1085 static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *))
1086 {
1087 	if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
1088 		return -EPERM;
1089 
1090 	return single_open(file, psi_show, NULL);
1091 }
1092 
1093 static int psi_io_open(struct inode *inode, struct file *file)
1094 {
1095 	return psi_open(file, psi_io_show);
1096 }
1097 
1098 static int psi_memory_open(struct inode *inode, struct file *file)
1099 {
1100 	return psi_open(file, psi_memory_show);
1101 }
1102 
1103 static int psi_cpu_open(struct inode *inode, struct file *file)
1104 {
1105 	return psi_open(file, psi_cpu_show);
1106 }
1107 
1108 struct psi_trigger *psi_trigger_create(struct psi_group *group,
1109 			char *buf, size_t nbytes, enum psi_res res)
1110 {
1111 	struct psi_trigger *t;
1112 	enum psi_states state;
1113 	u32 threshold_us;
1114 	u32 window_us;
1115 
1116 	if (static_branch_likely(&psi_disabled))
1117 		return ERR_PTR(-EOPNOTSUPP);
1118 
1119 	if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
1120 		state = PSI_IO_SOME + res * 2;
1121 	else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
1122 		state = PSI_IO_FULL + res * 2;
1123 	else
1124 		return ERR_PTR(-EINVAL);
1125 
1126 	if (state >= PSI_NONIDLE)
1127 		return ERR_PTR(-EINVAL);
1128 
1129 	if (window_us < WINDOW_MIN_US ||
1130 		window_us > WINDOW_MAX_US)
1131 		return ERR_PTR(-EINVAL);
1132 
1133 	/* Check threshold */
1134 	if (threshold_us == 0 || threshold_us > window_us)
1135 		return ERR_PTR(-EINVAL);
1136 
1137 	t = kmalloc(sizeof(*t), GFP_KERNEL);
1138 	if (!t)
1139 		return ERR_PTR(-ENOMEM);
1140 
1141 	t->group = group;
1142 	t->state = state;
1143 	t->threshold = threshold_us * NSEC_PER_USEC;
1144 	t->win.size = window_us * NSEC_PER_USEC;
1145 	window_reset(&t->win, 0, 0, 0);
1146 
1147 	t->event = 0;
1148 	t->last_event_time = 0;
1149 	init_waitqueue_head(&t->event_wait);
1150 	kref_init(&t->refcount);
1151 
1152 	mutex_lock(&group->trigger_lock);
1153 
1154 	if (!rcu_access_pointer(group->poll_task)) {
1155 		struct task_struct *task;
1156 
1157 		task = kthread_create(psi_poll_worker, group, "psimon");
1158 		if (IS_ERR(task)) {
1159 			kfree(t);
1160 			mutex_unlock(&group->trigger_lock);
1161 			return ERR_CAST(task);
1162 		}
1163 		atomic_set(&group->poll_wakeup, 0);
1164 		wake_up_process(task);
1165 		rcu_assign_pointer(group->poll_task, task);
1166 	}
1167 
1168 	list_add(&t->node, &group->triggers);
1169 	group->poll_min_period = min(group->poll_min_period,
1170 		div_u64(t->win.size, UPDATES_PER_WINDOW));
1171 	group->nr_triggers[t->state]++;
1172 	group->poll_states |= (1 << t->state);
1173 
1174 	mutex_unlock(&group->trigger_lock);
1175 
1176 	return t;
1177 }
1178 
1179 static void psi_trigger_destroy(struct kref *ref)
1180 {
1181 	struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
1182 	struct psi_group *group = t->group;
1183 	struct task_struct *task_to_destroy = NULL;
1184 
1185 	if (static_branch_likely(&psi_disabled))
1186 		return;
1187 
1188 	/*
1189 	 * Wakeup waiters to stop polling. Can happen if cgroup is deleted
1190 	 * from under a polling process.
1191 	 */
1192 	wake_up_interruptible(&t->event_wait);
1193 
1194 	mutex_lock(&group->trigger_lock);
1195 
1196 	if (!list_empty(&t->node)) {
1197 		struct psi_trigger *tmp;
1198 		u64 period = ULLONG_MAX;
1199 
1200 		list_del(&t->node);
1201 		group->nr_triggers[t->state]--;
1202 		if (!group->nr_triggers[t->state])
1203 			group->poll_states &= ~(1 << t->state);
1204 		/* reset min update period for the remaining triggers */
1205 		list_for_each_entry(tmp, &group->triggers, node)
1206 			period = min(period, div_u64(tmp->win.size,
1207 					UPDATES_PER_WINDOW));
1208 		group->poll_min_period = period;
1209 		/* Destroy poll_task when the last trigger is destroyed */
1210 		if (group->poll_states == 0) {
1211 			group->polling_until = 0;
1212 			task_to_destroy = rcu_dereference_protected(
1213 					group->poll_task,
1214 					lockdep_is_held(&group->trigger_lock));
1215 			rcu_assign_pointer(group->poll_task, NULL);
1216 			del_timer(&group->poll_timer);
1217 		}
1218 	}
1219 
1220 	mutex_unlock(&group->trigger_lock);
1221 
1222 	/*
1223 	 * Wait for both *trigger_ptr from psi_trigger_replace and
1224 	 * poll_task RCUs to complete their read-side critical sections
1225 	 * before destroying the trigger and optionally the poll_task
1226 	 */
1227 	synchronize_rcu();
1228 	/*
1229 	 * Stop kthread 'psimon' after releasing trigger_lock to prevent a
1230 	 * deadlock while waiting for psi_poll_work to acquire trigger_lock
1231 	 */
1232 	if (task_to_destroy) {
1233 		/*
1234 		 * After the RCU grace period has expired, the worker
1235 		 * can no longer be found through group->poll_task.
1236 		 */
1237 		kthread_stop(task_to_destroy);
1238 	}
1239 	kfree(t);
1240 }
1241 
1242 void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new)
1243 {
1244 	struct psi_trigger *old = *trigger_ptr;
1245 
1246 	if (static_branch_likely(&psi_disabled))
1247 		return;
1248 
1249 	rcu_assign_pointer(*trigger_ptr, new);
1250 	if (old)
1251 		kref_put(&old->refcount, psi_trigger_destroy);
1252 }
1253 
1254 __poll_t psi_trigger_poll(void **trigger_ptr,
1255 				struct file *file, poll_table *wait)
1256 {
1257 	__poll_t ret = DEFAULT_POLLMASK;
1258 	struct psi_trigger *t;
1259 
1260 	if (static_branch_likely(&psi_disabled))
1261 		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1262 
1263 	rcu_read_lock();
1264 
1265 	t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
1266 	if (!t) {
1267 		rcu_read_unlock();
1268 		return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1269 	}
1270 	kref_get(&t->refcount);
1271 
1272 	rcu_read_unlock();
1273 
1274 	poll_wait(file, &t->event_wait, wait);
1275 
1276 	if (cmpxchg(&t->event, 1, 0) == 1)
1277 		ret |= EPOLLPRI;
1278 
1279 	kref_put(&t->refcount, psi_trigger_destroy);
1280 
1281 	return ret;
1282 }
1283 
1284 static ssize_t psi_write(struct file *file, const char __user *user_buf,
1285 			 size_t nbytes, enum psi_res res)
1286 {
1287 	char buf[32];
1288 	size_t buf_size;
1289 	struct seq_file *seq;
1290 	struct psi_trigger *new;
1291 
1292 	if (static_branch_likely(&psi_disabled))
1293 		return -EOPNOTSUPP;
1294 
1295 	if (!nbytes)
1296 		return -EINVAL;
1297 
1298 	buf_size = min(nbytes, sizeof(buf));
1299 	if (copy_from_user(buf, user_buf, buf_size))
1300 		return -EFAULT;
1301 
1302 	buf[buf_size - 1] = '\0';
1303 
1304 	new = psi_trigger_create(&psi_system, buf, nbytes, res);
1305 	if (IS_ERR(new))
1306 		return PTR_ERR(new);
1307 
1308 	seq = file->private_data;
1309 	/* Take seq->lock to protect seq->private from concurrent writes */
1310 	mutex_lock(&seq->lock);
1311 	psi_trigger_replace(&seq->private, new);
1312 	mutex_unlock(&seq->lock);
1313 
1314 	return nbytes;
1315 }
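/*
 * Userspace arms a trigger by writing "some <threshold_us> <window_us>"
 * or "full <threshold_us> <window_us>" to /proc/pressure/{io,memory,cpu},
 * then polls the fd for EPOLLPRI. A minimal, hypothetical consumer:
 *
 *	int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	write(fd, "some 150000 1000000", 19);	// 150ms stall per 1s window
 *	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
 *		handle_memory_pressure();	// hypothetical callback
 */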
1316 
1317 static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
1318 			    size_t nbytes, loff_t *ppos)
1319 {
1320 	return psi_write(file, user_buf, nbytes, PSI_IO);
1321 }
1322 
1323 static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
1324 				size_t nbytes, loff_t *ppos)
1325 {
1326 	return psi_write(file, user_buf, nbytes, PSI_MEM);
1327 }
1328 
1329 static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
1330 			     size_t nbytes, loff_t *ppos)
1331 {
1332 	return psi_write(file, user_buf, nbytes, PSI_CPU);
1333 }
1334 
1335 static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
1336 {
1337 	struct seq_file *seq = file->private_data;
1338 
1339 	return psi_trigger_poll(&seq->private, file, wait);
1340 }
1341 
1342 static int psi_fop_release(struct inode *inode, struct file *file)
1343 {
1344 	struct seq_file *seq = file->private_data;
1345 
1346 	psi_trigger_replace(&seq->private, NULL);
1347 	return single_release(inode, file);
1348 }
1349 
1350 static const struct proc_ops psi_io_proc_ops = {
1351 	.proc_open	= psi_io_open,
1352 	.proc_read	= seq_read,
1353 	.proc_lseek	= seq_lseek,
1354 	.proc_write	= psi_io_write,
1355 	.proc_poll	= psi_fop_poll,
1356 	.proc_release	= psi_fop_release,
1357 };
1358 
1359 static const struct proc_ops psi_memory_proc_ops = {
1360 	.proc_open	= psi_memory_open,
1361 	.proc_read	= seq_read,
1362 	.proc_lseek	= seq_lseek,
1363 	.proc_write	= psi_memory_write,
1364 	.proc_poll	= psi_fop_poll,
1365 	.proc_release	= psi_fop_release,
1366 };
1367 
1368 static const struct proc_ops psi_cpu_proc_ops = {
1369 	.proc_open	= psi_cpu_open,
1370 	.proc_read	= seq_read,
1371 	.proc_lseek	= seq_lseek,
1372 	.proc_write	= psi_cpu_write,
1373 	.proc_poll	= psi_fop_poll,
1374 	.proc_release	= psi_fop_release,
1375 };
1376 
1377 static int __init psi_proc_init(void)
1378 {
1379 	if (psi_enable) {
1380 		proc_mkdir("pressure", NULL);
1381 		proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops);
1382 		proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops);
1383 		proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops);
1384 	}
1385 	return 0;
1386 }
1387 module_init(psi_proc_init);
1388