Lines Matching +full:avg +full:- +full:samples
1 // SPDX-License-Identifier: GPL-2.0
34 * In the FULL state of a given resource, all non-idle tasks are
48 * FULL means all non-idle tasks in the cgroup are delayed on the CPU
64 * unrealized due to resource contention *also* scales with non-idle
83 * we have to base our calculation on the number of non-idle tasks in
91 * FULL = (threads - min(nr_productive_tasks, threads)) / threads
97 * FULL = (256 - min(256, 256)) / 256 = 0%
99 * For the 1 out of 4 memory-delayed tasks, this yields:
103 * FULL = (4 - min(3, 4)) / 4 = 25%
106 * extension of the single-CPU model. ]
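The two worked cases in the comment drop straight out of the SOME/FULL formulas above. As a quick sanity check, here is a throwaway user-space sketch (illustrative only, not kernel code; the variable names are invented) that reproduces the 1-of-4 memory-delayed case:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* 4 CPUs, 4 non-idle tasks, 1 delayed on memory, 3 productive */
	unsigned int nr_cpus = 4, nr_nonidle = 4, nr_delayed = 1, nr_productive = 3;
	unsigned int threads = min_u(nr_nonidle, nr_cpus);
	double some = 100.0 * min_u(nr_delayed, threads) / threads;
	double full = 100.0 * (threads - min_u(nr_productive, threads)) / threads;

	printf("SOME = %.0f%%, FULL = %.0f%%\n", some, full); /* 25%, 25% */
	return 0;
}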
136 * cost-wise, yet way more sensitive and accurate than periodic
156 /* Running averages - we need to be higher-res than loadavg */
158 #define EXP_10s 1677 /* 1/exp(2s/10s) as fixed-point */
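The decay constant is the per-sample exponential factor for a 2 s sampling period, expressed in the 11-bit fixed point used by the load-average helpers (FIXED_1 = 2048 is an assumption carried over from calc_load(); the sibling EXP_60s/EXP_300s defines are not shown in this listing). A small derivation aid, not kernel code:

#include <math.h>
#include <stdio.h>

int main(void)
{
	double fixed_1 = 2048.0;	/* 1.0 in calc_load() fixed point */

	printf("EXP_10s  ~ %.0f\n", fixed_1 * exp(-2.0 / 10.0));	/* ~1677 */
	printf("EXP_60s  ~ %.0f\n", fixed_1 * exp(-2.0 / 60.0));	/* ~1981 */
	printf("EXP_300s ~ %.0f\n", fixed_1 * exp(-2.0 / 300.0));	/* ~2034 */
	return 0;
}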
169 /* System-level pressure and stall tracking */
183 group->enabled = true; in group_init()
185 seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq); in group_init()
186 group->avg_last_update = sched_clock(); in group_init()
187 group->avg_next_update = group->avg_last_update + psi_period; in group_init()
188 mutex_init(&group->avgs_lock); in group_init()
190 /* Init avg trigger-related members */ in group_init()
191 INIT_LIST_HEAD(&group->avg_triggers); in group_init()
192 memset(group->avg_nr_triggers, 0, sizeof(group->avg_nr_triggers)); in group_init()
193 INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work); in group_init()
195 /* Init rtpoll trigger-related members */ in group_init()
196 atomic_set(&group->rtpoll_scheduled, 0); in group_init()
197 mutex_init(&group->rtpoll_trigger_lock); in group_init()
198 INIT_LIST_HEAD(&group->rtpoll_triggers); in group_init()
199 group->rtpoll_min_period = U32_MAX; in group_init()
200 group->rtpoll_next_update = ULLONG_MAX; in group_init()
201 init_waitqueue_head(&group->rtpoll_wait); in group_init()
202 timer_setup(&group->rtpoll_timer, poll_timer_fn, 0); in group_init()
203 rcu_assign_pointer(group->rtpoll_task, NULL); in group_init()
249 struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu); in get_recent_times()
261 seq = read_seqcount_begin(&groupc->seq); in get_recent_times()
263 memcpy(times, groupc->times, sizeof(groupc->times)); in get_recent_times()
264 state_mask = groupc->state_mask; in get_recent_times()
265 state_start = groupc->state_start; in get_recent_times()
267 memcpy(tasks, groupc->tasks, sizeof(groupc->tasks)); in get_recent_times()
268 } while (read_seqcount_retry(&groupc->seq, seq)); in get_recent_times()
283 times[s] += now - state_start; in get_recent_times()
285 delta = times[s] - groupc->times_prev[aggregator][s]; in get_recent_times()
286 groupc->times_prev[aggregator][s] = times[s]; in get_recent_times()
295 * re-arm avgs_work when all CPUs are IDLE. But the current CPU running in get_recent_times()
297 * So for the current CPU, we need to re-arm avgs_work only when in get_recent_times()
301 if (current_work() == &group->avgs_work.work) { in get_recent_times()
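The read_seqcount_begin()/read_seqcount_retry() pair above re-copies times[], state_mask and tasks[] whenever a writer raced with the read, so the aggregator gets a consistent per-CPU snapshot without taking a lock on the scheduler hot path; the subsequent delta against times_prev[aggregator] then extracts only the stall time accumulated since the previous aggregation pass.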
316 static void calc_avgs(unsigned long avg[3], int missed_periods, in calc_avgs()
323 avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods); in calc_avgs()
324 avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods); in calc_avgs()
325 avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods); in calc_avgs()
331 avg[0] = calc_load(avg[0], EXP_10s, pct); in calc_avgs()
332 avg[1] = calc_load(avg[1], EXP_60s, pct); in calc_avgs()
333 avg[2] = calc_load(avg[2], EXP_300s, pct); in calc_avgs()
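calc_load() and calc_load_n() are the scheduler's load-average helpers; each call folds the latest sample into the running average with an exponential weight. A simplified sketch of that single step (assuming the 2048 fixed-point base mentioned above; the real helper also handles rounding and, via calc_load_n(), whole missed periods):

/* new_avg = old_avg * e + sample * (1 - e), all in 11-bit fixed point */
static unsigned long decay_once(unsigned long old_avg, unsigned long exp_fp,
				unsigned long sample_fp)
{
	const unsigned long fixed_1 = 1UL << 11;	/* 2048 */

	return (old_avg * exp_fp + sample_fp * (fixed_1 - exp_fp)) / fixed_1;
}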
340 u64 deltas[NR_PSI_STATES - 1] = { 0, }; in collect_percpu_times()
347 * Collect the per-cpu time buckets and average them into a in collect_percpu_times()
350 * For averaging, each CPU is weighted by its non-idle time in in collect_percpu_times()
383 for (s = 0; s < NR_PSI_STATES - 1; s++) in collect_percpu_times()
384 group->total[aggregator][s] += in collect_percpu_times()
395 win->start_time = now; in window_reset()
396 win->start_value = value; in window_reset()
397 win->prev_growth = prev_growth; in window_reset()
416 elapsed = now - win->start_time; in window_update()
417 growth = value - win->start_value; in window_update()
419 * After each tracking window passes win->start_value and in window_update()
420 * win->start_time get reset and win->prev_growth stores in window_update()
421 * the average per-window growth of the previous window. in window_update()
422 * win->prev_growth is then used to interpolate additional in window_update()
425 if (elapsed > win->size) in window_update()
430 remaining = win->size - elapsed; in window_update()
431 growth += div64_u64(win->prev_growth * remaining, win->size); in window_update()
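Worked example of the interpolation: with a 1 s window, 600 ms elapsed, 90 ms of new stall growth and 100 ms of growth in the previous window, the reported growth is 90 ms + 100 ms * (400 ms / 1000 ms) = 130 ms; the not-yet-elapsed remainder of the window is filled in at the previous window's rate.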
441 u64 *total = group->total[aggregator]; in update_triggers()
447 triggers = &group->avg_triggers; in update_triggers()
448 aggregator_total = group->avg_total; in update_triggers()
450 triggers = &group->rtpoll_triggers; in update_triggers()
451 aggregator_total = group->rtpoll_total; in update_triggers()
462 new_stall = aggregator_total[t->state] != total[t->state]; in update_triggers()
465 if (!new_stall && !t->pending_event) in update_triggers()
476 * remember to update the aggregator's total[] once we've in update_triggers()
483 growth = window_update(&t->win, now, total[t->state]); in update_triggers()
484 if (!t->pending_event) { in update_triggers()
485 if (growth < t->threshold) in update_triggers()
488 t->pending_event = true; in update_triggers()
492 if (now < t->last_event_time + t->win.size) in update_triggers()
496 if (cmpxchg(&t->event, 0, 1) == 0) { in update_triggers()
497 if (t->of) in update_triggers()
498 kernfs_notify(t->of->kn); in update_triggers()
500 wake_up_interruptible(&t->event_wait); in update_triggers()
502 t->last_event_time = now; in update_triggers()
504 t->pending_event = false; in update_triggers()
507 return now + group->rtpoll_min_period; in update_triggers()
518 expires = group->avg_next_update; in update_averages()
519 if (now - expires >= psi_period) in update_averages()
520 missed_periods = div_u64(now - expires, psi_period); in update_averages()
526 * But the deltas we sample out of the per-cpu buckets above in update_averages()
530 period = now - (group->avg_last_update + (missed_periods * psi_period)); in update_averages()
531 group->avg_last_update = now; in update_averages()
533 for (s = 0; s < NR_PSI_STATES - 1; s++) { in update_averages()
536 sample = group->total[PSI_AVGS][s] - group->avg_total[s]; in update_averages()
540 * which under full pressure can result in samples in in update_averages()
543 * We don't want to report nonsensical pressures in in update_averages()
556 group->avg_total[s] += sample; in update_averages()
557 calc_avgs(group->avg[s], missed_periods, sample, period); in update_averages()
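Concretely, a 500 ms stall sample over a 2 s period reaches calc_avgs() as 25% of the period, is scaled into fixed point and decayed into avg10/avg60/avg300, while any fully missed periods are back-filled with zero-pressure samples through calc_load_n().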
574 mutex_lock(&group->avgs_lock); in psi_avgs_work()
580 * If there is task activity, periodically fold the per-cpu in psi_avgs_work()
581 * times and feed samples into the running averages. If things in psi_avgs_work()
584 * go - see calc_avgs() and missed_periods. in psi_avgs_work()
586 if (now >= group->avg_next_update) { in psi_avgs_work()
588 group->avg_next_update = update_averages(group, now); in psi_avgs_work()
593 group->avg_next_update - now) + 1); in psi_avgs_work()
596 mutex_unlock(&group->avgs_lock); in psi_avgs_work()
603 list_for_each_entry(t, &group->rtpoll_triggers, node) in init_rtpoll_triggers()
604 window_reset(&t->win, now, in init_rtpoll_triggers()
605 group->total[PSI_POLL][t->state], 0); in init_rtpoll_triggers()
606 memcpy(group->rtpoll_total, group->total[PSI_POLL], in init_rtpoll_triggers()
607 sizeof(group->rtpoll_total)); in init_rtpoll_triggers()
608 group->rtpoll_next_update = now + group->rtpoll_min_period; in init_rtpoll_triggers()
621 if (atomic_xchg(&group->rtpoll_scheduled, 1) && !force) in psi_schedule_rtpoll_work()
626 task = rcu_dereference(group->rtpoll_task); in psi_schedule_rtpoll_work()
632 mod_timer(&group->rtpoll_timer, jiffies + delay); in psi_schedule_rtpoll_work()
634 atomic_set(&group->rtpoll_scheduled, 0); in psi_schedule_rtpoll_work()
646 mutex_lock(&group->rtpoll_trigger_lock); in psi_rtpoll_work()
650 if (now > group->rtpoll_until) { in psi_rtpoll_work()
660 atomic_set(&group->rtpoll_scheduled, 0); in psi_rtpoll_work()
689 if (changed_states & group->rtpoll_states) { in psi_rtpoll_work()
691 if (now > group->rtpoll_until) in psi_rtpoll_work()
699 group->rtpoll_until = now + in psi_rtpoll_work()
700 group->rtpoll_min_period * UPDATES_PER_WINDOW; in psi_rtpoll_work()
703 if (now > group->rtpoll_until) { in psi_rtpoll_work()
704 group->rtpoll_next_update = ULLONG_MAX; in psi_rtpoll_work()
708 if (now >= group->rtpoll_next_update) { in psi_rtpoll_work()
709 group->rtpoll_next_update = update_triggers(group, now, &update_total, PSI_POLL); in psi_rtpoll_work()
711 memcpy(group->rtpoll_total, group->total[PSI_POLL], in psi_rtpoll_work()
712 sizeof(group->rtpoll_total)); in psi_rtpoll_work()
716 nsecs_to_jiffies(group->rtpoll_next_update - now) + 1, in psi_rtpoll_work()
720 mutex_unlock(&group->rtpoll_trigger_lock); in psi_rtpoll_work()
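The rtpoll_until bookkeeping above means that after a monitored state changes, the worker keeps sampling every rtpoll_min_period for UPDATES_PER_WINDOW further intervals; once that grace window passes without new activity, rtpoll_next_update is parked at ULLONG_MAX and the polling task sleeps until psi_group_change() schedules it again.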
730 wait_event_interruptible(group->rtpoll_wait, in psi_rtpoll_worker()
731 atomic_cmpxchg(&group->rtpoll_wakeup, 1, 0) || in psi_rtpoll_worker()
745 atomic_set(&group->rtpoll_wakeup, 1); in poll_timer_fn()
746 wake_up_interruptible(&group->rtpoll_wait); in poll_timer_fn()
753 delta = now - groupc->state_start; in record_times()
754 groupc->state_start = now; in record_times()
756 if (groupc->state_mask & (1 << PSI_IO_SOME)) { in record_times()
757 groupc->times[PSI_IO_SOME] += delta; in record_times()
758 if (groupc->state_mask & (1 << PSI_IO_FULL)) in record_times()
759 groupc->times[PSI_IO_FULL] += delta; in record_times()
762 if (groupc->state_mask & (1 << PSI_MEM_SOME)) { in record_times()
763 groupc->times[PSI_MEM_SOME] += delta; in record_times()
764 if (groupc->state_mask & (1 << PSI_MEM_FULL)) in record_times()
765 groupc->times[PSI_MEM_FULL] += delta; in record_times()
768 if (groupc->state_mask & (1 << PSI_CPU_SOME)) { in record_times()
769 groupc->times[PSI_CPU_SOME] += delta; in record_times()
770 if (groupc->state_mask & (1 << PSI_CPU_FULL)) in record_times()
771 groupc->times[PSI_CPU_FULL] += delta; in record_times()
774 if (groupc->state_mask & (1 << PSI_NONIDLE)) in record_times()
775 groupc->times[PSI_NONIDLE] += delta; in record_times()
789 groupc = per_cpu_ptr(group->pcpu, cpu); in psi_group_change()
800 write_seqcount_begin(&groupc->seq); in psi_group_change()
805 * task count - it's just a boolean flag directly encoded in in psi_group_change()
816 state_mask = groupc->state_mask & PSI_ONCPU; in psi_group_change()
826 if (groupc->tasks[t]) { in psi_group_change()
827 groupc->tasks[t]--; in psi_group_change()
830 cpu, t, groupc->tasks[0], in psi_group_change()
831 groupc->tasks[1], groupc->tasks[2], in psi_group_change()
832 groupc->tasks[3], clear, set); in psi_group_change()
839 groupc->tasks[t]++; in psi_group_change()
841 if (!group->enabled) { in psi_group_change()
847 * avoid a delta sample underflow when PSI is later re-enabled. in psi_group_change()
849 if (unlikely(groupc->state_mask & (1 << PSI_NONIDLE))) in psi_group_change()
852 groupc->state_mask = state_mask; in psi_group_change()
854 write_seqcount_end(&groupc->seq); in psi_group_change()
859 if (test_state(groupc->tasks, s, state_mask & PSI_ONCPU)) in psi_group_change()
871 if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall)) in psi_group_change()
876 groupc->state_mask = state_mask; in psi_group_change()
878 write_seqcount_end(&groupc->seq); in psi_group_change()
880 if (state_mask & group->rtpoll_states) in psi_group_change()
883 if (wake_clock && !delayed_work_pending(&group->avgs_work)) in psi_group_change()
884 schedule_delayed_work(&group->avgs_work, PSI_FREQ); in psi_group_change()
898 if (((task->psi_flags & set) || in psi_flags_change()
899 (task->psi_flags & clear) != clear) && in psi_flags_change()
902 task->pid, task->comm, task_cpu(task), in psi_flags_change()
903 task->psi_flags, clear, set); in psi_flags_change()
907 task->psi_flags &= ~clear; in psi_flags_change()
908 task->psi_flags |= set; in psi_flags_change()
916 if (!task->pid) in psi_task_change()
924 } while ((group = group->parent)); in psi_task_change()
933 if (next->pid) { in psi_task_switch()
942 if (per_cpu_ptr(group->pcpu, cpu)->state_mask & in psi_task_switch()
949 } while ((group = group->parent)); in psi_task_switch()
952 if (prev->pid) { in psi_task_switch()
964 if (prev->in_memstall) in psi_task_switch()
966 if (prev->in_iowait) in psi_task_switch()
973 * itself going to sleep, or we'll ping-pong forever. in psi_task_switch()
975 if (unlikely((prev->flags & PF_WQ_WORKER) && in psi_task_switch()
987 } while ((group = group->parent)); in psi_task_switch()
995 if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) { in psi_task_switch()
997 for (; group; group = group->parent) in psi_task_switch()
1012 if (!curr->pid) in psi_account_irqtime()
1021 delta = (s64)(irq - rq->psi_irq_time); in psi_account_irqtime()
1024 rq->psi_irq_time = irq; in psi_account_irqtime()
1029 if (!group->enabled) in psi_account_irqtime()
1032 groupc = per_cpu_ptr(group->pcpu, cpu); in psi_account_irqtime()
1034 write_seqcount_begin(&groupc->seq); in psi_account_irqtime()
1038 groupc->times[PSI_IRQ_FULL] += delta; in psi_account_irqtime()
1040 write_seqcount_end(&groupc->seq); in psi_account_irqtime()
1042 if (group->rtpoll_states & (1 << PSI_IRQ_FULL)) in psi_account_irqtime()
1044 } while ((group = group->parent)); in psi_account_irqtime()
1049 * psi_memstall_enter - mark the beginning of a memory stall section
1063 *flags = current->in_memstall; in psi_memstall_enter()
1073 current->in_memstall = 1; in psi_memstall_enter()
1081 * psi_memstall_leave - mark the end of a memory stall section
1103 current->in_memstall = 0; in psi_memstall_leave()
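Call sites bracket a blocking memory operation with these two helpers; the usage pattern looks roughly like this (sketch only, surrounding context omitted):

unsigned long pflags;

psi_memstall_enter(&pflags);
/* ... block on memory: direct reclaim, thrashing refault IO, ... */
psi_memstall_leave(&pflags);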
1116 cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL); in psi_cgroup_alloc()
1117 if (!cgroup->psi) in psi_cgroup_alloc()
1118 return -ENOMEM; in psi_cgroup_alloc()
1120 cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu); in psi_cgroup_alloc()
1121 if (!cgroup->psi->pcpu) { in psi_cgroup_alloc()
1122 kfree(cgroup->psi); in psi_cgroup_alloc()
1123 return -ENOMEM; in psi_cgroup_alloc()
1125 group_init(cgroup->psi); in psi_cgroup_alloc()
1126 cgroup->psi->parent = cgroup_psi(cgroup_parent(cgroup)); in psi_cgroup_alloc()
1135 cancel_delayed_work_sync(&cgroup->psi->avgs_work); in psi_cgroup_free()
1136 free_percpu(cgroup->psi->pcpu); in psi_cgroup_free()
1138 WARN_ONCE(cgroup->psi->rtpoll_states, "psi: trigger leak\n"); in psi_cgroup_free()
1139 kfree(cgroup->psi); in psi_cgroup_free()
1143 * cgroup_move_task - move task to a different cgroup
1151 * changes to the task's scheduling state and - in case the task is
1152 * running - concurrent changes to its stall state.
1165 rcu_assign_pointer(task->cgroups, to); in cgroup_move_task()
1181 * p->on_rq = 0 in cgroup_move_task()
1187 * task->cgroups = to in cgroup_move_task()
1195 task_flags = task->psi_flags; in cgroup_move_task()
1201 rcu_assign_pointer(task->cgroups, to); in cgroup_move_task()
1214 * After we disable psi_group->enabled, we don't actually in psi_cgroup_restart()
1221 * would see !psi_group->enabled and only do task accounting. in psi_cgroup_restart()
1223 * When cgroup PSI is re-enabled, this function uses psi_group_change() in psi_cgroup_restart()
1225 * and restarts groupc->state_start from now, using .clear = .set = 0 in psi_cgroup_restart()
1228 if (!group->enabled) in psi_cgroup_restart()
1249 return -EOPNOTSUPP; in psi_show()
1252 mutex_lock(&group->avgs_lock); in psi_show()
1255 if (now >= group->avg_next_update) in psi_show()
1256 group->avg_next_update = update_averages(group, now); in psi_show()
1257 mutex_unlock(&group->avgs_lock); in psi_show()
1263 for (full = 0; full < 2 - only_full; full++) { in psi_show()
1264 unsigned long avg[3] = { 0, }; in psi_show() local
1271 avg[w] = group->avg[res * 2 + full][w]; in psi_show()
1272 total = div_u64(group->total[PSI_AVGS][res * 2 + full], in psi_show()
1278 LOAD_INT(avg[0]), LOAD_FRAC(avg[0]), in psi_show()
1279 LOAD_INT(avg[1]), LOAD_FRAC(avg[1]), in psi_show()
1280 LOAD_INT(avg[2]), LOAD_FRAC(avg[2]), in psi_show()
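LOAD_INT()/LOAD_FRAC() turn the fixed-point averages back into a two-decimal percentage, so reading one of the pressure files yields lines of this shape (numbers below are made up; total is the accumulated stall time in microseconds):

some avg10=0.12 avg60=0.58 avg300=0.30 total=1234567
full avg10=0.00 avg60=0.13 avg300=0.06 total=345678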
1298 return ERR_PTR(-EOPNOTSUPP); in psi_trigger_create()
1301 * Checking the privilege here on file->f_cred implies that a privileged user in psi_trigger_create()
1304 privileged = cap_raised(file->f_cred->cap_effective, CAP_SYS_RESOURCE); in psi_trigger_create()
1311 return ERR_PTR(-EINVAL); in psi_trigger_create()
1314 if (res == PSI_IRQ && --state != PSI_IRQ_FULL) in psi_trigger_create()
1315 return ERR_PTR(-EINVAL); in psi_trigger_create()
1319 return ERR_PTR(-EINVAL); in psi_trigger_create()
1322 return ERR_PTR(-EINVAL); in psi_trigger_create()
1329 return ERR_PTR(-EINVAL); in psi_trigger_create()
1333 return ERR_PTR(-EINVAL); in psi_trigger_create()
1337 return ERR_PTR(-ENOMEM); in psi_trigger_create()
1339 t->group = group; in psi_trigger_create()
1340 t->state = state; in psi_trigger_create()
1341 t->threshold = threshold_us * NSEC_PER_USEC; in psi_trigger_create()
1342 t->win.size = window_us * NSEC_PER_USEC; in psi_trigger_create()
1343 window_reset(&t->win, sched_clock(), in psi_trigger_create()
1344 group->total[PSI_POLL][t->state], 0); in psi_trigger_create()
1346 t->event = 0; in psi_trigger_create()
1347 t->last_event_time = 0; in psi_trigger_create()
1348 t->of = of; in psi_trigger_create()
1350 init_waitqueue_head(&t->event_wait); in psi_trigger_create()
1351 t->pending_event = false; in psi_trigger_create()
1352 t->aggregator = privileged ? PSI_POLL : PSI_AVGS; in psi_trigger_create()
1355 mutex_lock(&group->rtpoll_trigger_lock); in psi_trigger_create()
1357 if (!rcu_access_pointer(group->rtpoll_task)) { in psi_trigger_create()
1363 mutex_unlock(&group->rtpoll_trigger_lock); in psi_trigger_create()
1366 atomic_set(&group->rtpoll_wakeup, 0); in psi_trigger_create()
1368 rcu_assign_pointer(group->rtpoll_task, task); in psi_trigger_create()
1371 list_add(&t->node, &group->rtpoll_triggers); in psi_trigger_create()
1372 group->rtpoll_min_period = min(group->rtpoll_min_period, in psi_trigger_create()
1373 div_u64(t->win.size, UPDATES_PER_WINDOW)); in psi_trigger_create()
1374 group->rtpoll_nr_triggers[t->state]++; in psi_trigger_create()
1375 group->rtpoll_states |= (1 << t->state); in psi_trigger_create()
1377 mutex_unlock(&group->rtpoll_trigger_lock); in psi_trigger_create()
1379 mutex_lock(&group->avgs_lock); in psi_trigger_create()
1381 list_add(&t->node, &group->avg_triggers); in psi_trigger_create()
1382 group->avg_nr_triggers[t->state]++; in psi_trigger_create()
1384 mutex_unlock(&group->avgs_lock); in psi_trigger_create()
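A consumer of this interface writes "<some|full> <stall threshold in us> <window in us>" to one of the pressure files and then polls the file descriptor for POLLPRI. A minimal user-space sketch modeled on the PSI trigger documentation (error handling trimmed; a 150 ms some-memory threshold within a 1 s window is assumed):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char trig[] = "some 150000 1000000";	/* 150ms of stall in a 1s window */
	struct pollfd fds = { .events = POLLPRI };

	fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fds.fd < 0 || write(fds.fd, trig, strlen(trig) + 1) < 0)
		return 1;

	while (poll(&fds, 1, -1) > 0) {
		if (fds.revents & POLLPRI)
			printf("memory pressure event\n");
	}
	return 0;
}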
1401 group = t->group; in psi_trigger_destroy()
1407 if (t->of) in psi_trigger_destroy()
1408 kernfs_notify(t->of->kn); in psi_trigger_destroy()
1410 wake_up_interruptible(&t->event_wait); in psi_trigger_destroy()
1412 if (t->aggregator == PSI_AVGS) { in psi_trigger_destroy()
1413 mutex_lock(&group->avgs_lock); in psi_trigger_destroy()
1414 if (!list_empty(&t->node)) { in psi_trigger_destroy()
1415 list_del(&t->node); in psi_trigger_destroy()
1416 group->avg_nr_triggers[t->state]--; in psi_trigger_destroy()
1418 mutex_unlock(&group->avgs_lock); in psi_trigger_destroy()
1420 mutex_lock(&group->rtpoll_trigger_lock); in psi_trigger_destroy()
1421 if (!list_empty(&t->node)) { in psi_trigger_destroy()
1425 list_del(&t->node); in psi_trigger_destroy()
1426 group->rtpoll_nr_triggers[t->state]--; in psi_trigger_destroy()
1427 if (!group->rtpoll_nr_triggers[t->state]) in psi_trigger_destroy()
1428 group->rtpoll_states &= ~(1 << t->state); in psi_trigger_destroy()
1433 if (group->rtpoll_min_period == div_u64(t->win.size, UPDATES_PER_WINDOW)) { in psi_trigger_destroy()
1434 list_for_each_entry(tmp, &group->rtpoll_triggers, node) in psi_trigger_destroy()
1435 period = min(period, div_u64(tmp->win.size, in psi_trigger_destroy()
1437 group->rtpoll_min_period = period; in psi_trigger_destroy()
1440 if (group->rtpoll_states == 0) { in psi_trigger_destroy()
1441 group->rtpoll_until = 0; in psi_trigger_destroy()
1443 group->rtpoll_task, in psi_trigger_destroy()
1444 lockdep_is_held(&group->rtpoll_trigger_lock)); in psi_trigger_destroy()
1445 rcu_assign_pointer(group->rtpoll_task, NULL); in psi_trigger_destroy()
1446 del_timer(&group->rtpoll_timer); in psi_trigger_destroy()
1449 mutex_unlock(&group->rtpoll_trigger_lock); in psi_trigger_destroy()
1453 * Wait for psi_schedule_rtpoll_work RCU to complete its read-side in psi_trigger_destroy()
1466 * can no longer be found through group->rtpoll_task. in psi_trigger_destroy()
1469 atomic_set(&group->rtpoll_scheduled, 0); in psi_trigger_destroy()
1487 if (t->of) in psi_trigger_poll()
1488 kernfs_generic_poll(t->of, wait); in psi_trigger_poll()
1490 poll_wait(file, &t->event_wait, wait); in psi_trigger_poll()
1492 if (cmpxchg(&t->event, 1, 0) == 1) in psi_trigger_poll()
1538 return -EOPNOTSUPP; in psi_write()
1541 return -EINVAL; in psi_write()
1545 return -EFAULT; in psi_write()
1547 buf[buf_size - 1] = '\0'; in psi_write()
1549 seq = file->private_data; in psi_write()
1551 /* Take seq->lock to protect seq->private from concurrent writes */ in psi_write()
1552 mutex_lock(&seq->lock); in psi_write()
1555 if (seq->private) { in psi_write()
1556 mutex_unlock(&seq->lock); in psi_write()
1557 return -EBUSY; in psi_write()
1562 mutex_unlock(&seq->lock); in psi_write()
1566 smp_store_release(&seq->private, new); in psi_write()
1567 mutex_unlock(&seq->lock); in psi_write()
1592 struct seq_file *seq = file->private_data; in psi_fop_poll()
1594 return psi_trigger_poll(&seq->private, file, wait); in psi_fop_poll()
1599 struct seq_file *seq = file->private_data; in psi_fop_release()
1601 psi_trigger_destroy(seq->private); in psi_fop_release()