// SPDX-License-Identifier: GPL-2.0
/* sysfs entries for device PM */
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"

/*
 * control - Report/change current runtime PM setting of the device
 *
 * Runtime power management of a device can be blocked with the help of
 * this attribute. All devices have one of the following two values for
 * the power/control file:
 *
 * + "auto\n" to allow the device to be power managed at run time;
 * + "on\n" to prevent the device from being power managed at run time;
 *
 * The default for all devices is "auto", which means that devices may be
 * subject to automatic power management, depending on their drivers.
 * Changing this attribute to "on" prevents the driver from power managing
 * the device at run time. Doing that while the device is suspended causes
 * it to be woken up.
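 *
 * As an illustration (a sketch only, using a hypothetical helper and not
 * taken from any particular driver), bus type code or a driver can apply
 * the same policy programmatically through the runtime PM helpers that
 * back this attribute:
 *
 *	#include <linux/pm_runtime.h>
 *
 *	// Hypothetical helper: make "on" the effective policy for a device.
 *	static void foo_block_runtime_pm(struct device *dev)
 *	{
 *		pm_runtime_forbid(dev);		// same effect as writing "on"
 *	}
 *
 * Calling pm_runtime_allow() (or writing "auto") lifts the block again.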
 *
 * wakeup - Report/change current wakeup option for device
 *
 * Some devices support "wakeup" events, which are hardware signals
 * used to activate devices from suspended or low power states. Such
 * devices have one of three values for the sysfs power/wakeup file:
 *
 * + "enabled\n" to issue the events;
 * + "disabled\n" not to do so; or
 * + "\n" for temporary or permanent inability to issue wakeup.
 *
 * (For example, unconfigured USB devices can't issue wakeups.)
 *
 * Familiar examples of devices that can issue wakeup events include
 * keyboards and mice (both PS/2 and USB styles), power buttons, modems,
 * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events
 * will wake the entire system from a suspend state; others may just
 * wake up the device (if the system as a whole is already active).
 * Some wakeup events use normal IRQ lines; others use special
 * out-of-band signaling.
 *
 * It is the responsibility of device drivers to enable (or disable)
 * wakeup signaling as part of changing device power states, respecting
 * the policy choices provided through the driver model.
 *
 * Devices may not be able to generate wakeup events from all power
 * states. Also, the events may be ignored in some configurations;
 * for example, they might need help from other devices that aren't
 * active, or which may have wakeup disabled. Some drivers rely on
 * wakeup events internally (unless they are disabled), keeping
 * their hardware in low power modes whenever they're unused. This
 * saves runtime power, without requiring system-wide sleep states.
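 *
 * A driver-side sketch (hypothetical names; it assumes the device's wakeup
 * interrupt number is already known) of how this policy is typically
 * honoured:
 *
 *	#include <linux/interrupt.h>
 *	#include <linux/pm_wakeup.h>
 *
 *	// In probe: declare the capability, enabled by default.
 *	device_init_wakeup(dev, true);
 *
 *	// In the system suspend callback: arm the wakeup source only if
 *	// the user has left power/wakeup set to "enabled".
 *	if (device_may_wakeup(dev))
 *		enable_irq_wake(foo_wake_irq);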
 *
 * async - Report/change current async suspend setting for the device
 *
 * Asynchronous suspend and resume of the device during system-wide power
 * state transitions can be enabled by writing "enabled" to this file.
 * Analogously, if "disabled" is written to this file, the device will be
 * suspended and resumed synchronously.
 *
 * All devices have one of the following two values for power/async:
 *
 * + "enabled\n" to permit the asynchronous suspend/resume of the device;
 * + "disabled\n" to forbid it;
 *
 * NOTE: It generally is unsafe to permit the asynchronous suspend/resume
 * of a device unless it is certain that all of the PM dependencies of the
 * device are known to the PM core. However, for some devices this
 * attribute is set to "enabled" by bus type code or device drivers and in
 * such cases it should be safe to leave the default value.
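 *
 * For instance (an illustrative sketch, not a recommendation for any
 * particular device), bus type code that knows the PM core tracks all of
 * the device's suspend/resume dependencies can opt in when registering it:
 *
 *	device_enable_async_suspend(dev);	// same effect as writing "enabled"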
 *
 * autosuspend_delay_ms - Report/change a device's autosuspend_delay value
 *
 * Some drivers don't want to carry out a runtime suspend as soon as a
 * device becomes idle; they want it always to remain idle for some period
 * of time before suspending it. This period is the autosuspend_delay
 * value (expressed in milliseconds) and it can be controlled by the user.
 * If the value is negative then the device will never be runtime
 * suspended.
 *
 * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay
 * value are used only if the driver calls pm_runtime_use_autosuspend().
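 *
 * A minimal sketch (hypothetical foo_probe(); it assumes the runtime PM
 * callbacks are set up elsewhere) of a driver opting in to autosuspend
 * with a 2000 ms delay:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		pm_runtime_set_autosuspend_delay(dev, 2000);
 *		pm_runtime_use_autosuspend(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 *
 * Idle paths would then call pm_runtime_mark_last_busy() followed by
 * pm_runtime_put_autosuspend() instead of a plain pm_runtime_put().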
 *
 * wakeup_count - Report the number of wakeup events related to the device
 */

const char power_group_name[] = "power";
EXPORT_SYMBOL_GPL(power_group_name);

static const char ctrl_auto[] = "auto";
static const char ctrl_on[] = "on";

static ssize_t control_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  dev->power.runtime_auto ? ctrl_auto : ctrl_on);
}

static ssize_t control_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t n)
{
	device_lock(dev);
	if (sysfs_streq(buf, ctrl_auto))
		pm_runtime_allow(dev);
	else if (sysfs_streq(buf, ctrl_on))
		pm_runtime_forbid(dev);
	else
		n = -EINVAL;
	device_unlock(dev);
	return n;
}

static DEVICE_ATTR_RW(control);

static ssize_t runtime_active_time_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	u64 tmp = pm_runtime_active_time(dev);

	do_div(tmp, NSEC_PER_MSEC);

	return sysfs_emit(buf, "%llu\n", tmp);
}

static DEVICE_ATTR_RO(runtime_active_time);

static ssize_t runtime_suspended_time_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	u64 tmp = pm_runtime_suspended_time(dev);

	do_div(tmp, NSEC_PER_MSEC);

	return sysfs_emit(buf, "%llu\n", tmp);
}

static DEVICE_ATTR_RO(runtime_suspended_time);

static ssize_t runtime_status_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	const char *output;

	if (dev->power.runtime_error) {
		output = "error";
	} else if (dev->power.disable_depth) {
		output = "unsupported";
	} else {
		switch (dev->power.runtime_status) {
		case RPM_SUSPENDED:
			output = "suspended";
			break;
		case RPM_SUSPENDING:
			output = "suspending";
			break;
		case RPM_RESUMING:
			output = "resuming";
			break;
		case RPM_ACTIVE:
			output = "active";
			break;
		default:
			return -EIO;
		}
	}
	return sysfs_emit(buf, "%s\n", output);
}

static DEVICE_ATTR_RO(runtime_status);

static ssize_t autosuspend_delay_ms_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	if (!dev->power.use_autosuspend)
		return -EIO;

	return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay);
}

static ssize_t autosuspend_delay_ms_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t n)
{
	long delay;

	if (!dev->power.use_autosuspend)
		return -EIO;

	if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay)
		return -EINVAL;

	device_lock(dev);
	pm_runtime_set_autosuspend_delay(dev, delay);
	device_unlock(dev);
	return n;
}

static DEVICE_ATTR_RW(autosuspend_delay_ms);

static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	s32 value = dev_pm_qos_requested_resume_latency(dev);

	if (value == 0)
		return sysfs_emit(buf, "n/a\n");
	if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
		value = 0;

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t n)
{
	s32 value;
	int ret;

	if (!kstrtos32(buf, 0, &value)) {
		/*
		 * Prevent users from writing negative or "no constraint"
		 * values directly.
		 */
		if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
			return -EINVAL;

		if (value == 0)
			value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	} else if (sysfs_streq(buf, "n/a")) {
		value = 0;
	} else {
		return -EINVAL;
	}

	ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
					value);
	return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_resume_latency_us);

static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	s32 value = dev_pm_qos_get_user_latency_tolerance(dev);

	if (value < 0)
		return sysfs_emit(buf, "%s\n", "auto");
	if (value == PM_QOS_LATENCY_ANY)
		return sysfs_emit(buf, "%s\n", "any");

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t n)
{
	s32 value;
	int ret;

	if (kstrtos32(buf, 0, &value) == 0) {
		/* Users can't write negative values directly */
		if (value < 0)
			return -EINVAL;
	} else {
		if (sysfs_streq(buf, "auto"))
			value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
		else if (sysfs_streq(buf, "any"))
			value = PM_QOS_LATENCY_ANY;
		else
			return -EINVAL;
	}
	ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
	return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);

static ssize_t pm_qos_no_power_off_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return sysfs_emit(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
					  & PM_QOS_FLAG_NO_POWER_OFF));
}

static ssize_t pm_qos_no_power_off_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t n)
{
	int ret;

	if (kstrtoint(buf, 0, &ret))
		return -EINVAL;

	if (ret != 0 && ret != 1)
		return -EINVAL;

	ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
	return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_no_power_off);

#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";

static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return sysfs_emit(buf, "%s\n", device_can_wakeup(dev)
			  ? (device_may_wakeup(dev) ? _enabled : _disabled)
			  : "");
}

static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t n)
{
	if (!device_can_wakeup(dev))
		return -EINVAL;

	if (sysfs_streq(buf, _enabled))
		device_set_wakeup_enable(dev, 1);
	else if (sysfs_streq(buf, _disabled))
		device_set_wakeup_enable(dev, 0);
	else
		return -EINVAL;
	return n;
}

static DEVICE_ATTR_RW(wakeup);

static ssize_t wakeup_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long count;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		count = dev->power.wakeup->wakeup_count;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lu\n", count);
}

static DEVICE_ATTR_RO(wakeup_count);

static ssize_t wakeup_active_count_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	unsigned long count;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		count = dev->power.wakeup->active_count;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lu\n", count);
}

static DEVICE_ATTR_RO(wakeup_active_count);

static ssize_t wakeup_abort_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	unsigned long count;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		count = dev->power.wakeup->wakeup_count;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lu\n", count);
}

static DEVICE_ATTR_RO(wakeup_abort_count);

static ssize_t wakeup_expire_count_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	unsigned long count;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		count = dev->power.wakeup->expire_count;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lu\n", count);
}

static DEVICE_ATTR_RO(wakeup_expire_count);

static ssize_t wakeup_active_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	unsigned int active;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		active = dev->power.wakeup->active;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%u\n", active);
}

static DEVICE_ATTR_RO(wakeup_active);

static ssize_t wakeup_total_time_ms_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	s64 msec;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		msec = ktime_to_ms(dev->power.wakeup->total_time);
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lld\n", msec);
}

static DEVICE_ATTR_RO(wakeup_total_time_ms);

static ssize_t wakeup_max_time_ms_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	s64 msec;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		msec = ktime_to_ms(dev->power.wakeup->max_time);
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lld\n", msec);
}

static DEVICE_ATTR_RO(wakeup_max_time_ms);

static ssize_t wakeup_last_time_ms_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	s64 msec;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		msec = ktime_to_ms(dev->power.wakeup->last_time);
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lld\n", msec);
}

static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
						kgid_t kgid)
{
	if (dev->power.wakeup && dev->power.wakeup->dev)
		return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
	return 0;
}

static DEVICE_ATTR_RO(wakeup_last_time_ms);

#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	s64 msec;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time);
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);

	if (!enabled)
		return sysfs_emit(buf, "\n");
	return sysfs_emit(buf, "%lld\n", msec);
}

static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
#else /* CONFIG_PM_SLEEP */
static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
						kgid_t kgid)
{
	return 0;
}
#endif

#ifdef CONFIG_PM_ADVANCED_DEBUG
static ssize_t runtime_usage_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
static DEVICE_ATTR_RO(runtime_usage);

static ssize_t runtime_active_kids_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return sysfs_emit(buf, "%d\n", dev->power.ignore_children ?
			  0 : atomic_read(&dev->power.child_count));
}
static DEVICE_ATTR_RO(runtime_active_kids);

static ssize_t runtime_enabled_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	const char *output;

	if (dev->power.disable_depth && !dev->power.runtime_auto)
		output = "disabled & forbidden";
	else if (dev->power.disable_depth)
		output = "disabled";
	else if (!dev->power.runtime_auto)
		output = "forbidden";
	else
		output = "enabled";

	return sysfs_emit(buf, "%s\n", output);
}
static DEVICE_ATTR_RO(runtime_enabled);

#ifdef CONFIG_PM_SLEEP
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  device_async_suspend_enabled(dev) ?
			  _enabled : _disabled);
}

static ssize_t async_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t n)
{
	if (sysfs_streq(buf, _enabled))
		device_enable_async_suspend(dev);
	else if (sysfs_streq(buf, _disabled))
		device_disable_async_suspend(dev);
	else
		return -EINVAL;
	return n;
}

static DEVICE_ATTR_RW(async);

#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM_ADVANCED_DEBUG */

static struct attribute *power_attrs[] = {
#ifdef CONFIG_PM_ADVANCED_DEBUG
#ifdef CONFIG_PM_SLEEP
	&dev_attr_async.attr,
#endif
	&dev_attr_runtime_status.attr,
	&dev_attr_runtime_usage.attr,
	&dev_attr_runtime_active_kids.attr,
	&dev_attr_runtime_enabled.attr,
#endif /* CONFIG_PM_ADVANCED_DEBUG */
	NULL,
};
static const struct attribute_group pm_attr_group = {
	.name	= power_group_name,
	.attrs	= power_attrs,
};

static struct attribute *wakeup_attrs[] = {
#ifdef CONFIG_PM_SLEEP
	&dev_attr_wakeup.attr,
	&dev_attr_wakeup_count.attr,
	&dev_attr_wakeup_active_count.attr,
	&dev_attr_wakeup_abort_count.attr,
	&dev_attr_wakeup_expire_count.attr,
	&dev_attr_wakeup_active.attr,
	&dev_attr_wakeup_total_time_ms.attr,
	&dev_attr_wakeup_max_time_ms.attr,
	&dev_attr_wakeup_last_time_ms.attr,
#ifdef CONFIG_PM_AUTOSLEEP
	&dev_attr_wakeup_prevent_sleep_time_ms.attr,
#endif
#endif
	NULL,
};
static const struct attribute_group pm_wakeup_attr_group = {
	.name	= power_group_name,
	.attrs	= wakeup_attrs,
};

static struct attribute *runtime_attrs[] = {
#ifndef CONFIG_PM_ADVANCED_DEBUG
	&dev_attr_runtime_status.attr,
#endif
	&dev_attr_control.attr,
	&dev_attr_runtime_suspended_time.attr,
	&dev_attr_runtime_active_time.attr,
	&dev_attr_autosuspend_delay_ms.attr,
	NULL,
};
static const struct attribute_group pm_runtime_attr_group = {
	.name	= power_group_name,
	.attrs	= runtime_attrs,
};

static struct attribute *pm_qos_resume_latency_attrs[] = {
	&dev_attr_pm_qos_resume_latency_us.attr,
	NULL,
};
static const struct attribute_group pm_qos_resume_latency_attr_group = {
	.name	= power_group_name,
	.attrs	= pm_qos_resume_latency_attrs,
};

static struct attribute *pm_qos_latency_tolerance_attrs[] = {
	&dev_attr_pm_qos_latency_tolerance_us.attr,
	NULL,
};
static const struct attribute_group pm_qos_latency_tolerance_attr_group = {
	.name	= power_group_name,
	.attrs	= pm_qos_latency_tolerance_attrs,
};

static struct attribute *pm_qos_flags_attrs[] = {
	&dev_attr_pm_qos_no_power_off.attr,
	NULL,
};
static const struct attribute_group pm_qos_flags_attr_group = {
	.name	= power_group_name,
	.attrs	= pm_qos_flags_attrs,
};

int dpm_sysfs_add(struct device *dev)
{
	int rc;

	/* No need to create PM sysfs if explicitly disabled. */
	if (device_pm_not_required(dev))
		return 0;

	rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
	if (rc)
		return rc;

	if (!pm_runtime_has_no_callbacks(dev)) {
		rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
		if (rc)
			goto err_out;
	}
	if (device_can_wakeup(dev)) {
		rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
		if (rc)
			goto err_runtime;
	}
	if (dev->power.set_latency_tolerance) {
		rc = sysfs_merge_group(&dev->kobj,
				       &pm_qos_latency_tolerance_attr_group);
		if (rc)
			goto err_wakeup;
	}
	rc = pm_wakeup_source_sysfs_add(dev);
	if (rc)
		goto err_latency;
	return 0;

 err_latency:
	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
 err_wakeup:
	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
 err_runtime:
	sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
 err_out:
	sysfs_remove_group(&dev->kobj, &pm_attr_group);
	return rc;
}

int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
{
	int rc;

	if (device_pm_not_required(dev))
		return 0;

	rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid);
	if (rc)
		return rc;

	if (!pm_runtime_has_no_callbacks(dev)) {
		rc = sysfs_group_change_owner(
			&dev->kobj, &pm_runtime_attr_group, kuid, kgid);
		if (rc)
			return rc;
	}

	if (device_can_wakeup(dev)) {
		rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group,
					      kuid, kgid);
		if (rc)
			return rc;

		rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid);
		if (rc)
			return rc;
	}

	if (dev->power.set_latency_tolerance) {
		rc = sysfs_group_change_owner(
			&dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid,
			kgid);
		if (rc)
			return rc;
	}
	return 0;
}

int wakeup_sysfs_add(struct device *dev)
{
	int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);

	if (!ret)
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);

	return ret;
}

void wakeup_sysfs_remove(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
	kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}

int pm_qos_sysfs_add_resume_latency(struct device *dev)
{
	return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}

void pm_qos_sysfs_remove_resume_latency(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}

int pm_qos_sysfs_add_flags(struct device *dev)
{
	return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
}

void pm_qos_sysfs_remove_flags(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
}

int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
{
	return sysfs_merge_group(&dev->kobj,
				 &pm_qos_latency_tolerance_attr_group);
}

void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
}

void rpm_sysfs_remove(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
}

void dpm_sysfs_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;
	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
	dev_pm_qos_constraints_destroy(dev);
	rpm_sysfs_remove(dev);
	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
	sysfs_remove_group(&dev->kobj, &pm_attr_group);
}