1 // SPDX-License-Identifier: GPL-2.0 2 /* sysfs entries for device PM */ 3 #include <linux/device.h> 4 #include <linux/string.h> 5 #include <linux/export.h> 6 #include <linux/pm_qos.h> 7 #include <linux/pm_runtime.h> 8 #include <linux/pm_wakeup.h> 9 #include <linux/atomic.h> 10 #include <linux/jiffies.h> 11 #include "power.h" 12 13 /* 14 * control - Report/change current runtime PM setting of the device 15 * 16 * Runtime power management of a device can be blocked with the help of 17 * this attribute. All devices have one of the following two values for 18 * the power/control file: 19 * 20 * + "auto\n" to allow the device to be power managed at run time; 21 * + "on\n" to prevent the device from being power managed at run time; 22 * 23 * The default for all devices is "auto", which means that devices may be 24 * subject to automatic power management, depending on their drivers. 25 * Changing this attribute to "on" prevents the driver from power managing 26 * the device at run time. Doing that while the device is suspended causes 27 * it to be woken up. 28 * 29 * wakeup - Report/change current wakeup option for device 30 * 31 * Some devices support "wakeup" events, which are hardware signals 32 * used to activate devices from suspended or low power states. Such 33 * devices have one of three values for the sysfs power/wakeup file: 34 * 35 * + "enabled\n" to issue the events; 36 * + "disabled\n" not to do so; or 37 * + "\n" for temporary or permanent inability to issue wakeup. 38 * 39 * (For example, unconfigured USB devices can't issue wakeups.) 40 * 41 * Familiar examples of devices that can issue wakeup events include 42 * keyboards and mice (both PS2 and USB styles), power buttons, modems, 43 * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events 44 * will wake the entire system from a suspend state; others may just 45 * wake up the device (if the system as a whole is already active). 46 * Some wakeup events use normal IRQ lines; other use special out 47 * of band signaling. 48 * 49 * It is the responsibility of device drivers to enable (or disable) 50 * wakeup signaling as part of changing device power states, respecting 51 * the policy choices provided through the driver model. 52 * 53 * Devices may not be able to generate wakeup events from all power 54 * states. Also, the events may be ignored in some configurations; 55 * for example, they might need help from other devices that aren't 56 * active, or which may have wakeup disabled. Some drivers rely on 57 * wakeup events internally (unless they are disabled), keeping 58 * their hardware in low power modes whenever they're unused. This 59 * saves runtime power, without requiring system-wide sleep states. 60 * 61 * async - Report/change current async suspend setting for the device 62 * 63 * Asynchronous suspend and resume of the device during system-wide power 64 * state transitions can be enabled by writing "enabled" to this file. 65 * Analogously, if "disabled" is written to this file, the device will be 66 * suspended and resumed synchronously. 67 * 68 * All devices have one of the following two values for power/async: 69 * 70 * + "enabled\n" to permit the asynchronous suspend/resume of the device; 71 * + "disabled\n" to forbid it; 72 * 73 * NOTE: It generally is unsafe to permit the asynchronous suspend/resume 74 * of a device unless it is certain that all of the PM dependencies of the 75 * device are known to the PM core. However, for some devices this 76 * attribute is set to "enabled" by bus type code or device drivers and in 77 * that cases it should be safe to leave the default value. 78 * 79 * autosuspend_delay_ms - Report/change a device's autosuspend_delay value 80 * 81 * Some drivers don't want to carry out a runtime suspend as soon as a 82 * device becomes idle; they want it always to remain idle for some period 83 * of time before suspending it. This period is the autosuspend_delay 84 * value (expressed in milliseconds) and it can be controlled by the user. 85 * If the value is negative then the device will never be runtime 86 * suspended. 87 * 88 * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay 89 * value are used only if the driver calls pm_runtime_use_autosuspend(). 90 * 91 * wakeup_count - Report the number of wakeup events related to the device 92 */ 93 94 const char power_group_name[] = "power"; 95 EXPORT_SYMBOL_GPL(power_group_name); 96 97 static const char ctrl_auto[] = "auto"; 98 static const char ctrl_on[] = "on"; 99 100 static ssize_t control_show(struct device *dev, struct device_attribute *attr, 101 char *buf) 102 { 103 return sprintf(buf, "%s\n", 104 dev->power.runtime_auto ? ctrl_auto : ctrl_on); 105 } 106 107 static ssize_t control_store(struct device * dev, struct device_attribute *attr, 108 const char * buf, size_t n) 109 { 110 device_lock(dev); 111 if (sysfs_streq(buf, ctrl_auto)) 112 pm_runtime_allow(dev); 113 else if (sysfs_streq(buf, ctrl_on)) 114 pm_runtime_forbid(dev); 115 else 116 n = -EINVAL; 117 device_unlock(dev); 118 return n; 119 } 120 121 static DEVICE_ATTR_RW(control); 122 123 static ssize_t runtime_active_time_show(struct device *dev, 124 struct device_attribute *attr, char *buf) 125 { 126 int ret; 127 u64 tmp = pm_runtime_active_time(dev); 128 do_div(tmp, NSEC_PER_MSEC); 129 ret = sprintf(buf, "%llu\n", tmp); 130 return ret; 131 } 132 133 static DEVICE_ATTR_RO(runtime_active_time); 134 135 static ssize_t runtime_suspended_time_show(struct device *dev, 136 struct device_attribute *attr, char *buf) 137 { 138 int ret; 139 u64 tmp = pm_runtime_suspended_time(dev); 140 do_div(tmp, NSEC_PER_MSEC); 141 ret = sprintf(buf, "%llu\n", tmp); 142 return ret; 143 } 144 145 static DEVICE_ATTR_RO(runtime_suspended_time); 146 147 static ssize_t runtime_status_show(struct device *dev, 148 struct device_attribute *attr, char *buf) 149 { 150 const char *p; 151 152 if (dev->power.runtime_error) { 153 p = "error\n"; 154 } else if (dev->power.disable_depth) { 155 p = "unsupported\n"; 156 } else { 157 switch (dev->power.runtime_status) { 158 case RPM_SUSPENDED: 159 p = "suspended\n"; 160 break; 161 case RPM_SUSPENDING: 162 p = "suspending\n"; 163 break; 164 case RPM_RESUMING: 165 p = "resuming\n"; 166 break; 167 case RPM_ACTIVE: 168 p = "active\n"; 169 break; 170 default: 171 return -EIO; 172 } 173 } 174 return sprintf(buf, p); 175 } 176 177 static DEVICE_ATTR_RO(runtime_status); 178 179 static ssize_t autosuspend_delay_ms_show(struct device *dev, 180 struct device_attribute *attr, char *buf) 181 { 182 if (!dev->power.use_autosuspend) 183 return -EIO; 184 return sprintf(buf, "%d\n", dev->power.autosuspend_delay); 185 } 186 187 static ssize_t autosuspend_delay_ms_store(struct device *dev, 188 struct device_attribute *attr, const char *buf, size_t n) 189 { 190 long delay; 191 192 if (!dev->power.use_autosuspend) 193 return -EIO; 194 195 if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay) 196 return -EINVAL; 197 198 device_lock(dev); 199 pm_runtime_set_autosuspend_delay(dev, delay); 200 device_unlock(dev); 201 return n; 202 } 203 204 static DEVICE_ATTR_RW(autosuspend_delay_ms); 205 206 static ssize_t pm_qos_resume_latency_us_show(struct device *dev, 207 struct device_attribute *attr, 208 char *buf) 209 { 210 s32 value = dev_pm_qos_requested_resume_latency(dev); 211 212 if (value == 0) 213 return sprintf(buf, "n/a\n"); 214 if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) 215 value = 0; 216 217 return sprintf(buf, "%d\n", value); 218 } 219 220 static ssize_t pm_qos_resume_latency_us_store(struct device *dev, 221 struct device_attribute *attr, 222 const char *buf, size_t n) 223 { 224 s32 value; 225 int ret; 226 227 if (!kstrtos32(buf, 0, &value)) { 228 /* 229 * Prevent users from writing negative or "no constraint" values 230 * directly. 231 */ 232 if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) 233 return -EINVAL; 234 235 if (value == 0) 236 value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; 237 } else if (sysfs_streq(buf, "n/a")) { 238 value = 0; 239 } else { 240 return -EINVAL; 241 } 242 243 ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, 244 value); 245 return ret < 0 ? ret : n; 246 } 247 248 static DEVICE_ATTR_RW(pm_qos_resume_latency_us); 249 250 static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev, 251 struct device_attribute *attr, 252 char *buf) 253 { 254 s32 value = dev_pm_qos_get_user_latency_tolerance(dev); 255 256 if (value < 0) 257 return sprintf(buf, "auto\n"); 258 if (value == PM_QOS_LATENCY_ANY) 259 return sprintf(buf, "any\n"); 260 261 return sprintf(buf, "%d\n", value); 262 } 263 264 static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev, 265 struct device_attribute *attr, 266 const char *buf, size_t n) 267 { 268 s32 value; 269 int ret; 270 271 if (kstrtos32(buf, 0, &value) == 0) { 272 /* Users can't write negative values directly */ 273 if (value < 0) 274 return -EINVAL; 275 } else { 276 if (sysfs_streq(buf, "auto")) 277 value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; 278 else if (sysfs_streq(buf, "any")) 279 value = PM_QOS_LATENCY_ANY; 280 else 281 return -EINVAL; 282 } 283 ret = dev_pm_qos_update_user_latency_tolerance(dev, value); 284 return ret < 0 ? ret : n; 285 } 286 287 static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us); 288 289 static ssize_t pm_qos_no_power_off_show(struct device *dev, 290 struct device_attribute *attr, 291 char *buf) 292 { 293 return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) 294 & PM_QOS_FLAG_NO_POWER_OFF)); 295 } 296 297 static ssize_t pm_qos_no_power_off_store(struct device *dev, 298 struct device_attribute *attr, 299 const char *buf, size_t n) 300 { 301 int ret; 302 303 if (kstrtoint(buf, 0, &ret)) 304 return -EINVAL; 305 306 if (ret != 0 && ret != 1) 307 return -EINVAL; 308 309 ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret); 310 return ret < 0 ? ret : n; 311 } 312 313 static DEVICE_ATTR_RW(pm_qos_no_power_off); 314 315 #ifdef CONFIG_PM_SLEEP 316 static const char _enabled[] = "enabled"; 317 static const char _disabled[] = "disabled"; 318 319 static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr, 320 char *buf) 321 { 322 return sprintf(buf, "%s\n", device_can_wakeup(dev) 323 ? (device_may_wakeup(dev) ? _enabled : _disabled) 324 : ""); 325 } 326 327 static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr, 328 const char *buf, size_t n) 329 { 330 if (!device_can_wakeup(dev)) 331 return -EINVAL; 332 333 if (sysfs_streq(buf, _enabled)) 334 device_set_wakeup_enable(dev, 1); 335 else if (sysfs_streq(buf, _disabled)) 336 device_set_wakeup_enable(dev, 0); 337 else 338 return -EINVAL; 339 return n; 340 } 341 342 static DEVICE_ATTR_RW(wakeup); 343 344 static ssize_t wakeup_count_show(struct device *dev, 345 struct device_attribute *attr, char *buf) 346 { 347 unsigned long count = 0; 348 bool enabled = false; 349 350 spin_lock_irq(&dev->power.lock); 351 if (dev->power.wakeup) { 352 count = dev->power.wakeup->wakeup_count; 353 enabled = true; 354 } 355 spin_unlock_irq(&dev->power.lock); 356 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); 357 } 358 359 static DEVICE_ATTR_RO(wakeup_count); 360 361 static ssize_t wakeup_active_count_show(struct device *dev, 362 struct device_attribute *attr, 363 char *buf) 364 { 365 unsigned long count = 0; 366 bool enabled = false; 367 368 spin_lock_irq(&dev->power.lock); 369 if (dev->power.wakeup) { 370 count = dev->power.wakeup->active_count; 371 enabled = true; 372 } 373 spin_unlock_irq(&dev->power.lock); 374 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); 375 } 376 377 static DEVICE_ATTR_RO(wakeup_active_count); 378 379 static ssize_t wakeup_abort_count_show(struct device *dev, 380 struct device_attribute *attr, 381 char *buf) 382 { 383 unsigned long count = 0; 384 bool enabled = false; 385 386 spin_lock_irq(&dev->power.lock); 387 if (dev->power.wakeup) { 388 count = dev->power.wakeup->wakeup_count; 389 enabled = true; 390 } 391 spin_unlock_irq(&dev->power.lock); 392 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); 393 } 394 395 static DEVICE_ATTR_RO(wakeup_abort_count); 396 397 static ssize_t wakeup_expire_count_show(struct device *dev, 398 struct device_attribute *attr, 399 char *buf) 400 { 401 unsigned long count = 0; 402 bool enabled = false; 403 404 spin_lock_irq(&dev->power.lock); 405 if (dev->power.wakeup) { 406 count = dev->power.wakeup->expire_count; 407 enabled = true; 408 } 409 spin_unlock_irq(&dev->power.lock); 410 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); 411 } 412 413 static DEVICE_ATTR_RO(wakeup_expire_count); 414 415 static ssize_t wakeup_active_show(struct device *dev, 416 struct device_attribute *attr, char *buf) 417 { 418 unsigned int active = 0; 419 bool enabled = false; 420 421 spin_lock_irq(&dev->power.lock); 422 if (dev->power.wakeup) { 423 active = dev->power.wakeup->active; 424 enabled = true; 425 } 426 spin_unlock_irq(&dev->power.lock); 427 return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n"); 428 } 429 430 static DEVICE_ATTR_RO(wakeup_active); 431 432 static ssize_t wakeup_total_time_ms_show(struct device *dev, 433 struct device_attribute *attr, 434 char *buf) 435 { 436 s64 msec = 0; 437 bool enabled = false; 438 439 spin_lock_irq(&dev->power.lock); 440 if (dev->power.wakeup) { 441 msec = ktime_to_ms(dev->power.wakeup->total_time); 442 enabled = true; 443 } 444 spin_unlock_irq(&dev->power.lock); 445 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); 446 } 447 448 static DEVICE_ATTR_RO(wakeup_total_time_ms); 449 450 static ssize_t wakeup_max_time_ms_show(struct device *dev, 451 struct device_attribute *attr, char *buf) 452 { 453 s64 msec = 0; 454 bool enabled = false; 455 456 spin_lock_irq(&dev->power.lock); 457 if (dev->power.wakeup) { 458 msec = ktime_to_ms(dev->power.wakeup->max_time); 459 enabled = true; 460 } 461 spin_unlock_irq(&dev->power.lock); 462 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); 463 } 464 465 static DEVICE_ATTR_RO(wakeup_max_time_ms); 466 467 static ssize_t wakeup_last_time_ms_show(struct device *dev, 468 struct device_attribute *attr, 469 char *buf) 470 { 471 s64 msec = 0; 472 bool enabled = false; 473 474 spin_lock_irq(&dev->power.lock); 475 if (dev->power.wakeup) { 476 msec = ktime_to_ms(dev->power.wakeup->last_time); 477 enabled = true; 478 } 479 spin_unlock_irq(&dev->power.lock); 480 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); 481 } 482 483 static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid, 484 kgid_t kgid) 485 { 486 if (dev->power.wakeup && dev->power.wakeup->dev) 487 return device_change_owner(dev->power.wakeup->dev, kuid, kgid); 488 return 0; 489 } 490 491 static DEVICE_ATTR_RO(wakeup_last_time_ms); 492 493 #ifdef CONFIG_PM_AUTOSLEEP 494 static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev, 495 struct device_attribute *attr, 496 char *buf) 497 { 498 s64 msec = 0; 499 bool enabled = false; 500 501 spin_lock_irq(&dev->power.lock); 502 if (dev->power.wakeup) { 503 msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time); 504 enabled = true; 505 } 506 spin_unlock_irq(&dev->power.lock); 507 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); 508 } 509 510 static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms); 511 #endif /* CONFIG_PM_AUTOSLEEP */ 512 #else /* CONFIG_PM_SLEEP */ 513 static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid, 514 kgid_t kgid) 515 { 516 return 0; 517 } 518 #endif 519 520 #ifdef CONFIG_PM_ADVANCED_DEBUG 521 static ssize_t runtime_usage_show(struct device *dev, 522 struct device_attribute *attr, char *buf) 523 { 524 return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count)); 525 } 526 static DEVICE_ATTR_RO(runtime_usage); 527 528 static ssize_t runtime_active_kids_show(struct device *dev, 529 struct device_attribute *attr, 530 char *buf) 531 { 532 return sprintf(buf, "%d\n", dev->power.ignore_children ? 533 0 : atomic_read(&dev->power.child_count)); 534 } 535 static DEVICE_ATTR_RO(runtime_active_kids); 536 537 static ssize_t runtime_enabled_show(struct device *dev, 538 struct device_attribute *attr, char *buf) 539 { 540 if (dev->power.disable_depth && (dev->power.runtime_auto == false)) 541 return sprintf(buf, "disabled & forbidden\n"); 542 if (dev->power.disable_depth) 543 return sprintf(buf, "disabled\n"); 544 if (dev->power.runtime_auto == false) 545 return sprintf(buf, "forbidden\n"); 546 return sprintf(buf, "enabled\n"); 547 } 548 static DEVICE_ATTR_RO(runtime_enabled); 549 550 #ifdef CONFIG_PM_SLEEP 551 static ssize_t async_show(struct device *dev, struct device_attribute *attr, 552 char *buf) 553 { 554 return sprintf(buf, "%s\n", 555 device_async_suspend_enabled(dev) ? 556 _enabled : _disabled); 557 } 558 559 static ssize_t async_store(struct device *dev, struct device_attribute *attr, 560 const char *buf, size_t n) 561 { 562 if (sysfs_streq(buf, _enabled)) 563 device_enable_async_suspend(dev); 564 else if (sysfs_streq(buf, _disabled)) 565 device_disable_async_suspend(dev); 566 else 567 return -EINVAL; 568 return n; 569 } 570 571 static DEVICE_ATTR_RW(async); 572 573 #endif /* CONFIG_PM_SLEEP */ 574 #endif /* CONFIG_PM_ADVANCED_DEBUG */ 575 576 static struct attribute *power_attrs[] = { 577 #ifdef CONFIG_PM_ADVANCED_DEBUG 578 #ifdef CONFIG_PM_SLEEP 579 &dev_attr_async.attr, 580 #endif 581 &dev_attr_runtime_status.attr, 582 &dev_attr_runtime_usage.attr, 583 &dev_attr_runtime_active_kids.attr, 584 &dev_attr_runtime_enabled.attr, 585 #endif /* CONFIG_PM_ADVANCED_DEBUG */ 586 NULL, 587 }; 588 static const struct attribute_group pm_attr_group = { 589 .name = power_group_name, 590 .attrs = power_attrs, 591 }; 592 593 static struct attribute *wakeup_attrs[] = { 594 #ifdef CONFIG_PM_SLEEP 595 &dev_attr_wakeup.attr, 596 &dev_attr_wakeup_count.attr, 597 &dev_attr_wakeup_active_count.attr, 598 &dev_attr_wakeup_abort_count.attr, 599 &dev_attr_wakeup_expire_count.attr, 600 &dev_attr_wakeup_active.attr, 601 &dev_attr_wakeup_total_time_ms.attr, 602 &dev_attr_wakeup_max_time_ms.attr, 603 &dev_attr_wakeup_last_time_ms.attr, 604 #ifdef CONFIG_PM_AUTOSLEEP 605 &dev_attr_wakeup_prevent_sleep_time_ms.attr, 606 #endif 607 #endif 608 NULL, 609 }; 610 static const struct attribute_group pm_wakeup_attr_group = { 611 .name = power_group_name, 612 .attrs = wakeup_attrs, 613 }; 614 615 static struct attribute *runtime_attrs[] = { 616 #ifndef CONFIG_PM_ADVANCED_DEBUG 617 &dev_attr_runtime_status.attr, 618 #endif 619 &dev_attr_control.attr, 620 &dev_attr_runtime_suspended_time.attr, 621 &dev_attr_runtime_active_time.attr, 622 &dev_attr_autosuspend_delay_ms.attr, 623 NULL, 624 }; 625 static const struct attribute_group pm_runtime_attr_group = { 626 .name = power_group_name, 627 .attrs = runtime_attrs, 628 }; 629 630 static struct attribute *pm_qos_resume_latency_attrs[] = { 631 &dev_attr_pm_qos_resume_latency_us.attr, 632 NULL, 633 }; 634 static const struct attribute_group pm_qos_resume_latency_attr_group = { 635 .name = power_group_name, 636 .attrs = pm_qos_resume_latency_attrs, 637 }; 638 639 static struct attribute *pm_qos_latency_tolerance_attrs[] = { 640 &dev_attr_pm_qos_latency_tolerance_us.attr, 641 NULL, 642 }; 643 static const struct attribute_group pm_qos_latency_tolerance_attr_group = { 644 .name = power_group_name, 645 .attrs = pm_qos_latency_tolerance_attrs, 646 }; 647 648 static struct attribute *pm_qos_flags_attrs[] = { 649 &dev_attr_pm_qos_no_power_off.attr, 650 NULL, 651 }; 652 static const struct attribute_group pm_qos_flags_attr_group = { 653 .name = power_group_name, 654 .attrs = pm_qos_flags_attrs, 655 }; 656 657 int dpm_sysfs_add(struct device *dev) 658 { 659 int rc; 660 661 /* No need to create PM sysfs if explicitly disabled. */ 662 if (device_pm_not_required(dev)) 663 return 0; 664 665 rc = sysfs_create_group(&dev->kobj, &pm_attr_group); 666 if (rc) 667 return rc; 668 669 if (pm_runtime_callbacks_present(dev)) { 670 rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group); 671 if (rc) 672 goto err_out; 673 } 674 if (device_can_wakeup(dev)) { 675 rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); 676 if (rc) 677 goto err_runtime; 678 } 679 if (dev->power.set_latency_tolerance) { 680 rc = sysfs_merge_group(&dev->kobj, 681 &pm_qos_latency_tolerance_attr_group); 682 if (rc) 683 goto err_wakeup; 684 } 685 rc = pm_wakeup_source_sysfs_add(dev); 686 if (rc) 687 goto err_latency; 688 return 0; 689 690 err_latency: 691 sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); 692 err_wakeup: 693 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 694 err_runtime: 695 sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); 696 err_out: 697 sysfs_remove_group(&dev->kobj, &pm_attr_group); 698 return rc; 699 } 700 701 int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) 702 { 703 int rc; 704 705 if (device_pm_not_required(dev)) 706 return 0; 707 708 rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid); 709 if (rc) 710 return rc; 711 712 if (pm_runtime_callbacks_present(dev)) { 713 rc = sysfs_group_change_owner( 714 &dev->kobj, &pm_runtime_attr_group, kuid, kgid); 715 if (rc) 716 return rc; 717 } 718 719 if (device_can_wakeup(dev)) { 720 rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group, 721 kuid, kgid); 722 if (rc) 723 return rc; 724 725 rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid); 726 if (rc) 727 return rc; 728 } 729 730 if (dev->power.set_latency_tolerance) { 731 rc = sysfs_group_change_owner( 732 &dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid, 733 kgid); 734 if (rc) 735 return rc; 736 } 737 return 0; 738 } 739 740 int wakeup_sysfs_add(struct device *dev) 741 { 742 return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); 743 } 744 745 void wakeup_sysfs_remove(struct device *dev) 746 { 747 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 748 } 749 750 int pm_qos_sysfs_add_resume_latency(struct device *dev) 751 { 752 return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); 753 } 754 755 void pm_qos_sysfs_remove_resume_latency(struct device *dev) 756 { 757 sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); 758 } 759 760 int pm_qos_sysfs_add_flags(struct device *dev) 761 { 762 return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group); 763 } 764 765 void pm_qos_sysfs_remove_flags(struct device *dev) 766 { 767 sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); 768 } 769 770 int pm_qos_sysfs_add_latency_tolerance(struct device *dev) 771 { 772 return sysfs_merge_group(&dev->kobj, 773 &pm_qos_latency_tolerance_attr_group); 774 } 775 776 void pm_qos_sysfs_remove_latency_tolerance(struct device *dev) 777 { 778 sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); 779 } 780 781 void rpm_sysfs_remove(struct device *dev) 782 { 783 sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); 784 } 785 786 void dpm_sysfs_remove(struct device *dev) 787 { 788 if (device_pm_not_required(dev)) 789 return; 790 sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); 791 dev_pm_qos_constraints_destroy(dev); 792 rpm_sysfs_remove(dev); 793 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 794 sysfs_remove_group(&dev->kobj, &pm_attr_group); 795 } 796