// SPDX-License-Identifier: GPL-2.0
/* sysfs entries for device PM */
#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"

/*
 * control - Report/change current runtime PM setting of the device
 *
 * Runtime power management of a device can be blocked with the help of
 * this attribute. All devices have one of the following two values for
 * the power/control file:
 *
 * + "auto\n" to allow the device to be power managed at run time;
 * + "on\n" to prevent the device from being power managed at run time;
 *
 * The default for all devices is "auto", which means that devices may be
 * subject to automatic power management, depending on their drivers.
 * Changing this attribute to "on" prevents the driver from power managing
 * the device at run time. Doing that while the device is suspended causes
 * it to be woken up.
 *
 * wakeup - Report/change current wakeup option for device
 *
 * Some devices support "wakeup" events, which are hardware signals
 * used to activate devices from suspended or low power states. Such
 * devices have one of three values for the sysfs power/wakeup file:
 *
 * + "enabled\n" to issue the events;
 * + "disabled\n" not to do so; or
 * + "\n" for temporary or permanent inability to issue wakeup.
 *
 * (For example, unconfigured USB devices can't issue wakeups.)
 *
 * Familiar examples of devices that can issue wakeup events include
 * keyboards and mice (both PS2 and USB styles), power buttons, modems,
 * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events
 * will wake the entire system from a suspend state; others may just
 * wake up the device (if the system as a whole is already active).
 * Some wakeup events use normal IRQ lines; others use special out
 * of band signaling.
 *
 * It is the responsibility of device drivers to enable (or disable)
 * wakeup signaling as part of changing device power states, respecting
 * the policy choices provided through the driver model.
 *
 * Devices may not be able to generate wakeup events from all power
 * states. Also, the events may be ignored in some configurations;
 * for example, they might need help from other devices that aren't
 * active, or which may have wakeup disabled. Some drivers rely on
 * wakeup events internally (unless they are disabled), keeping
 * their hardware in low power modes whenever they're unused. This
 * saves runtime power, without requiring system-wide sleep states.
 *
 * async - Report/change current async suspend setting for the device
 *
 * Asynchronous suspend and resume of the device during system-wide power
 * state transitions can be enabled by writing "enabled" to this file.
 * Analogously, if "disabled" is written to this file, the device will be
 * suspended and resumed synchronously.
 *
 * All devices have one of the following two values for power/async:
 *
 * + "enabled\n" to permit the asynchronous suspend/resume of the device;
 * + "disabled\n" to forbid it;
 *
 * NOTE: It generally is unsafe to permit the asynchronous suspend/resume
 * of a device unless it is certain that all of the PM dependencies of the
 * device are known to the PM core. However, for some devices this
 * attribute is set to "enabled" by bus type code or device drivers and in
 * those cases it should be safe to leave the default value.
 *
 * autosuspend_delay_ms - Report/change a device's autosuspend_delay value
 *
 * Some drivers don't want to carry out a runtime suspend as soon as a
 * device becomes idle; they want it always to remain idle for some period
 * of time before suspending it. This period is the autosuspend_delay
 * value (expressed in milliseconds) and it can be controlled by the user.
 * If the value is negative then the device will never be runtime
 * suspended.
 *
 * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay
 * value are used only if the driver calls pm_runtime_use_autosuspend().
 *
 * wakeup_count - Report the number of wakeup events related to the device
 */
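
/*
 * Illustrative userspace usage of the attributes described above. The
 * device path is only a placeholder; real paths depend on the bus and
 * the device in question:
 *
 *      # allow the device to be power managed at run time
 *      echo auto > /sys/devices/.../power/control
 *
 *      # keep the device powered on at run time
 *      echo on > /sys/devices/.../power/control
 *
 *      # require 2000 ms of idleness before a runtime suspend (only has
 *      # an effect if the driver uses autosuspend, see the NOTE above)
 *      echo 2000 > /sys/devices/.../power/autosuspend_delay_ms
 *
 *      # query the current runtime PM status ("active", "suspended", ...)
 *      cat /sys/devices/.../power/runtime_status
 */
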
const char power_group_name[] = "power";
EXPORT_SYMBOL_GPL(power_group_name);

static const char ctrl_auto[] = "auto";
static const char ctrl_on[] = "on";

static ssize_t control_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "%s\n",
                       dev->power.runtime_auto ? ctrl_auto : ctrl_on);
}

static ssize_t control_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t n)
{
        device_lock(dev);
        if (sysfs_streq(buf, ctrl_auto))
                pm_runtime_allow(dev);
        else if (sysfs_streq(buf, ctrl_on))
                pm_runtime_forbid(dev);
        else
                n = -EINVAL;
        device_unlock(dev);
        return n;
}

static DEVICE_ATTR_RW(control);

static ssize_t runtime_active_time_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        int ret;
        u64 tmp = pm_runtime_active_time(dev);

        do_div(tmp, NSEC_PER_MSEC);
        ret = sprintf(buf, "%llu\n", tmp);
        return ret;
}

static DEVICE_ATTR_RO(runtime_active_time);

static ssize_t runtime_suspended_time_show(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        int ret;
        u64 tmp = pm_runtime_suspended_time(dev);

        do_div(tmp, NSEC_PER_MSEC);
        ret = sprintf(buf, "%llu\n", tmp);
        return ret;
}

static DEVICE_ATTR_RO(runtime_suspended_time);

static ssize_t runtime_status_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        const char *p;

        if (dev->power.runtime_error) {
                p = "error\n";
        } else if (dev->power.disable_depth) {
                p = "unsupported\n";
        } else {
                switch (dev->power.runtime_status) {
                case RPM_SUSPENDED:
                        p = "suspended\n";
                        break;
                case RPM_SUSPENDING:
                        p = "suspending\n";
                        break;
                case RPM_RESUMING:
                        p = "resuming\n";
                        break;
                case RPM_ACTIVE:
                        p = "active\n";
                        break;
                default:
                        return -EIO;
                }
        }
        return sprintf(buf, p);
}

static DEVICE_ATTR_RO(runtime_status);

static ssize_t autosuspend_delay_ms_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        if (!dev->power.use_autosuspend)
                return -EIO;

        return sprintf(buf, "%d\n", dev->power.autosuspend_delay);
}

static ssize_t autosuspend_delay_ms_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t n)
{
        long delay;

        if (!dev->power.use_autosuspend)
                return -EIO;

        if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay)
                return -EINVAL;

        device_lock(dev);
        pm_runtime_set_autosuspend_delay(dev, delay);
        device_unlock(dev);
        return n;
}

static DEVICE_ATTR_RW(autosuspend_delay_ms);
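
/*
 * A minimal, hypothetical driver-side sketch showing how the
 * autosuspend_delay_ms attribute above becomes effective.  The driver has
 * to opt into autosuspend via pm_runtime_use_autosuspend(); the names
 * below (foo_probe, pdev) are placeholders only:
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct device *dev = &pdev->dev;
 *
 *              pm_runtime_set_autosuspend_delay(dev, 1000);
 *              pm_runtime_use_autosuspend(dev);
 *              pm_runtime_enable(dev);
 *              return 0;
 *      }
 *
 * Hot paths would then bracket hardware accesses with pm_runtime_get_sync()
 * and pm_runtime_mark_last_busy()/pm_runtime_put_autosuspend().
 */
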
static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
                                             struct device_attribute *attr,
                                             char *buf)
{
        s32 value = dev_pm_qos_requested_resume_latency(dev);

        if (value == 0)
                return sprintf(buf, "n/a\n");
        if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
                value = 0;

        return sprintf(buf, "%d\n", value);
}

static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
                                              struct device_attribute *attr,
                                              const char *buf, size_t n)
{
        s32 value;
        int ret;

        if (!kstrtos32(buf, 0, &value)) {
                /*
                 * Prevent users from writing negative or "no constraint"
                 * values directly.
                 */
                if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
                        return -EINVAL;

                if (value == 0)
                        value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
        } else if (sysfs_streq(buf, "n/a")) {
                value = 0;
        } else {
                return -EINVAL;
        }

        ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
                                        value);
        return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_resume_latency_us);

static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
                                                struct device_attribute *attr,
                                                char *buf)
{
        s32 value = dev_pm_qos_get_user_latency_tolerance(dev);

        if (value < 0)
                return sprintf(buf, "auto\n");
        if (value == PM_QOS_LATENCY_ANY)
                return sprintf(buf, "any\n");

        return sprintf(buf, "%d\n", value);
}

static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
                                                 struct device_attribute *attr,
                                                 const char *buf, size_t n)
{
        s32 value;
        int ret;

        if (kstrtos32(buf, 0, &value) == 0) {
                /* Users can't write negative values directly */
                if (value < 0)
                        return -EINVAL;
        } else {
                if (sysfs_streq(buf, "auto"))
                        value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
                else if (sysfs_streq(buf, "any"))
                        value = PM_QOS_LATENCY_ANY;
                else
                        return -EINVAL;
        }
        ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
        return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);

static ssize_t pm_qos_no_power_off_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
                                        & PM_QOS_FLAG_NO_POWER_OFF));
}

static ssize_t pm_qos_no_power_off_store(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t n)
{
        int ret;

        if (kstrtoint(buf, 0, &ret))
                return -EINVAL;

        if (ret != 0 && ret != 1)
                return -EINVAL;

        ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
        return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_no_power_off);
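
/*
 * Note that the PM QoS attributes above are not created unconditionally.
 * pm_qos_resume_latency_us and pm_qos_no_power_off only show up after bus
 * or driver code has exposed them, typically through
 * dev_pm_qos_expose_latency_limit() and dev_pm_qos_expose_flags(), while
 * pm_qos_latency_tolerance_us requires the device to provide a
 * set_latency_tolerance() callback (see dpm_sysfs_add() below).
 */
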
#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";

static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        return sprintf(buf, "%s\n", device_can_wakeup(dev)
                ? (device_may_wakeup(dev) ? _enabled : _disabled)
                : "");
}

static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t n)
{
        if (!device_can_wakeup(dev))
                return -EINVAL;

        if (sysfs_streq(buf, _enabled))
                device_set_wakeup_enable(dev, 1);
        else if (sysfs_streq(buf, _disabled))
                device_set_wakeup_enable(dev, 0);
        else
                return -EINVAL;
        return n;
}

static DEVICE_ATTR_RW(wakeup);
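
/*
 * For the wakeup attributes in this group to be of use, a driver (or bus)
 * has to mark the device wakeup capable and honour the user's choice in
 * its suspend path.  A hypothetical sketch (foo->irq is a placeholder):
 *
 *      // in probe:
 *      device_init_wakeup(dev, true);
 *
 *      // in the system suspend callback:
 *      if (device_may_wakeup(dev))
 *              enable_irq_wake(foo->irq);
 *
 * device_init_wakeup() marks the device wakeup capable, which makes these
 * attributes appear, and enables wakeup by default; device_may_wakeup()
 * reflects the current power/wakeup setting.
 */
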
static ssize_t wakeup_count_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        unsigned long count = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                /* Number of signaled wakeup events (not the abort count). */
                count = dev->power.wakeup->event_count;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_count);

static ssize_t wakeup_active_count_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        unsigned long count = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                count = dev->power.wakeup->active_count;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_active_count);

static ssize_t wakeup_abort_count_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        unsigned long count = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                count = dev->power.wakeup->wakeup_count;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_abort_count);

static ssize_t wakeup_expire_count_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        unsigned long count = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                count = dev->power.wakeup->expire_count;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_expire_count);

static ssize_t wakeup_active_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        unsigned int active = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                active = dev->power.wakeup->active;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_active);

static ssize_t wakeup_total_time_ms_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        s64 msec = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                msec = ktime_to_ms(dev->power.wakeup->total_time);
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_total_time_ms);

static ssize_t wakeup_max_time_ms_show(struct device *dev,
                                       struct device_attribute *attr, char *buf)
{
        s64 msec = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                msec = ktime_to_ms(dev->power.wakeup->max_time);
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_max_time_ms);

static ssize_t wakeup_last_time_ms_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        s64 msec = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                msec = ktime_to_ms(dev->power.wakeup->last_time);
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}

static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
                                                kgid_t kgid)
{
        if (dev->power.wakeup && dev->power.wakeup->dev)
                return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
        return 0;
}

static DEVICE_ATTR_RO(wakeup_last_time_ms);

#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
                                                 struct device_attribute *attr,
                                                 char *buf)
{
        s64 msec = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time);
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
#else /* CONFIG_PM_SLEEP */
static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
                                                kgid_t kgid)
{
        return 0;
}
#endif

#ifdef CONFIG_PM_ADVANCED_DEBUG
static ssize_t runtime_usage_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
static DEVICE_ATTR_RO(runtime_usage);

static ssize_t runtime_active_kids_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        return sprintf(buf, "%d\n", dev->power.ignore_children ?
                0 : atomic_read(&dev->power.child_count));
}
static DEVICE_ATTR_RO(runtime_active_kids);

static ssize_t runtime_enabled_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        if (dev->power.disable_depth && (dev->power.runtime_auto == false))
                return sprintf(buf, "disabled & forbidden\n");
        if (dev->power.disable_depth)
                return sprintf(buf, "disabled\n");
        if (dev->power.runtime_auto == false)
                return sprintf(buf, "forbidden\n");
        return sprintf(buf, "enabled\n");
}
static DEVICE_ATTR_RO(runtime_enabled);

#ifdef CONFIG_PM_SLEEP
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%s\n",
                       device_async_suspend_enabled(dev) ?
                               _enabled : _disabled);
}

static ssize_t async_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t n)
{
        if (sysfs_streq(buf, _enabled))
                device_enable_async_suspend(dev);
        else if (sysfs_streq(buf, _disabled))
                device_disable_async_suspend(dev);
        else
                return -EINVAL;
        return n;
}

static DEVICE_ATTR_RW(async);

#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM_ADVANCED_DEBUG */
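
/*
 * The async attribute above (available only with CONFIG_PM_ADVANCED_DEBUG)
 * toggles the same per-device flag that bus or driver code can set when
 * the device is registered, e.g. (hypothetical driver code):
 *
 *      device_enable_async_suspend(&pdev->dev);
 *
 * so that the device can be suspended and resumed concurrently with other
 * devices during system-wide sleep transitions.
 */
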
static struct attribute *power_attrs[] = {
#ifdef CONFIG_PM_ADVANCED_DEBUG
#ifdef CONFIG_PM_SLEEP
        &dev_attr_async.attr,
#endif
        &dev_attr_runtime_status.attr,
        &dev_attr_runtime_usage.attr,
        &dev_attr_runtime_active_kids.attr,
        &dev_attr_runtime_enabled.attr,
#endif /* CONFIG_PM_ADVANCED_DEBUG */
        NULL,
};
static const struct attribute_group pm_attr_group = {
        .name = power_group_name,
        .attrs = power_attrs,
};

static struct attribute *wakeup_attrs[] = {
#ifdef CONFIG_PM_SLEEP
        &dev_attr_wakeup.attr,
        &dev_attr_wakeup_count.attr,
        &dev_attr_wakeup_active_count.attr,
        &dev_attr_wakeup_abort_count.attr,
        &dev_attr_wakeup_expire_count.attr,
        &dev_attr_wakeup_active.attr,
        &dev_attr_wakeup_total_time_ms.attr,
        &dev_attr_wakeup_max_time_ms.attr,
        &dev_attr_wakeup_last_time_ms.attr,
#ifdef CONFIG_PM_AUTOSLEEP
        &dev_attr_wakeup_prevent_sleep_time_ms.attr,
#endif
#endif
        NULL,
};
static const struct attribute_group pm_wakeup_attr_group = {
        .name = power_group_name,
        .attrs = wakeup_attrs,
};

static struct attribute *runtime_attrs[] = {
#ifndef CONFIG_PM_ADVANCED_DEBUG
        &dev_attr_runtime_status.attr,
#endif
        &dev_attr_control.attr,
        &dev_attr_runtime_suspended_time.attr,
        &dev_attr_runtime_active_time.attr,
        &dev_attr_autosuspend_delay_ms.attr,
        NULL,
};
static const struct attribute_group pm_runtime_attr_group = {
        .name = power_group_name,
        .attrs = runtime_attrs,
};

static struct attribute *pm_qos_resume_latency_attrs[] = {
        &dev_attr_pm_qos_resume_latency_us.attr,
        NULL,
};
static const struct attribute_group pm_qos_resume_latency_attr_group = {
        .name = power_group_name,
        .attrs = pm_qos_resume_latency_attrs,
};

static struct attribute *pm_qos_latency_tolerance_attrs[] = {
        &dev_attr_pm_qos_latency_tolerance_us.attr,
        NULL,
};
static const struct attribute_group pm_qos_latency_tolerance_attr_group = {
        .name = power_group_name,
        .attrs = pm_qos_latency_tolerance_attrs,
};

static struct attribute *pm_qos_flags_attrs[] = {
        &dev_attr_pm_qos_no_power_off.attr,
        NULL,
};
static const struct attribute_group pm_qos_flags_attr_group = {
        .name = power_group_name,
        .attrs = pm_qos_flags_attrs,
};
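
/*
 * All of the attribute groups above share the same name ("power"), so the
 * groups merged in with sysfs_merge_group() below end up in the single
 * power/ directory created by sysfs_create_group(&dev->kobj, &pm_attr_group)
 * in dpm_sysfs_add(), rather than in directories of their own.
 */
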
int dpm_sysfs_add(struct device *dev)
{
        int rc;

        /* No need to create PM sysfs if explicitly disabled. */
        if (device_pm_not_required(dev))
                return 0;

        rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
        if (rc)
                return rc;

        if (!pm_runtime_has_no_callbacks(dev)) {
                rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
                if (rc)
                        goto err_out;
        }
        if (device_can_wakeup(dev)) {
                rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
                if (rc)
                        goto err_runtime;
        }
        if (dev->power.set_latency_tolerance) {
                rc = sysfs_merge_group(&dev->kobj,
                                       &pm_qos_latency_tolerance_attr_group);
                if (rc)
                        goto err_wakeup;
        }
        rc = pm_wakeup_source_sysfs_add(dev);
        if (rc)
                goto err_latency;
        return 0;

err_latency:
        sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
err_wakeup:
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
err_runtime:
        sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
err_out:
        sysfs_remove_group(&dev->kobj, &pm_attr_group);
        return rc;
}

int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
{
        int rc;

        if (device_pm_not_required(dev))
                return 0;

        rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid);
        if (rc)
                return rc;

        if (!pm_runtime_has_no_callbacks(dev)) {
                rc = sysfs_group_change_owner(
                        &dev->kobj, &pm_runtime_attr_group, kuid, kgid);
                if (rc)
                        return rc;
        }

        if (device_can_wakeup(dev)) {
                rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group,
                                              kuid, kgid);
                if (rc)
                        return rc;

                rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid);
                if (rc)
                        return rc;
        }

        if (dev->power.set_latency_tolerance) {
                rc = sysfs_group_change_owner(
                        &dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid,
                        kgid);
                if (rc)
                        return rc;
        }
        return 0;
}

int wakeup_sysfs_add(struct device *dev)
{
        int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);

        if (!ret)
                kobject_uevent(&dev->kobj, KOBJ_CHANGE);

        return ret;
}

void wakeup_sysfs_remove(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
        kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}

int pm_qos_sysfs_add_resume_latency(struct device *dev)
{
        return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}

void pm_qos_sysfs_remove_resume_latency(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}

int pm_qos_sysfs_add_flags(struct device *dev)
{
        return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
}

void pm_qos_sysfs_remove_flags(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
}

int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
{
        return sysfs_merge_group(&dev->kobj,
                                 &pm_qos_latency_tolerance_attr_group);
}

void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
}

void rpm_sysfs_remove(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
}

void dpm_sysfs_remove(struct device *dev)
{
        if (device_pm_not_required(dev))
                return;
        sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
        dev_pm_qos_constraints_destroy(dev);
        rpm_sysfs_remove(dev);
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
        sysfs_remove_group(&dev->kobj, &pm_attr_group);
}