1 // SPDX-License-Identifier: GPL-2.0 2 /* sysfs entries for device PM */ 3 #include <linux/device.h> 4 #include <linux/string.h> 5 #include <linux/export.h> 6 #include <linux/pm_qos.h> 7 #include <linux/pm_runtime.h> 8 #include <linux/atomic.h> 9 #include <linux/jiffies.h> 10 #include "power.h" 11 12 /* 13 * control - Report/change current runtime PM setting of the device 14 * 15 * Runtime power management of a device can be blocked with the help of 16 * this attribute. All devices have one of the following two values for 17 * the power/control file: 18 * 19 * + "auto\n" to allow the device to be power managed at run time; 20 * + "on\n" to prevent the device from being power managed at run time; 21 * 22 * The default for all devices is "auto", which means that devices may be 23 * subject to automatic power management, depending on their drivers. 24 * Changing this attribute to "on" prevents the driver from power managing 25 * the device at run time. Doing that while the device is suspended causes 26 * it to be woken up. 27 * 28 * wakeup - Report/change current wakeup option for device 29 * 30 * Some devices support "wakeup" events, which are hardware signals 31 * used to activate devices from suspended or low power states. Such 32 * devices have one of three values for the sysfs power/wakeup file: 33 * 34 * + "enabled\n" to issue the events; 35 * + "disabled\n" not to do so; or 36 * + "\n" for temporary or permanent inability to issue wakeup. 37 * 38 * (For example, unconfigured USB devices can't issue wakeups.) 39 * 40 * Familiar examples of devices that can issue wakeup events include 41 * keyboards and mice (both PS2 and USB styles), power buttons, modems, 42 * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events 43 * will wake the entire system from a suspend state; others may just 44 * wake up the device (if the system as a whole is already active). 45 * Some wakeup events use normal IRQ lines; other use special out 46 * of band signaling. 47 * 48 * It is the responsibility of device drivers to enable (or disable) 49 * wakeup signaling as part of changing device power states, respecting 50 * the policy choices provided through the driver model. 51 * 52 * Devices may not be able to generate wakeup events from all power 53 * states. Also, the events may be ignored in some configurations; 54 * for example, they might need help from other devices that aren't 55 * active, or which may have wakeup disabled. Some drivers rely on 56 * wakeup events internally (unless they are disabled), keeping 57 * their hardware in low power modes whenever they're unused. This 58 * saves runtime power, without requiring system-wide sleep states. 59 * 60 * async - Report/change current async suspend setting for the device 61 * 62 * Asynchronous suspend and resume of the device during system-wide power 63 * state transitions can be enabled by writing "enabled" to this file. 64 * Analogously, if "disabled" is written to this file, the device will be 65 * suspended and resumed synchronously. 66 * 67 * All devices have one of the following two values for power/async: 68 * 69 * + "enabled\n" to permit the asynchronous suspend/resume of the device; 70 * + "disabled\n" to forbid it; 71 * 72 * NOTE: It generally is unsafe to permit the asynchronous suspend/resume 73 * of a device unless it is certain that all of the PM dependencies of the 74 * device are known to the PM core. However, for some devices this 75 * attribute is set to "enabled" by bus type code or device drivers and in 76 * that cases it should be safe to leave the default value. 77 * 78 * autosuspend_delay_ms - Report/change a device's autosuspend_delay value 79 * 80 * Some drivers don't want to carry out a runtime suspend as soon as a 81 * device becomes idle; they want it always to remain idle for some period 82 * of time before suspending it. This period is the autosuspend_delay 83 * value (expressed in milliseconds) and it can be controlled by the user. 84 * If the value is negative then the device will never be runtime 85 * suspended. 86 * 87 * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay 88 * value are used only if the driver calls pm_runtime_use_autosuspend(). 89 * 90 * wakeup_count - Report the number of wakeup events related to the device 91 */ 92 93 const char power_group_name[] = "power"; 94 EXPORT_SYMBOL_GPL(power_group_name); 95 96 static const char ctrl_auto[] = "auto"; 97 static const char ctrl_on[] = "on"; 98 99 static ssize_t control_show(struct device *dev, struct device_attribute *attr, 100 char *buf) 101 { 102 return sprintf(buf, "%s\n", 103 dev->power.runtime_auto ? ctrl_auto : ctrl_on); 104 } 105 106 static ssize_t control_store(struct device * dev, struct device_attribute *attr, 107 const char * buf, size_t n) 108 { 109 device_lock(dev); 110 if (sysfs_streq(buf, ctrl_auto)) 111 pm_runtime_allow(dev); 112 else if (sysfs_streq(buf, ctrl_on)) 113 pm_runtime_forbid(dev); 114 else 115 n = -EINVAL; 116 device_unlock(dev); 117 return n; 118 } 119 120 static DEVICE_ATTR_RW(control); 121 122 static ssize_t runtime_active_time_show(struct device *dev, 123 struct device_attribute *attr, char *buf) 124 { 125 int ret; 126 u64 tmp = pm_runtime_active_time(dev); 127 do_div(tmp, NSEC_PER_MSEC); 128 ret = sprintf(buf, "%llu\n", tmp); 129 return ret; 130 } 131 132 static DEVICE_ATTR_RO(runtime_active_time); 133 134 static ssize_t runtime_suspended_time_show(struct device *dev, 135 struct device_attribute *attr, char *buf) 136 { 137 int ret; 138 u64 tmp = pm_runtime_suspended_time(dev); 139 do_div(tmp, NSEC_PER_MSEC); 140 ret = sprintf(buf, "%llu\n", tmp); 141 return ret; 142 } 143 144 static DEVICE_ATTR_RO(runtime_suspended_time); 145 146 static ssize_t runtime_status_show(struct device *dev, 147 struct device_attribute *attr, char *buf) 148 { 149 const char *p; 150 151 if (dev->power.runtime_error) { 152 p = "error\n"; 153 } else if (dev->power.disable_depth) { 154 p = "unsupported\n"; 155 } else { 156 switch (dev->power.runtime_status) { 157 case RPM_SUSPENDED: 158 p = "suspended\n"; 159 break; 160 case RPM_SUSPENDING: 161 p = "suspending\n"; 162 break; 163 case RPM_RESUMING: 164 p = "resuming\n"; 165 break; 166 case RPM_ACTIVE: 167 p = "active\n"; 168 break; 169 default: 170 return -EIO; 171 } 172 } 173 return sprintf(buf, p); 174 } 175 176 static DEVICE_ATTR_RO(runtime_status); 177 178 static ssize_t autosuspend_delay_ms_show(struct device *dev, 179 struct device_attribute *attr, char *buf) 180 { 181 if (!dev->power.use_autosuspend) 182 return -EIO; 183 return sprintf(buf, "%d\n", dev->power.autosuspend_delay); 184 } 185 186 static ssize_t autosuspend_delay_ms_store(struct device *dev, 187 struct device_attribute *attr, const char *buf, size_t n) 188 { 189 long delay; 190 191 if (!dev->power.use_autosuspend) 192 return -EIO; 193 194 if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay) 195 return -EINVAL; 196 197 device_lock(dev); 198 pm_runtime_set_autosuspend_delay(dev, delay); 199 device_unlock(dev); 200 return n; 201 } 202 203 static DEVICE_ATTR_RW(autosuspend_delay_ms); 204 205 static ssize_t pm_qos_resume_latency_us_show(struct device *dev, 206 struct device_attribute *attr, 207 char *buf) 208 { 209 s32 value = dev_pm_qos_requested_resume_latency(dev); 210 211 if (value == 0) 212 return sprintf(buf, "n/a\n"); 213 if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) 214 value = 0; 215 216 return sprintf(buf, "%d\n", value); 217 } 218 219 static ssize_t pm_qos_resume_latency_us_store(struct device *dev, 220 struct device_attribute *attr, 221 const char *buf, size_t n) 222 { 223 s32 value; 224 int ret; 225 226 if (!kstrtos32(buf, 0, &value)) { 227 /* 228 * Prevent users from writing negative or "no constraint" values 229 * directly. 230 */ 231 if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) 232 return -EINVAL; 233 234 if (value == 0) 235 value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; 236 } else if (sysfs_streq(buf, "n/a")) { 237 value = 0; 238 } else { 239 return -EINVAL; 240 } 241 242 ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, 243 value); 244 return ret < 0 ? ret : n; 245 } 246 247 static DEVICE_ATTR_RW(pm_qos_resume_latency_us); 248 249 static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev, 250 struct device_attribute *attr, 251 char *buf) 252 { 253 s32 value = dev_pm_qos_get_user_latency_tolerance(dev); 254 255 if (value < 0) 256 return sprintf(buf, "auto\n"); 257 if (value == PM_QOS_LATENCY_ANY) 258 return sprintf(buf, "any\n"); 259 260 return sprintf(buf, "%d\n", value); 261 } 262 263 static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev, 264 struct device_attribute *attr, 265 const char *buf, size_t n) 266 { 267 s32 value; 268 int ret; 269 270 if (kstrtos32(buf, 0, &value) == 0) { 271 /* Users can't write negative values directly */ 272 if (value < 0) 273 return -EINVAL; 274 } else { 275 if (sysfs_streq(buf, "auto")) 276 value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; 277 else if (sysfs_streq(buf, "any")) 278 value = PM_QOS_LATENCY_ANY; 279 else 280 return -EINVAL; 281 } 282 ret = dev_pm_qos_update_user_latency_tolerance(dev, value); 283 return ret < 0 ? ret : n; 284 } 285 286 static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us); 287 288 static ssize_t pm_qos_no_power_off_show(struct device *dev, 289 struct device_attribute *attr, 290 char *buf) 291 { 292 return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) 293 & PM_QOS_FLAG_NO_POWER_OFF)); 294 } 295 296 static ssize_t pm_qos_no_power_off_store(struct device *dev, 297 struct device_attribute *attr, 298 const char *buf, size_t n) 299 { 300 int ret; 301 302 if (kstrtoint(buf, 0, &ret)) 303 return -EINVAL; 304 305 if (ret != 0 && ret != 1) 306 return -EINVAL; 307 308 ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret); 309 return ret < 0 ? ret : n; 310 } 311 312 static DEVICE_ATTR_RW(pm_qos_no_power_off); 313 314 #ifdef CONFIG_PM_SLEEP 315 static const char _enabled[] = "enabled"; 316 static const char _disabled[] = "disabled"; 317 318 static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr, 319 char *buf) 320 { 321 return sprintf(buf, "%s\n", device_can_wakeup(dev) 322 ? (device_may_wakeup(dev) ? _enabled : _disabled) 323 : ""); 324 } 325 326 static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr, 327 const char *buf, size_t n) 328 { 329 if (!device_can_wakeup(dev)) 330 return -EINVAL; 331 332 if (sysfs_streq(buf, _enabled)) 333 device_set_wakeup_enable(dev, 1); 334 else if (sysfs_streq(buf, _disabled)) 335 device_set_wakeup_enable(dev, 0); 336 else 337 return -EINVAL; 338 return n; 339 } 340 341 static DEVICE_ATTR_RW(wakeup); 342 343 static ssize_t wakeup_count_show(struct device *dev, 344 struct device_attribute *attr, char *buf) 345 { 346 unsigned long count = 0; 347 bool enabled = false; 348 349 spin_lock_irq(&dev->power.lock); 350 if (dev->power.wakeup) { 351 count = dev->power.wakeup->wakeup_count; 352 enabled = true; 353 } 354 spin_unlock_irq(&dev->power.lock); 355 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); 356 } 357 358 static DEVICE_ATTR_RO(wakeup_count); 359 360 static ssize_t wakeup_active_count_show(struct device *dev, 361 struct device_attribute *attr, 362 char *buf) 363 { 364 unsigned long count = 0; 365 bool enabled = false; 366 367 spin_lock_irq(&dev->power.lock); 368 if (dev->power.wakeup) { 369 count = dev->power.wakeup->active_count; 370 enabled = true; 371 } 372 spin_unlock_irq(&dev->power.lock); 373 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); 374 } 375 376 static DEVICE_ATTR_RO(wakeup_active_count); 377 378 static ssize_t wakeup_abort_count_show(struct device *dev, 379 struct device_attribute *attr, 380 char *buf) 381 { 382 unsigned long count = 0; 383 bool enabled = false; 384 385 spin_lock_irq(&dev->power.lock); 386 if (dev->power.wakeup) { 387 count = dev->power.wakeup->wakeup_count; 388 enabled = true; 389 } 390 spin_unlock_irq(&dev->power.lock); 391 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); 392 } 393 394 static DEVICE_ATTR_RO(wakeup_abort_count); 395 396 static ssize_t wakeup_expire_count_show(struct device *dev, 397 struct device_attribute *attr, 398 char *buf) 399 { 400 unsigned long count = 0; 401 bool enabled = false; 402 403 spin_lock_irq(&dev->power.lock); 404 if (dev->power.wakeup) { 405 count = dev->power.wakeup->expire_count; 406 enabled = true; 407 } 408 spin_unlock_irq(&dev->power.lock); 409 return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); 410 } 411 412 static DEVICE_ATTR_RO(wakeup_expire_count); 413 414 static ssize_t wakeup_active_show(struct device *dev, 415 struct device_attribute *attr, char *buf) 416 { 417 unsigned int active = 0; 418 bool enabled = false; 419 420 spin_lock_irq(&dev->power.lock); 421 if (dev->power.wakeup) { 422 active = dev->power.wakeup->active; 423 enabled = true; 424 } 425 spin_unlock_irq(&dev->power.lock); 426 return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n"); 427 } 428 429 static DEVICE_ATTR_RO(wakeup_active); 430 431 static ssize_t wakeup_total_time_ms_show(struct device *dev, 432 struct device_attribute *attr, 433 char *buf) 434 { 435 s64 msec = 0; 436 bool enabled = false; 437 438 spin_lock_irq(&dev->power.lock); 439 if (dev->power.wakeup) { 440 msec = ktime_to_ms(dev->power.wakeup->total_time); 441 enabled = true; 442 } 443 spin_unlock_irq(&dev->power.lock); 444 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); 445 } 446 447 static DEVICE_ATTR_RO(wakeup_total_time_ms); 448 449 static ssize_t wakeup_max_time_ms_show(struct device *dev, 450 struct device_attribute *attr, char *buf) 451 { 452 s64 msec = 0; 453 bool enabled = false; 454 455 spin_lock_irq(&dev->power.lock); 456 if (dev->power.wakeup) { 457 msec = ktime_to_ms(dev->power.wakeup->max_time); 458 enabled = true; 459 } 460 spin_unlock_irq(&dev->power.lock); 461 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); 462 } 463 464 static DEVICE_ATTR_RO(wakeup_max_time_ms); 465 466 static ssize_t wakeup_last_time_ms_show(struct device *dev, 467 struct device_attribute *attr, 468 char *buf) 469 { 470 s64 msec = 0; 471 bool enabled = false; 472 473 spin_lock_irq(&dev->power.lock); 474 if (dev->power.wakeup) { 475 msec = ktime_to_ms(dev->power.wakeup->last_time); 476 enabled = true; 477 } 478 spin_unlock_irq(&dev->power.lock); 479 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); 480 } 481 482 static DEVICE_ATTR_RO(wakeup_last_time_ms); 483 484 #ifdef CONFIG_PM_AUTOSLEEP 485 static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev, 486 struct device_attribute *attr, 487 char *buf) 488 { 489 s64 msec = 0; 490 bool enabled = false; 491 492 spin_lock_irq(&dev->power.lock); 493 if (dev->power.wakeup) { 494 msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time); 495 enabled = true; 496 } 497 spin_unlock_irq(&dev->power.lock); 498 return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); 499 } 500 501 static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms); 502 #endif /* CONFIG_PM_AUTOSLEEP */ 503 #endif /* CONFIG_PM_SLEEP */ 504 505 #ifdef CONFIG_PM_ADVANCED_DEBUG 506 static ssize_t runtime_usage_show(struct device *dev, 507 struct device_attribute *attr, char *buf) 508 { 509 return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count)); 510 } 511 static DEVICE_ATTR_RO(runtime_usage); 512 513 static ssize_t runtime_active_kids_show(struct device *dev, 514 struct device_attribute *attr, 515 char *buf) 516 { 517 return sprintf(buf, "%d\n", dev->power.ignore_children ? 518 0 : atomic_read(&dev->power.child_count)); 519 } 520 static DEVICE_ATTR_RO(runtime_active_kids); 521 522 static ssize_t runtime_enabled_show(struct device *dev, 523 struct device_attribute *attr, char *buf) 524 { 525 if (dev->power.disable_depth && (dev->power.runtime_auto == false)) 526 return sprintf(buf, "disabled & forbidden\n"); 527 if (dev->power.disable_depth) 528 return sprintf(buf, "disabled\n"); 529 if (dev->power.runtime_auto == false) 530 return sprintf(buf, "forbidden\n"); 531 return sprintf(buf, "enabled\n"); 532 } 533 static DEVICE_ATTR_RO(runtime_enabled); 534 535 #ifdef CONFIG_PM_SLEEP 536 static ssize_t async_show(struct device *dev, struct device_attribute *attr, 537 char *buf) 538 { 539 return sprintf(buf, "%s\n", 540 device_async_suspend_enabled(dev) ? 541 _enabled : _disabled); 542 } 543 544 static ssize_t async_store(struct device *dev, struct device_attribute *attr, 545 const char *buf, size_t n) 546 { 547 if (sysfs_streq(buf, _enabled)) 548 device_enable_async_suspend(dev); 549 else if (sysfs_streq(buf, _disabled)) 550 device_disable_async_suspend(dev); 551 else 552 return -EINVAL; 553 return n; 554 } 555 556 static DEVICE_ATTR_RW(async); 557 558 #endif /* CONFIG_PM_SLEEP */ 559 #endif /* CONFIG_PM_ADVANCED_DEBUG */ 560 561 static struct attribute *power_attrs[] = { 562 #ifdef CONFIG_PM_ADVANCED_DEBUG 563 #ifdef CONFIG_PM_SLEEP 564 &dev_attr_async.attr, 565 #endif 566 &dev_attr_runtime_status.attr, 567 &dev_attr_runtime_usage.attr, 568 &dev_attr_runtime_active_kids.attr, 569 &dev_attr_runtime_enabled.attr, 570 #endif /* CONFIG_PM_ADVANCED_DEBUG */ 571 NULL, 572 }; 573 static const struct attribute_group pm_attr_group = { 574 .name = power_group_name, 575 .attrs = power_attrs, 576 }; 577 578 static struct attribute *wakeup_attrs[] = { 579 #ifdef CONFIG_PM_SLEEP 580 &dev_attr_wakeup.attr, 581 &dev_attr_wakeup_count.attr, 582 &dev_attr_wakeup_active_count.attr, 583 &dev_attr_wakeup_abort_count.attr, 584 &dev_attr_wakeup_expire_count.attr, 585 &dev_attr_wakeup_active.attr, 586 &dev_attr_wakeup_total_time_ms.attr, 587 &dev_attr_wakeup_max_time_ms.attr, 588 &dev_attr_wakeup_last_time_ms.attr, 589 #ifdef CONFIG_PM_AUTOSLEEP 590 &dev_attr_wakeup_prevent_sleep_time_ms.attr, 591 #endif 592 #endif 593 NULL, 594 }; 595 static const struct attribute_group pm_wakeup_attr_group = { 596 .name = power_group_name, 597 .attrs = wakeup_attrs, 598 }; 599 600 static struct attribute *runtime_attrs[] = { 601 #ifndef CONFIG_PM_ADVANCED_DEBUG 602 &dev_attr_runtime_status.attr, 603 #endif 604 &dev_attr_control.attr, 605 &dev_attr_runtime_suspended_time.attr, 606 &dev_attr_runtime_active_time.attr, 607 &dev_attr_autosuspend_delay_ms.attr, 608 NULL, 609 }; 610 static const struct attribute_group pm_runtime_attr_group = { 611 .name = power_group_name, 612 .attrs = runtime_attrs, 613 }; 614 615 static struct attribute *pm_qos_resume_latency_attrs[] = { 616 &dev_attr_pm_qos_resume_latency_us.attr, 617 NULL, 618 }; 619 static const struct attribute_group pm_qos_resume_latency_attr_group = { 620 .name = power_group_name, 621 .attrs = pm_qos_resume_latency_attrs, 622 }; 623 624 static struct attribute *pm_qos_latency_tolerance_attrs[] = { 625 &dev_attr_pm_qos_latency_tolerance_us.attr, 626 NULL, 627 }; 628 static const struct attribute_group pm_qos_latency_tolerance_attr_group = { 629 .name = power_group_name, 630 .attrs = pm_qos_latency_tolerance_attrs, 631 }; 632 633 static struct attribute *pm_qos_flags_attrs[] = { 634 &dev_attr_pm_qos_no_power_off.attr, 635 NULL, 636 }; 637 static const struct attribute_group pm_qos_flags_attr_group = { 638 .name = power_group_name, 639 .attrs = pm_qos_flags_attrs, 640 }; 641 642 int dpm_sysfs_add(struct device *dev) 643 { 644 int rc; 645 646 /* No need to create PM sysfs if explicitly disabled. */ 647 if (device_pm_not_required(dev)) 648 return 0; 649 650 rc = sysfs_create_group(&dev->kobj, &pm_attr_group); 651 if (rc) 652 return rc; 653 654 if (pm_runtime_callbacks_present(dev)) { 655 rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group); 656 if (rc) 657 goto err_out; 658 } 659 if (device_can_wakeup(dev)) { 660 rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); 661 if (rc) 662 goto err_runtime; 663 } 664 if (dev->power.set_latency_tolerance) { 665 rc = sysfs_merge_group(&dev->kobj, 666 &pm_qos_latency_tolerance_attr_group); 667 if (rc) 668 goto err_wakeup; 669 } 670 return 0; 671 672 err_wakeup: 673 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 674 err_runtime: 675 sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); 676 err_out: 677 sysfs_remove_group(&dev->kobj, &pm_attr_group); 678 return rc; 679 } 680 681 int wakeup_sysfs_add(struct device *dev) 682 { 683 return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); 684 } 685 686 void wakeup_sysfs_remove(struct device *dev) 687 { 688 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 689 } 690 691 int pm_qos_sysfs_add_resume_latency(struct device *dev) 692 { 693 return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); 694 } 695 696 void pm_qos_sysfs_remove_resume_latency(struct device *dev) 697 { 698 sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); 699 } 700 701 int pm_qos_sysfs_add_flags(struct device *dev) 702 { 703 return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group); 704 } 705 706 void pm_qos_sysfs_remove_flags(struct device *dev) 707 { 708 sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); 709 } 710 711 int pm_qos_sysfs_add_latency_tolerance(struct device *dev) 712 { 713 return sysfs_merge_group(&dev->kobj, 714 &pm_qos_latency_tolerance_attr_group); 715 } 716 717 void pm_qos_sysfs_remove_latency_tolerance(struct device *dev) 718 { 719 sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); 720 } 721 722 void rpm_sysfs_remove(struct device *dev) 723 { 724 sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); 725 } 726 727 void dpm_sysfs_remove(struct device *dev) 728 { 729 if (device_pm_not_required(dev)) 730 return; 731 sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); 732 dev_pm_qos_constraints_destroy(dev); 733 rpm_sysfs_remove(dev); 734 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 735 sysfs_remove_group(&dev->kobj, &pm_attr_group); 736 } 737