/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
	struct clock_event_device *ce;
	int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
			bool ismax)
{
	u64 clc = (u64) latch << evt->shift;
	u64 rnd;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}
	rnd = (u64) evt->mult - 1;

	/*
	 * Upper bound sanity check. If the backwards conversion is
	 * not equal to latch, we know that the above shift overflowed.
	 */
	if ((clc >> evt->shift) != (u64)latch)
		clc = ~0ULL;

	/*
	 * Scaled math oddities:
	 *
	 * For mult <= (1 << shift) we can safely add mult - 1 to
	 * prevent integer rounding loss. So the backwards conversion
	 * from nsec to device ticks will be correct.
	 *
	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
	 * need to be careful. Adding mult - 1 will result in a value
	 * which when converted back to device ticks can be larger
	 * than latch by up to (mult - 1) >> shift. For the min_delta
	 * calculation we still want to apply this in order to stay
	 * above the minimum device ticks limit. For the upper limit
	 * we would end up with a latch value larger than the upper
	 * limit of the device, so we omit the add to stay below the
	 * device upper boundary.
	 *
	 * Also omit the add if it would overflow the u64 boundary.
	 */
	if ((~0ULL - clc > rnd) &&
	    (!ismax || evt->mult <= (1ULL << evt->shift)))
		clc += rnd;

	do_div(clc, evt->mult);

	/* Deltas less than 1usec are pointless noise */
	return clc > 1000 ? clc : 1000;
}

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
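
/*
 * Worked example with hypothetical numbers: assume an 80 MHz event device
 * for which clockevents_calc_mult_shift() picked shift = 32 and
 * mult ~= 343597384 (0.08 * 2^32, i.e. device ticks per nanosecond scaled
 * by 2^shift).  Converting latch = 1000 ticks back to nanoseconds:
 *
 *	u64 clc = 1000ULL << 32;	// 4294967296000
 *	clc += 343597384 - 1;		// mult - 1, avoids rounding loss
 *	do_div(clc, 343597384);		// ~12500ns, i.e. 12.5ns per tick
 *
 * A latch of 2 ticks would come out at ~25ns and be raised to the 1000ns
 * floor by the final clamp in cev_delta2ns().
 */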

static int __clockevents_switch_state(struct clock_event_device *dev,
				      enum clock_event_state state)
{
	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* Transition with new state-specific callbacks */
	switch (state) {
	case CLOCK_EVT_STATE_DETACHED:
		/* The clockevent device is getting replaced. Shut it down. */

	case CLOCK_EVT_STATE_SHUTDOWN:
		if (dev->set_state_shutdown)
			return dev->set_state_shutdown(dev);
		return 0;

	case CLOCK_EVT_STATE_PERIODIC:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
			return -ENOSYS;
		if (dev->set_state_periodic)
			return dev->set_state_periodic(dev);
		return 0;

	case CLOCK_EVT_STATE_ONESHOT:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
			return -ENOSYS;
		if (dev->set_state_oneshot)
			return dev->set_state_oneshot(dev);
		return 0;

	case CLOCK_EVT_STATE_ONESHOT_STOPPED:
		/* Core internal bug */
		if (WARN_ONCE(!clockevent_state_oneshot(dev),
			      "Current state: %d\n",
			      clockevent_get_state(dev)))
			return -EINVAL;

		if (dev->set_state_oneshot_stopped)
			return dev->set_state_oneshot_stopped(dev);
		else
			return -ENOSYS;

	default:
		return -ENOSYS;
	}
}

/**
 * clockevents_switch_state - set the operating state of a clock event device
 * @dev:	device to modify
 * @state:	new state
 *
 * Must be called with interrupts disabled !
 */
void clockevents_switch_state(struct clock_event_device *dev,
			      enum clock_event_state state)
{
	if (clockevent_get_state(dev) != state) {
		if (__clockevents_switch_state(dev, state))
			return;

		clockevent_set_state(dev, state);

		/*
		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (clockevent_state_oneshot(dev)) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
	dev->next_event = KTIME_MAX;
}

/**
 * clockevents_tick_resume - Resume the tick device before using it again
 * @dev:	device to resume
 */
int clockevents_tick_resume(struct clock_event_device *dev)
{
	int ret = 0;

	if (dev->tick_resume)
		ret = dev->tick_resume(dev);

	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:	device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk_deferred(KERN_WARNING
				"CE: Reprogramming failure. Giving up\n");
		dev->next_event = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk_deferred(KERN_WARNING
			"CE: %s increased min_delta_ns to %llu nsec\n",
			dev->name ? dev->name : "?",
			(unsigned long long) dev->min_delta_ns);
	return 0;
}
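
/*
 * Illustrative progression (assuming HZ == 250, so MIN_DELTA_LIMIT is
 * 4000000 ns): a device which keeps rejecting its programmed delta has
 * min_delta_ns raised along
 *
 *	5000 -> 7500 -> 11250 -> 16875 -> ... -> 4000000
 *
 * i.e. by 50% per step, until the one jiffy limit is hit and the next
 * failure makes clockevents_increase_min_delta() give up with -ETIME.
 */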

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (clockevent_state_shutdown(dev))
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta, if that fails as well get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}

#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta = 0;
	int i;

	for (i = 0; i < 10; i++) {
		delta += dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (clockevent_state_shutdown(dev))
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;
	}
	return -ETIME;
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @force:	program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (unlikely(expires < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	dev->next_event = expires;

	if (clockevent_state_shutdown(dev))
		return 0;

	/* We must be in ONESHOT state here */
	WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
		  clockevent_get_state(dev));

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
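
/*
 * Usage sketch (hypothetical caller, for illustration): arm the current
 * CPU's tick device one millisecond from now, falling back to the minimum
 * delta if the hardware rejects the value:
 *
 *	struct clock_event_device *evt =
 *		this_cpu_ptr(&tick_cpu_device)->evtdev;
 *	ktime_t expires = ktime_add_ns(ktime_get(), NSEC_PER_MSEC);
 *
 *	if (clockevents_program_event(evt, expires, true))
 *		pr_warn("CE: programming failed\n");
 *
 * With force == false the call instead returns -ETIME once expires lies
 * in the past and the caller is expected to retry with a later expiry.
 */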

/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		tick_check_new_device(dev);
	}
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
	struct clock_event_device *dev, *newdev = NULL;

	list_for_each_entry(dev, &clockevent_devices, list) {
		if (dev == ced || !clockevent_state_detached(dev))
			continue;

		if (!tick_check_replacement(newdev, dev))
			continue;

		if (!try_module_get(dev->owner))
			continue;

		if (newdev)
			module_put(newdev->owner);
		newdev = dev;
	}
	if (newdev) {
		tick_install_replacement(newdev);
		list_del_init(&ced->list);
	}
	return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
	/* Fast track. Device is unused */
	if (clockevent_state_detached(ced)) {
		list_del_init(&ced->list);
		return 0;
	}

	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
	struct ce_unbind *cu = arg;
	int res;

	raw_spin_lock(&clockevents_lock);
	res = __clockevents_try_unbind(cu->ce, smp_processor_id());
	if (res == -EAGAIN)
		res = clockevents_replace(cu->ce);
	cu->res = res;
	raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
	struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
	return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
	int ret;

	mutex_lock(&clockevents_mutex);
	ret = clockevents_unbind(ced, cpu);
	mutex_unlock(&clockevents_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);
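
/*
 * Unbind sketch (hypothetical driver, for illustration): releasing a
 * device's binding on CPU 0 before tearing it down:
 *
 *	if (clockevents_unbind_device(&foo_clockevent, 0))
 *		pr_warn("foo-timer: still in use on cpu0\n");
 *
 * If the device is the active tick device, __clockevents_try_unbind()
 * returns -EAGAIN and the cross-CPU call first tries to install a
 * replacement via clockevents_replace().
 */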

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	/* Initialize state to DETACHED */
	clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);

	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	if (dev->cpumask == cpu_all_mask) {
		WARN(1, "%s cpumask == cpu_all_mask, using cpu_possible_mask instead\n",
		     dev->name);
		dev->cpumask = cpu_possible_mask;
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

static void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
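
/*
 * Registration sketch (hypothetical driver, for illustration): a 1 MHz
 * oneshot-capable timer with a 16 bit counter could be registered as:
 *
 *	static struct clock_event_device foo_clockevent = {
 *		.name			= "foo-timer",
 *		.features		= CLOCK_EVT_FEAT_ONESHOT,
 *		.rating			= 300,
 *		.set_next_event		= foo_set_next_event,
 *		.set_state_shutdown	= foo_shutdown,
 *	};
 *
 *	foo_clockevent.cpumask = cpumask_of(0);
 *	clockevents_config_and_register(&foo_clockevent, 1000000, 2, 0xffff);
 *
 * clockevents_config() then derives mult/shift and min/max_delta_ns from
 * the frequency and the 2..0xffff tick range before registration.
 */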

int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (clockevent_state_oneshot(dev))
		return clockevents_program_event(dev, dev->next_event, false);

	if (clockevent_state_periodic(dev))
		return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);

	return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = tick_broadcast_update_freq(dev, freq);
	if (ret == -ENODEV)
		ret = __clockevents_update_freq(dev, freq);
	local_irq_restore(flags);
	return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from various tick functions with clockevents_lock held and
 * interrupts disabled.
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(!clockevent_state_detached(new));
		clockevents_shutdown(new);
	}
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend && !clockevent_state_detached(dev))
			dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume && !clockevent_state_detached(dev))
			dev->resume(dev);
}
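
/*
 * Frequency-update sketch (hypothetical caller, for illustration): a
 * driver whose timer is clocked from a scaling source would re-run the
 * configuration from its rate-change notifier:
 *
 *	clockevents_update_freq(&foo_clockevent, new_rate_hz);
 *
 * As documented above this must run on the CPU the device serves; the
 * broadcast device is handled by tick_broadcast_update_freq() first.
 */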

#ifdef CONFIG_HOTPLUG_CPU
/**
 * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
 */
void tick_cleanup_dead_cpu(int cpu)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	tick_shutdown_broadcast_oneshot(cpu);
	tick_shutdown_broadcast(cpu);
	tick_shutdown(cpu);
	/*
	 * Unregister the clock event devices which were
	 * released from the users in the notify chain.
	 */
	list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
		list_del(&dev->list);
	/*
	 * Now check whether the CPU has left unused per cpu devices
	 */
	list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
		if (cpumask_test_cpu(cpu, dev->cpumask) &&
		    cpumask_weight(dev->cpumask) == 1 &&
		    !tick_is_broadcast_device(dev)) {
			BUG_ON(!clockevent_state_detached(dev));
			list_del(&dev->list);
		}
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
#endif

#ifdef CONFIG_SYSFS
static struct bus_type clockevents_subsys = {
	.name		= "clockevents",
	.dev_name	= "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct tick_device *td;
	ssize_t count = 0;

	raw_spin_lock_irq(&clockevents_lock);
	td = tick_get_tick_dev(dev);
	if (td && td->evtdev)
		count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
	raw_spin_unlock_irq(&clockevents_lock);
	return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char name[CS_NAME_LEN];
	ssize_t ret = sysfs_get_uname(buf, name, count);
	struct clock_event_device *ce;

	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clockevents_mutex);
	raw_spin_lock_irq(&clockevents_lock);
	list_for_each_entry(ce, &clockevent_devices, list) {
		if (!strcmp(ce->name, name)) {
			ret = __clockevents_try_unbind(ce, dev->id);
			break;
		}
	}
	raw_spin_unlock_irq(&clockevents_lock);
	/*
	 * We hold clockevents_mutex, so ce can't go away
	 */
	if (ret == -EAGAIN)
		ret = clockevents_unbind(ce, dev->id);
	mutex_unlock(&clockevents_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);
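
/*
 * Sysfs sketch (assuming the usual CONFIG_SYSFS layout): the attributes
 * defined above are registered per CPU by tick_init_sysfs() below and
 * appear as
 *
 *	/sys/devices/system/clockevents/clockevent0/current_device
 *	/sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * so e.g. `echo foo-timer > .../clockevent0/unbind_device` asks the core
 * to unbind "foo-timer" from CPU0, installing a replacement when it is
 * the active tick device.
 */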

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
	.init_name	= "broadcast",
	.id		= 0,
	.bus		= &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return dev == &tick_bc_dev ? tick_get_broadcast_device() :
		&per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
	int err = device_register(&tick_bc_dev);

	if (!err)
		err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
	return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
		int err;

		dev->id = cpu;
		dev->bus = &clockevents_subsys;
		err = device_register(dev);
		if (!err)
			err = device_create_file(dev, &dev_attr_current_device);
		if (!err)
			err = device_create_file(dev, &dev_attr_unbind_device);
		if (err)
			return err;
	}
	return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
	int err = subsys_system_register(&clockevents_subsys, NULL);

	if (!err)
		err = tick_init_sysfs();
	return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */