/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
	struct clock_event_device *ce;
	int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
			bool ismax)
{
	u64 clc = (u64) latch << evt->shift;
	u64 rnd;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}
	rnd = (u64) evt->mult - 1;

	/*
	 * Upper bound sanity check. If the backwards conversion does
	 * not equal latch, we know that the above shift overflowed.
	 */
	if ((clc >> evt->shift) != (u64)latch)
		clc = ~0ULL;

	/*
	 * Scaled math oddities:
	 *
	 * For mult <= (1 << shift) we can safely add mult - 1 to
	 * prevent integer rounding loss. So the backwards conversion
	 * from nsec to device ticks will be correct.
	 *
	 * For mult > (1 << shift), i.e. device frequency is > 1GHz, we
	 * need to be careful. Adding mult - 1 will result in a value
	 * which, when converted back to device ticks, can be larger
	 * than latch by up to (mult - 1) >> shift. For the min_delta
	 * calculation we still want to apply this in order to stay
	 * above the minimum device ticks limit. For the upper limit
	 * we would end up with a latch value larger than the upper
	 * limit of the device, so we omit the add to stay below the
	 * device upper boundary.
	 *
	 * Also omit the add if it would overflow the u64 boundary.
	 */
	if ((~0ULL - clc > rnd) &&
	    (!ismax || evt->mult <= (1ULL << evt->shift)))
		clc += rnd;

	do_div(clc, evt->mult);

	/* Deltas less than 1usec are pointless noise */
	return clc > 1000 ? clc : 1000;
}

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
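
/*
 * Worked example (illustrative numbers, not taken from any driver): a
 * 10MHz device configured with shift = 32 gets
 * mult ~= (10^7 << 32) / 10^9 = 42949673, so that
 * ticks = ns * mult >> shift. cev_delta2ns() inverts this: a latch of
 * 100 ticks yields ((100 << 32) + mult - 1) / mult = 10000ns, i.e.
 * 10usec, as expected at 100ns per tick. The "+ mult - 1" rounding
 * term ensures that converting the result back to device ticks does
 * not drop below the requested latch, and the final clamp keeps every
 * returned delta at or above 1000ns.
 */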

static int __clockevents_set_state(struct clock_event_device *dev,
				   enum clock_event_state state)
{
	/* Transition with legacy set_mode() callback */
	if (dev->set_mode) {
		/* Legacy callback doesn't support new modes */
		if (state > CLOCK_EVT_STATE_ONESHOT)
			return -ENOSYS;
		/*
		 * 'clock_event_state' and 'clock_event_mode' have 1-to-1
		 * mapping until *_ONESHOT, and so a simple cast will work.
		 */
		dev->set_mode((enum clock_event_mode)state, dev);
		dev->mode = (enum clock_event_mode)state;
		return 0;
	}

	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* Transition with new state-specific callbacks */
	switch (state) {
	case CLOCK_EVT_STATE_DETACHED:
		/*
		 * This is an internal state, which is guaranteed to go from
		 * SHUTDOWN to DETACHED. No driver interaction required.
		 */
		return 0;

	case CLOCK_EVT_STATE_SHUTDOWN:
		return dev->set_state_shutdown(dev);

	case CLOCK_EVT_STATE_PERIODIC:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
			return -ENOSYS;
		return dev->set_state_periodic(dev);

	case CLOCK_EVT_STATE_ONESHOT:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
			return -ENOSYS;
		return dev->set_state_oneshot(dev);

	default:
		return -ENOSYS;
	}
}

/**
 * clockevents_set_state - set the operating state of a clock event device
 * @dev:	device to modify
 * @state:	new state
 *
 * Must be called with interrupts disabled!
 */
void clockevents_set_state(struct clock_event_device *dev,
			   enum clock_event_state state)
{
	if (dev->state != state) {
		if (__clockevents_set_state(dev, state))
			return;

		dev->state = state;

		/*
		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (state == CLOCK_EVT_STATE_ONESHOT) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
	dev->next_event.tv64 = KTIME_MAX;
}

/**
 * clockevents_tick_resume - Resume the tick device before using it again
 * @dev:	device to resume
 */
int clockevents_tick_resume(struct clock_event_device *dev)
{
	int ret = 0;

	if (dev->set_mode) {
		dev->set_mode(CLOCK_EVT_MODE_RESUME, dev);
		dev->mode = CLOCK_EVT_MODE_RESUME;
	} else if (dev->tick_resume) {
		ret = dev->tick_resume(dev);
	}

	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:	device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk_deferred(KERN_WARNING
				"CE: Reprogramming failure. Giving up\n");
		dev->next_event.tv64 = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk_deferred(KERN_WARNING
			"CE: %s increased min_delta_ns to %llu nsec\n",
			dev->name ? dev->name : "?",
			(unsigned long long) dev->min_delta_ns);
	return 0;
}
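
/*
 * Illustrative backoff sequence (assuming HZ=250, so MIN_DELTA_LIMIT =
 * 4000000ns): a device that keeps failing to program would see
 * min_delta_ns raised along 5000, 7500, 11250, ... each step 1.5x the
 * previous one, raised after every three failed attempts (see
 * clockevents_program_min_delta() below), until the jiffie limit is
 * hit and -ETIME ends the retries.
 */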

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta; if that fails as well, get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}

#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;

	delta = dev->min_delta_ns;
	dev->next_event = ktime_add_ns(ktime_get(), delta);

	if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
		return 0;

	dev->retries++;
	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @force:	program minimum delay if expires cannot be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (unlikely(expires.tv64 < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	dev->next_event = expires;

	if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
		return 0;

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
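
/*
 * Illustrative use of the conversion above (same hypothetical 10MHz
 * device as in the example near cev_delta2ns(): mult ~= 42949673,
 * shift = 32): a request expiring 1ms from now gives delta = 1000000ns,
 * which is clamped into [min_delta_ns, max_delta_ns] and converted to
 * clc = 1000000 * 42949673 >> 32 = 10000 device ticks. Only when the
 * set_next_event() callback rejects that value and @force is set does
 * the core fall back to clockevents_program_min_delta().
 */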

/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		tick_check_new_device(dev);
	}
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
	struct clock_event_device *dev, *newdev = NULL;

	list_for_each_entry(dev, &clockevent_devices, list) {
		if (dev == ced || dev->state != CLOCK_EVT_STATE_DETACHED)
			continue;

		if (!tick_check_replacement(newdev, dev))
			continue;

		if (!try_module_get(dev->owner))
			continue;

		if (newdev)
			module_put(newdev->owner);
		newdev = dev;
	}
	if (newdev) {
		tick_install_replacement(newdev);
		list_del_init(&ced->list);
	}
	return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
	/* Fast track. Device is unused */
	if (ced->state == CLOCK_EVT_STATE_DETACHED) {
		list_del_init(&ced->list);
		return 0;
	}

	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
	struct ce_unbind *cu = arg;
	int res;

	raw_spin_lock(&clockevents_lock);
	res = __clockevents_try_unbind(cu->ce, smp_processor_id());
	if (res == -EAGAIN)
		res = clockevents_replace(cu->ce);
	cu->res = res;
	raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
	struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
	return cu.res;
}
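
/*
 * Unbind call flow, for orientation (derived from the functions above
 * and clockevents_unbind_device() below):
 *
 *	clockevents_unbind_device()	caller context, takes the mutex
 *	  clockevents_unbind()		issues an IPI to the target cpu
 *	    __clockevents_unbind()	runs on the target cpu
 *	      __clockevents_try_unbind()  -EAGAIN if the device is in use
 *	      clockevents_replace()	then try to swap in a spare device
 *
 * Running the final steps via smp_call_function_single() guarantees
 * that a device which is currently the tick device of @cpu is only
 * replaced from that cpu itself, with clockevents_lock held.
 */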

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
	int ret;

	mutex_lock(&clockevents_mutex);
	ret = clockevents_unbind(ced, cpu);
	mutex_unlock(&clockevents_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/* Sanity check of state transition callbacks */
static int clockevents_sanity_check(struct clock_event_device *dev)
{
	/* Legacy set_mode() callback */
	if (dev->set_mode) {
		/* We shouldn't be supporting new modes now */
		WARN_ON(dev->set_state_periodic || dev->set_state_oneshot ||
			dev->set_state_shutdown || dev->tick_resume);

		BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
		return 0;
	}

	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* New state-specific callbacks */
	if (!dev->set_state_shutdown)
		return -EINVAL;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !dev->set_state_periodic)
		return -EINVAL;

	if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
	    !dev->set_state_oneshot)
		return -EINVAL;

	return 0;
}

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	BUG_ON(clockevents_sanity_check(dev));

	/* Initialize state to DETACHED */
	dev->state = CLOCK_EVT_STATE_DETACHED;

	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
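
/*
 * Typical driver usage (illustrative sketch; the names are made up and
 * the callbacks are assumed to exist in the driver):
 *
 *	static struct clock_event_device my_clkevt = {
 *		.name			= "my_timer",
 *		.features		= CLOCK_EVT_FEAT_ONESHOT,
 *		.rating			= 300,
 *		.set_state_shutdown	= my_timer_shutdown,
 *		.set_state_oneshot	= my_timer_set_oneshot,
 *		.set_next_event		= my_timer_set_next_event,
 *		.cpumask		= cpu_possible_mask,
 *	};
 *
 *	clockevents_config_and_register(&my_clkevt, timer_rate_hz,
 *					0xf, 0x7fffffff);
 *
 * This fills in min/max_delta_ticks, derives mult/shift and the
 * min/max_delta_ns bounds via clockevents_config(), and finally hands
 * the device to the tick core through clockevents_register_device().
 */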

int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (dev->state == CLOCK_EVT_STATE_ONESHOT)
		return clockevents_program_event(dev, dev->next_event, false);

	if (dev->state == CLOCK_EVT_STATE_PERIODIC)
		return __clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);

	return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = tick_broadcast_update_freq(dev, freq);
	if (ret == -ENODEV)
		ret = __clockevents_update_freq(dev, freq);
	local_irq_restore(flags);
	return ret;
}
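
/*
 * Usage note (illustrative, not from this file): a typical caller is a
 * timer driver whose input clock scales with the CPU frequency. From a
 * cpufreq transition notifier, running on the cpu that owns the device
 * as the kerneldoc above requires, such a driver would do
 *
 *	clockevents_update_freq(this_cpu_evt, new_rate_hz);
 *
 * where new_rate_hz is whatever rate the notifier reports. This
 * recomputes mult/shift and reprograms the pending event so the next
 * interrupt still fires at the intended time.
 */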

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from various tick functions with clockevents_lock held and
 * interrupts disabled.
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_set_state(old, CLOCK_EVT_STATE_DETACHED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(new->state != CLOCK_EVT_STATE_DETACHED);
		clockevents_shutdown(new);
	}
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend)
			dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume)
			dev->resume(dev);
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
 * @cpu:	the cpu which went offline
 */
void tick_cleanup_dead_cpu(int cpu)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	tick_shutdown_broadcast_oneshot(cpu);
	tick_shutdown_broadcast(cpu);
	tick_shutdown(cpu);
	/*
	 * Unregister the clock event devices which were
	 * released from the users in the notify chain.
	 */
	list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
		list_del(&dev->list);
	/*
	 * Now check whether the CPU has left unused per cpu devices
	 */
	list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
		if (cpumask_test_cpu(cpu, dev->cpumask) &&
		    cpumask_weight(dev->cpumask) == 1 &&
		    !tick_is_broadcast_device(dev)) {
			BUG_ON(dev->state != CLOCK_EVT_STATE_DETACHED);
			list_del(&dev->list);
		}
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
#endif

#ifdef CONFIG_SYSFS
struct bus_type clockevents_subsys = {
	.name		= "clockevents",
	.dev_name	= "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct tick_device *td;
	ssize_t count = 0;

	raw_spin_lock_irq(&clockevents_lock);
	td = tick_get_tick_dev(dev);
	if (td && td->evtdev)
		count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
	raw_spin_unlock_irq(&clockevents_lock);
	return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char name[CS_NAME_LEN];
	ssize_t ret = sysfs_get_uname(buf, name, count);
	struct clock_event_device *ce;

	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clockevents_mutex);
	raw_spin_lock_irq(&clockevents_lock);
	list_for_each_entry(ce, &clockevent_devices, list) {
		if (!strcmp(ce->name, name)) {
			ret = __clockevents_try_unbind(ce, dev->id);
			break;
		}
	}
	raw_spin_unlock_irq(&clockevents_lock);
	/*
	 * We hold clockevents_mutex, so ce can't go away
	 */
	if (ret == -EAGAIN)
		ret = clockevents_unbind(ce, dev->id);
	mutex_unlock(&clockevents_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);
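
/*
 * Resulting sysfs layout (illustrative; subsys_system_register() below
 * places the nodes under /sys/devices/system/clockevents/, and the
 * device names shown are examples):
 *
 *	# cat /sys/devices/system/clockevents/clockevent0/current_device
 *	lapic
 *	# echo hpet > /sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * Reading current_device reports the tick device of that cpu; writing
 * a clock event device name to unbind_device tries to unbind it and
 * install a replacement.
 */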

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
	.init_name	= "broadcast",
	.id		= 0,
	.bus		= &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return dev == &tick_bc_dev ? tick_get_broadcast_device() :
		&per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
	int err = device_register(&tick_bc_dev);

	if (!err)
		err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
	return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
		int err;

		dev->id = cpu;
		dev->bus = &clockevents_subsys;
		err = device_register(dev);
		if (!err)
			err = device_create_file(dev, &dev_attr_current_device);
		if (!err)
			err = device_create_file(dev, &dev_attr_unbind_device);
		if (err)
			return err;
	}
	return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
	int err = subsys_system_register(&clockevents_subsys, NULL);

	if (!err)
		err = tick_init_sysfs();
	return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */