/*
 *  linux/kernel/time/clockevents.c
 *
 *  This file contains functions which manage clock event devices.
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 *  This code is licensed under the GPL version 2. For details see
 *  kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
        struct clock_event_device *ce;
        int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
                        bool ismax)
{
        u64 clc = (u64) latch << evt->shift;
        u64 rnd;

        if (unlikely(!evt->mult)) {
                evt->mult = 1;
                WARN_ON(1);
        }
        rnd = (u64) evt->mult - 1;

        /*
         * Upper bound sanity check. If the backwards conversion is
         * not equal to latch, we know that the above shift overflowed.
         */
        if ((clc >> evt->shift) != (u64)latch)
                clc = ~0ULL;

        /*
         * Scaled math oddities:
         *
         * For mult <= (1 << shift) we can safely add mult - 1 to
         * prevent integer rounding loss. So the backwards conversion
         * from nsec to device ticks will be correct.
         *
         * For mult > (1 << shift), i.e. device frequency is > 1GHz we
         * need to be careful. Adding mult - 1 will result in a value
         * which when converted back to device ticks can be larger
         * than latch by up to (mult - 1) >> shift. For the min_delta
         * calculation we still want to apply this in order to stay
         * above the minimum device ticks limit. For the upper limit
         * we would end up with a latch value larger than the upper
         * limit of the device, so we omit the add to stay below the
         * device upper boundary.
         *
         * Also omit the add if it would overflow the u64 boundary.
         */
        if ((~0ULL - clc > rnd) &&
            (!ismax || evt->mult <= (1ULL << evt->shift)))
                clc += rnd;

        do_div(clc, evt->mult);

        /* Deltas less than 1usec are pointless noise */
        return clc > 1000 ? clc : 1000;
}

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:      value to convert
 * @evt:        pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
        return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
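/*
 * Worked example for the conversion above (illustrative numbers, not
 * from any real device): for a 100 MHz event device,
 * clockevents_calc_mult_shift() could pick shift = 32 and
 * mult ~= 429496730, i.e. 0.1 ticks per nanosecond scaled by 2^32.
 * cev_delta2ns() then converts a latch of 100 device ticks as
 *
 *      clc = (100ULL << 32) / 429496730 ~= 1000 ns
 *
 * matching the expected 10 ns per tick. The 'rnd' addend exists so
 * that the reverse conversion in clockevents_program_event() does not
 * round back down below the requested number of device ticks.
 */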
static int __clockevents_switch_state(struct clock_event_device *dev,
                                      enum clock_event_state state)
{
        /* Transition with legacy set_mode() callback */
        if (dev->set_mode) {
                /* Legacy callback doesn't support new modes */
                if (state > CLOCK_EVT_STATE_ONESHOT)
                        return -ENOSYS;
                /*
                 * 'clock_event_state' and 'clock_event_mode' have 1-to-1
                 * mapping until *_ONESHOT, and so a simple cast will work.
                 */
                dev->set_mode((enum clock_event_mode)state, dev);
                dev->mode = (enum clock_event_mode)state;
                return 0;
        }

        if (dev->features & CLOCK_EVT_FEAT_DUMMY)
                return 0;

        /* Transition with new state-specific callbacks */
        switch (state) {
        case CLOCK_EVT_STATE_DETACHED:
                /* The clockevent device is getting replaced. Shut it down. */

        case CLOCK_EVT_STATE_SHUTDOWN:
                return dev->set_state_shutdown(dev);

        case CLOCK_EVT_STATE_PERIODIC:
                /* Core internal bug */
                if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
                        return -ENOSYS;
                return dev->set_state_periodic(dev);

        case CLOCK_EVT_STATE_ONESHOT:
                /* Core internal bug */
                if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                        return -ENOSYS;
                return dev->set_state_oneshot(dev);

        case CLOCK_EVT_STATE_ONESHOT_STOPPED:
                /* Core internal bug */
                if (WARN_ONCE(!clockevent_state_oneshot(dev),
                              "Current state: %d\n",
                              clockevent_get_state(dev)))
                        return -EINVAL;

                if (dev->set_state_oneshot_stopped)
                        return dev->set_state_oneshot_stopped(dev);
                else
                        return -ENOSYS;

        default:
                return -ENOSYS;
        }
}

/**
 * clockevents_switch_state - set the operating state of a clock event device
 * @dev:        device to modify
 * @state:      new state
 *
 * Must be called with interrupts disabled !
 */
void clockevents_switch_state(struct clock_event_device *dev,
                              enum clock_event_state state)
{
        if (clockevent_get_state(dev) != state) {
                if (__clockevents_switch_state(dev, state))
                        return;

                clockevent_set_state(dev, state);

                /*
                 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
                 * on it, so fix it up and emit a warning:
                 */
                if (clockevent_state_oneshot(dev)) {
                        if (unlikely(!dev->mult)) {
                                dev->mult = 1;
                                WARN_ON(1);
                        }
                }
        }
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:        device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
        clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
        dev->next_event.tv64 = KTIME_MAX;
}

/**
 * clockevents_tick_resume - Resume the tick device before using it again
 * @dev:        device to resume
 */
int clockevents_tick_resume(struct clock_event_device *dev)
{
        int ret = 0;

        if (dev->set_mode) {
                dev->set_mode(CLOCK_EVT_MODE_RESUME, dev);
                dev->mode = CLOCK_EVT_MODE_RESUME;
        } else if (dev->tick_resume) {
                ret = dev->tick_resume(dev);
        }

        return ret;
}
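/*
 * Minimal sketch of a driver-side device description feeding the state
 * machinery above (all my_timer_* names are hypothetical, for
 * illustration only):
 *
 *      static struct clock_event_device my_timer_ce = {
 *              .name                   = "my-timer",
 *              .features               = CLOCK_EVT_FEAT_ONESHOT,
 *              .set_state_shutdown     = my_timer_shutdown,
 *              .set_state_oneshot      = my_timer_set_oneshot,
 *              .set_next_event         = my_timer_next_event,
 *      };
 *
 * A driver still using the legacy ->set_mode() callback must not set
 * any of the new state callbacks; clockevents_sanity_check() below
 * warns when the two interfaces are mixed.
 */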
#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT         (NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:        device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
        /* Nothing to do if we already reached the limit */
        if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
                printk_deferred(KERN_WARNING
                                "CE: Reprogramming failure. Giving up\n");
                dev->next_event.tv64 = KTIME_MAX;
                return -ETIME;
        }

        if (dev->min_delta_ns < 5000)
                dev->min_delta_ns = 5000;
        else
                dev->min_delta_ns += dev->min_delta_ns >> 1;

        if (dev->min_delta_ns > MIN_DELTA_LIMIT)
                dev->min_delta_ns = MIN_DELTA_LIMIT;

        printk_deferred(KERN_WARNING
                        "CE: %s increased min_delta_ns to %llu nsec\n",
                        dev->name ? dev->name : "?",
                        (unsigned long long) dev->min_delta_ns);
        return 0;
}

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:        device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
        unsigned long long clc;
        int64_t delta;
        int i;

        for (i = 0;;) {
                delta = dev->min_delta_ns;
                dev->next_event = ktime_add_ns(ktime_get(), delta);

                if (clockevent_state_shutdown(dev))
                        return 0;

                dev->retries++;
                clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
                if (dev->set_next_event((unsigned long) clc, dev) == 0)
                        return 0;

                if (++i > 2) {
                        /*
                         * We tried 3 times to program the device with the
                         * given min_delta_ns. Try to increase the minimum
                         * delta, if that fails as well get out of here.
                         */
                        if (clockevents_increase_min_delta(dev))
                                return -ETIME;
                        i = 0;
                }
        }
}

#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:        device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
        unsigned long long clc;
        int64_t delta;

        delta = dev->min_delta_ns;
        dev->next_event = ktime_add_ns(ktime_get(), delta);

        if (clockevent_state_shutdown(dev))
                return 0;

        dev->retries++;
        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
        return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
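/*
 * The ns -> device ticks conversion used by the programming functions
 * is the inverse of cev_delta2ns(). Continuing the illustrative
 * 100 MHz example from above (mult ~= 429496730, shift = 32), a delta
 * of 1000 ns becomes
 *
 *      clc = (1000 * 429496730) >> 32 ~= 100 ticks
 *
 * (truncation actually yields 99, which is why cev_delta2ns() rounds
 * up when computing min_delta_ns). The hot path thus needs only a
 * multiply and a shift instead of a 64 bit division.
 */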
/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:        device to program
 * @expires:    absolute expiry time (monotonic clock)
 * @force:      program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
                              bool force)
{
        unsigned long long clc;
        int64_t delta;
        int rc;

        if (unlikely(expires.tv64 < 0)) {
                WARN_ON_ONCE(1);
                return -ETIME;
        }

        dev->next_event = expires;

        if (clockevent_state_shutdown(dev))
                return 0;

        /* We must be in ONESHOT state here */
        WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
                  clockevent_get_state(dev));

        /* Shortcut for clockevent devices that can deal with ktime. */
        if (dev->features & CLOCK_EVT_FEAT_KTIME)
                return dev->set_next_ktime(expires, dev);

        delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
        if (delta <= 0)
                return force ? clockevents_program_min_delta(dev) : -ETIME;

        delta = min(delta, (int64_t) dev->max_delta_ns);
        delta = max(delta, (int64_t) dev->min_delta_ns);

        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
        rc = dev->set_next_event((unsigned long) clc, dev);

        return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}

/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
        struct clock_event_device *dev;

        while (!list_empty(&clockevents_released)) {
                dev = list_entry(clockevents_released.next,
                                 struct clock_event_device, list);
                list_del(&dev->list);
                list_add(&dev->list, &clockevent_devices);
                tick_check_new_device(dev);
        }
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
        struct clock_event_device *dev, *newdev = NULL;

        list_for_each_entry(dev, &clockevent_devices, list) {
                if (dev == ced || !clockevent_state_detached(dev))
                        continue;

                if (!tick_check_replacement(newdev, dev))
                        continue;

                if (!try_module_get(dev->owner))
                        continue;

                if (newdev)
                        module_put(newdev->owner);
                newdev = dev;
        }
        if (newdev) {
                tick_install_replacement(newdev);
                list_del_init(&ced->list);
        }
        return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
        /* Fast track. Device is unused */
        if (clockevent_state_detached(ced)) {
                list_del_init(&ced->list);
                return 0;
        }

        return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
        struct ce_unbind *cu = arg;
        int res;

        raw_spin_lock(&clockevents_lock);
        res = __clockevents_try_unbind(cu->ce, smp_processor_id());
        if (res == -EAGAIN)
                res = clockevents_replace(cu->ce);
        cu->res = res;
        raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
        struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

        smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
        return cu.res;
}
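/*
 * Note: the smp function call above runs __clockevents_unbind() on the
 * target cpu itself, so the comparison against that cpu's
 * tick_cpu_device and a possible clockevents_replace() happen without
 * racing against the local tick code.
 */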
/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
        int ret;

        mutex_lock(&clockevents_mutex);
        ret = clockevents_unbind(ced, cpu);
        mutex_unlock(&clockevents_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/* Sanity check of state transition callbacks */
static int clockevents_sanity_check(struct clock_event_device *dev)
{
        /* Legacy set_mode() callback */
        if (dev->set_mode) {
                /* We shouldn't be supporting new modes now */
                WARN_ON(dev->set_state_periodic || dev->set_state_oneshot ||
                        dev->set_state_shutdown || dev->tick_resume ||
                        dev->set_state_oneshot_stopped);

                BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
                return 0;
        }

        if (dev->features & CLOCK_EVT_FEAT_DUMMY)
                return 0;

        /* New state-specific callbacks */
        if (!dev->set_state_shutdown)
                return -EINVAL;

        if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
            !dev->set_state_periodic)
                return -EINVAL;

        if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
            !dev->set_state_oneshot)
                return -EINVAL;

        return 0;
}

/**
 * clockevents_register_device - register a clock event device
 * @dev:        device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
        unsigned long flags;

        BUG_ON(clockevents_sanity_check(dev));

        /* Initialize state to DETACHED */
        clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);

        if (!dev->cpumask) {
                WARN_ON(num_possible_cpus() > 1);
                dev->cpumask = cpumask_of(smp_processor_id());
        }

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        list_add(&dev->list, &clockevent_devices);
        tick_check_new_device(dev);
        clockevents_notify_released();

        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

void clockevents_config(struct clock_event_device *dev, u32 freq)
{
        u64 sec;

        if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                return;

        /*
         * Calculate the maximum number of seconds we can sleep. Limit
         * to 10 minutes for hardware which can program more than
         * 32bit ticks so we still get reasonable conversion values.
         */
        sec = dev->max_delta_ticks;
        do_div(sec, freq);
        if (!sec)
                sec = 1;
        else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
                sec = 600;

        clockevents_calc_mult_shift(dev, freq, sec);
        dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
        dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
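/*
 * Worked example for the clamping above (made-up hardware): a 10 MHz
 * device with a 32 bit counter yields max_delta_ticks = 0xffffffff,
 * i.e. sec ~= 429, which stays below the 600 s cap. A wider-than-32bit
 * counter at the same frequency would be clamped to 600 s so that the
 * resulting mult/shift pair keeps enough precision for short deltas.
 */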
/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:        device to register
 * @freq:       The clock frequency
 * @min_delta:  The minimum clock ticks to program in oneshot mode
 * @max_delta:  The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
                                     u32 freq, unsigned long min_delta,
                                     unsigned long max_delta)
{
        dev->min_delta_ticks = min_delta;
        dev->max_delta_ticks = max_delta;
        clockevents_config(dev, freq);
        clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
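/*
 * Typical use from a (hypothetical) driver, reusing the made-up
 * my_timer_ce sketch from further up:
 *
 *      clockevents_config_and_register(&my_timer_ce, 10000000,
 *                                      0xf, 0x7fffffff);
 *
 * i.e. a 10 MHz clock which can be programmed between 15 ticks and
 * 2^31 - 1 ticks ahead.
 */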
int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        clockevents_config(dev, freq);

        if (clockevent_state_oneshot(dev))
                return clockevents_program_event(dev, dev->next_event, false);

        if (clockevent_state_periodic(dev))
                return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);

        return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:        device to modify
 * @freq:       new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);
        ret = tick_broadcast_update_freq(dev, freq);
        if (ret == -ENODEV)
                ret = __clockevents_update_freq(dev, freq);
        local_irq_restore(flags);
        return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:        device to release (can be NULL)
 * @new:        device to request (can be NULL)
 *
 * Called from various tick functions with clockevents_lock held and
 * interrupts disabled.
 */
void clockevents_exchange_device(struct clock_event_device *old,
                                 struct clock_event_device *new)
{
        /*
         * Caller releases a clock event device. We queue it into the
         * released list and do a notify add later.
         */
        if (old) {
                module_put(old->owner);
                clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
                list_del(&old->list);
                list_add(&old->list, &clockevents_released);
        }

        if (new) {
                BUG_ON(!clockevent_state_detached(new));
                clockevents_shutdown(new);
        }
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
        struct clock_event_device *dev;

        list_for_each_entry_reverse(dev, &clockevent_devices, list)
                if (dev->suspend && !clockevent_state_detached(dev))
                        dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
        struct clock_event_device *dev;

        list_for_each_entry(dev, &clockevent_devices, list)
                if (dev->resume && !clockevent_state_detached(dev))
                        dev->resume(dev);
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
 * @cpu:        the dead cpu
 */
void tick_cleanup_dead_cpu(int cpu)
{
        struct clock_event_device *dev, *tmp;
        unsigned long flags;

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        tick_shutdown_broadcast_oneshot(cpu);
        tick_shutdown_broadcast(cpu);
        tick_shutdown(cpu);
        /*
         * Unregister the clock event devices which were
         * released from the users in the notify chain.
         */
        list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
                list_del(&dev->list);
        /*
         * Now check whether the CPU has left unused per cpu devices
         */
        list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
                if (cpumask_test_cpu(cpu, dev->cpumask) &&
                    cpumask_weight(dev->cpumask) == 1 &&
                    !tick_is_broadcast_device(dev)) {
                        BUG_ON(!clockevent_state_detached(dev));
                        list_del(&dev->list);
                }
        }
        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
#endif

#ifdef CONFIG_SYSFS
struct bus_type clockevents_subsys = {
        .name           = "clockevents",
        .dev_name       = "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct tick_device *td;
        ssize_t count = 0;

        raw_spin_lock_irq(&clockevents_lock);
        td = tick_get_tick_dev(dev);
        if (td && td->evtdev)
                count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
        raw_spin_unlock_irq(&clockevents_lock);
        return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);
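/*
 * With the per cpu devices registered below, the attribute above is
 * readable from user space, e.g. (path derived from the subsystem and
 * dev_name strings above, output made up):
 *
 *      $ cat /sys/devices/system/clockevents/clockevent0/current_device
 *      lapic-deadline
 */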
/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        char name[CS_NAME_LEN];
        ssize_t ret = sysfs_get_uname(buf, name, count);
        struct clock_event_device *ce;

        if (ret < 0)
                return ret;

        ret = -ENODEV;
        mutex_lock(&clockevents_mutex);
        raw_spin_lock_irq(&clockevents_lock);
        list_for_each_entry(ce, &clockevent_devices, list) {
                if (!strcmp(ce->name, name)) {
                        ret = __clockevents_try_unbind(ce, dev->id);
                        break;
                }
        }
        raw_spin_unlock_irq(&clockevents_lock);
        /*
         * We hold clockevents_mutex, so ce can't go away
         */
        if (ret == -EAGAIN)
                ret = clockevents_unbind(ce, dev->id);
        mutex_unlock(&clockevents_mutex);
        return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
        .init_name      = "broadcast",
        .id             = 0,
        .bus            = &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
        return dev == &tick_bc_dev ? tick_get_broadcast_device() :
                &per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
        int err = device_register(&tick_bc_dev);

        if (!err)
                err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
        return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
        return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct device *dev = &per_cpu(tick_percpu_dev, cpu);
                int err;

                dev->id = cpu;
                dev->bus = &clockevents_subsys;
                err = device_register(dev);
                if (!err)
                        err = device_create_file(dev, &dev_attr_current_device);
                if (!err)
                        err = device_create_file(dev, &dev_attr_unbind_device);
                if (err)
                        return err;
        }
        return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
        int err = subsys_system_register(&clockevents_subsys, NULL);

        if (!err)
                err = tick_init_sysfs();
        return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */
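/*
 * Likewise, writing a device name to the per cpu unbind_device
 * attribute funnels through sysfs_unbind_tick_dev() above, e.g.
 * (illustrative; device names depend on the platform):
 *
 *      # echo hpet > /sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * The broadcast device registered under
 * CONFIG_GENERIC_CLOCKEVENTS_BROADCAST only gets the read-only
 * current_device attribute; as the comment above says, unbinding it is
 * not supported.
 */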