/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
	struct clock_event_device *ce;
	int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
			bool ismax)
{
	u64 clc = (u64) latch << evt->shift;
	u64 rnd;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}
	rnd = (u64) evt->mult - 1;

	/*
	 * Upper bound sanity check. If the backwards conversion does
	 * not equal latch, we know that the above shift overflowed.
	 */
	if ((clc >> evt->shift) != (u64)latch)
		clc = ~0ULL;

	/*
	 * Scaled math oddities:
	 *
	 * For mult <= (1 << shift) we can safely add mult - 1 to
	 * prevent integer rounding loss. So the backwards conversion
	 * from nsec to device ticks will be correct.
	 *
	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
	 * need to be careful. Adding mult - 1 will result in a value
	 * which when converted back to device ticks can be larger
	 * than latch by up to (mult - 1) >> shift. For the min_delta
	 * calculation we still want to apply this in order to stay
	 * above the minimum device ticks limit. For the upper limit
	 * we would end up with a latch value larger than the upper
	 * limit of the device, so we omit the add to stay below the
	 * device upper boundary.
	 *
	 * Also omit the add if it would overflow the u64 boundary.
	 */
	if ((~0ULL - clc > rnd) &&
	    (!ismax || evt->mult <= (1U << evt->shift)))
		clc += rnd;

	do_div(clc, evt->mult);

	/* Deltas less than 1usec are pointless noise */
	return clc > 1000 ? clc : 1000;
}

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

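/*
 * Worked example (illustrative numbers, not from the original
 * source): a 100MHz device configured with shift = 32 ends up with
 * mult = (100000000ULL << 32) / NSEC_PER_SEC = 429496729, i.e.
 * mult / 2^shift ~= 0.1 device ticks per nanosecond. Converting a
 * latch of 1000 ticks (10 usec) back to nanoseconds:
 *
 *	clc  = 1000ULL << 32;		-> 4294967296000
 *	clc += 429496729 - 1;		-> round up (mult <= 1 << shift)
 *	do_div(clc, 429496729);		-> 10001 ns
 *
 * The result errs on the high side, so converting it back with
 * (ns * mult) >> shift cannot yield fewer ticks than requested.
 */
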
/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev:	device to modify
 * @mode:	new mode
 *
 * Must be called with interrupts disabled!
 */
void clockevents_set_mode(struct clock_event_device *dev,
			  enum clock_event_mode mode)
{
	if (dev->mode != mode) {
		dev->set_mode(mode, dev);
		dev->mode = mode;

		/*
		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (mode == CLOCK_EVT_MODE_ONESHOT) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
	dev->next_event.tv64 = KTIME_MAX;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:	device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
		dev->next_event.tv64 = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
	       dev->name ? dev->name : "?",
	       (unsigned long long) dev->min_delta_ns);
	return 0;
}

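/*
 * Back-off illustration (hypothetical numbers): with HZ=250 the
 * limit above is 4000000 ns. A device which keeps rejecting its
 * programming attempts walks min_delta_ns through roughly
 *
 *	5000 -> 7500 -> 11250 -> 16875 -> ... -> 4000000
 *
 * i.e. a 1.5x increase per step, clamped to MIN_DELTA_LIMIT. Once
 * the limit is reached, the next failure gives up with -ETIME.
 */
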
/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta, if that fails as well get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}

#else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, the set_next_event() error code otherwise.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;

	delta = dev->min_delta_ns;
	dev->next_event = ktime_add_ns(ktime_get(), delta);

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	dev->retries++;
	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @force:	program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (unlikely(expires.tv64 < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	dev->next_event = expires;

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}

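/*
 * Hypothetical caller sketch (not part of the original file): a
 * tick/hrtimer layer would typically reprogram relative to the
 * current monotonic time and only force the minimum delta when a
 * failed expiry must still produce an interrupt:
 *
 *	ktime_t expires = ktime_add_ns(ktime_get(), 100 * NSEC_PER_USEC);
 *
 *	if (clockevents_program_event(dev, expires, false) == -ETIME)
 *		;	// expiry already in the past, caller decides
 */
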
/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		tick_check_new_device(dev);
	}
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
	struct clock_event_device *dev, *newdev = NULL;

	list_for_each_entry(dev, &clockevent_devices, list) {
		if (dev == ced || dev->mode != CLOCK_EVT_MODE_UNUSED)
			continue;

		if (!tick_check_replacement(newdev, dev))
			continue;

		if (!try_module_get(dev->owner))
			continue;

		if (newdev)
			module_put(newdev->owner);
		newdev = dev;
	}
	if (newdev) {
		tick_install_replacement(newdev);
		list_del_init(&ced->list);
	}
	return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
	/* Fast track. Device is unused */
	if (ced->mode == CLOCK_EVT_MODE_UNUSED) {
		list_del_init(&ced->list);
		return 0;
	}

	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
	struct ce_unbind *cu = arg;
	int res;

	raw_spin_lock(&clockevents_lock);
	res = __clockevents_try_unbind(cu->ce, smp_processor_id());
	if (res == -EAGAIN)
		res = clockevents_replace(cu->ce);
	cu->res = res;
	raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
	struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
	return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
	int ret;

	mutex_lock(&clockevents_mutex);
	ret = clockevents_unbind(ced, cpu);
	mutex_unlock(&clockevents_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);

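/*
 * Registration sketch (hypothetical driver, illustrative values
 * only): a 1MHz timer with a 16bit counter that needs at least two
 * ticks of programming lead time could register as
 *
 *	static struct clock_event_device my_ce = {
 *		.name		= "my-timer",
 *		.features	= CLOCK_EVT_FEAT_ONESHOT,
 *		.rating		= 300,
 *		.set_mode	= my_set_mode,
 *		.set_next_event	= my_set_next_event,
 *	};
 *
 *	my_ce.cpumask = cpumask_of(smp_processor_id());
 *	clockevents_config_and_register(&my_ce, 1000000, 2, 0xffff);
 *
 * clockevents_config() then derives mult/shift and the
 * min/max_delta_ns bounds from the given tick limits.
 */
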
/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events with interrupts disabled! Returns 0 on success,
 * -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return 0;

	return clockevents_program_event(dev, dev->next_event, false);
}

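/*
 * Hypothetical caller sketch: a timer driver whose input clock rate
 * changed (e.g. notified by the clock framework) would run this on
 * the CPU that owns the device, with interrupts off:
 *
 *	local_irq_save(flags);
 *	if (clockevents_update_freq(evt, new_rate))
 *		pr_warn("%s: reprogramming failed\n", evt->name);
 *	local_irq_restore(flags);
 */
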
/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
		clockevents_shutdown(new);
	}
	local_irq_restore(flags);
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend)
			dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume)
			dev->resume(dev);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 * @reason:	notification reason
 * @arg:	reason specific argument
 */
void clockevents_notify(unsigned long reason, void *arg)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, arg);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DYING:
		tick_handover_do_timer(arg);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		tick_shutdown_broadcast_oneshot(arg);
		tick_shutdown_broadcast(arg);
		tick_shutdown(arg);
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
			list_del(&dev->list);
		/*
		 * Now check whether the CPU has left unused per cpu devices
		 */
		cpu = *((int *)arg);
		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
			if (cpumask_test_cpu(cpu, dev->cpumask) &&
			    cpumask_weight(dev->cpumask) == 1 &&
			    !tick_is_broadcast_device(dev)) {
				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
				list_del(&dev->list);
			}
		}
		break;
	default:
		break;
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);

#ifdef CONFIG_SYSFS
struct bus_type clockevents_subsys = {
	.name		= "clockevents",
	.dev_name	= "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct tick_device *td;
	ssize_t count = 0;

	raw_spin_lock_irq(&clockevents_lock);
	td = tick_get_tick_dev(dev);
	if (td && td->evtdev)
		count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
	raw_spin_unlock_irq(&clockevents_lock);
	return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char name[CS_NAME_LEN];
	ssize_t ret = sysfs_get_uname(buf, name, count);
	struct clock_event_device *ce;

	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clockevents_mutex);
	raw_spin_lock_irq(&clockevents_lock);
	list_for_each_entry(ce, &clockevent_devices, list) {
		if (!strcmp(ce->name, name)) {
			ret = __clockevents_try_unbind(ce, dev->id);
			break;
		}
	}
	raw_spin_unlock_irq(&clockevents_lock);
	/*
	 * We hold clockevents_mutex, so ce can't go away
	 */
	if (ret == -EAGAIN)
		ret = clockevents_unbind(ce, dev->id);
	mutex_unlock(&clockevents_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);

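/*
 * Resulting sysfs layout (illustrative, assuming the standard
 * system bus mount point):
 *
 *	# cat /sys/devices/system/clockevents/clockevent0/current_device
 *	hpet
 *	# echo hpet > /sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * Unbinding the device currently in use only succeeds if
 * clockevents_replace() can install a replacement; otherwise the
 * write fails with -EBUSY.
 */
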
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
	.init_name	= "broadcast",
	.id		= 0,
	.bus		= &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return dev == &tick_bc_dev ? tick_get_broadcast_device() :
		&per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
	int err = device_register(&tick_bc_dev);

	if (!err)
		err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
	return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
		int err;

		dev->id = cpu;
		dev->bus = &clockevents_subsys;
		err = device_register(dev);
		if (!err)
			err = device_create_file(dev, &dev_attr_current_device);
		if (!err)
			err = device_create_file(dev, &dev_attr_unbind_device);
		if (err)
			return err;
	}
	return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
	int err = subsys_system_register(&clockevents_subsys, NULL);

	if (!err)
		err = tick_init_sysfs();
	return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */

#endif /* GENERIC_CLOCK_EVENTS */