/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
	struct clock_event_device *ce;
	int res;
};

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch: value to convert
 * @evt: pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	u64 clc = (u64) latch << evt->shift;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}

	do_div(clc, evt->mult);
	if (clc < 1000)
		clc = 1000;
	if (clc > KTIME_MAX)
		clc = KTIME_MAX;

	return clc;
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
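/*
 * Worked example for the conversion above (illustrative numbers, not from
 * any particular piece of hardware): for a device clocked at 1 MHz and
 * configured via clockevents_calc_mult_shift() with shift = 32, mult comes
 * out at roughly 2^32 * 1e6 / 1e9 ~= 4294967. A latch of 1000 ticks then
 * converts to
 *
 *	clc = (1000 << 32) / 4294967 ~= 1000000 ns = 1 ms
 *
 * which matches the expected 1000 ticks at 1 MHz. The 1000 ns lower bound
 * and the KTIME_MAX upper bound keep the result usable as a ktime delta
 * even for degenerate latch values.
 */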
/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev: device to modify
 * @mode: new mode
 *
 * Must be called with interrupts disabled !
 */
void clockevents_set_mode(struct clock_event_device *dev,
			  enum clock_event_mode mode)
{
	if (dev->mode != mode) {
		dev->set_mode(mode, dev);
		dev->mode = mode;

		/*
		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (mode == CLOCK_EVT_MODE_ONESHOT) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev: device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
	dev->next_event.tv64 = KTIME_MAX;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev: device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
		dev->next_event.tv64 = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
	       dev->name ? dev->name : "?",
	       (unsigned long long) dev->min_delta_ns);
	return 0;
}
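/*
 * Illustrative backoff sequence (assuming HZ=100, i.e. a 10 ms jiffie
 * limit): starting from a too small min_delta_ns, the function above
 * first raises it to 5000 ns and then by 50% per call:
 *
 *	5000 -> 7500 -> 11250 -> ... -> 10000000 (capped at MIN_DELTA_LIMIT)
 *
 * so a device that persistently fails to program short deltas converges
 * on a one-jiffie minimum instead of looping forever.
 */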
/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev: device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta, if that fails as well get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}

#else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev: device to program
 *
 * Returns 0 on success, a negative error code when programming the device
 * failed. This variant does not retry.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;

	delta = dev->min_delta_ns;
	dev->next_event = ktime_add_ns(ktime_get(), delta);

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	dev->retries++;
	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev: device to program
 * @expires: absolute expiry time (monotonic clock)
 * @force: program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (unlikely(expires.tv64 < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	dev->next_event = expires;

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
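/*
 * The nsec to cycles conversion above, with illustrative numbers (the same
 * hypothetical 1 MHz / shift = 32 device as in the example following
 * clockevent_delta2ns()): a 1 ms delta converts as
 *
 *	clc = (1000000 * 4294967) >> 32 ~= 1000 device ticks
 *
 * i.e. the inverse of clockevent_delta2ns(). Clamping delta between
 * min_delta_ns and max_delta_ns first keeps clc inside the range the
 * device declared programmable.
 */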
/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		tick_check_new_device(dev);
	}
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
	struct clock_event_device *dev, *newdev = NULL;

	list_for_each_entry(dev, &clockevent_devices, list) {
		if (dev == ced || dev->mode != CLOCK_EVT_MODE_UNUSED)
			continue;

		if (!tick_check_replacement(newdev, dev))
			continue;

		if (!try_module_get(dev->owner))
			continue;

		if (newdev)
			module_put(newdev->owner);
		newdev = dev;
	}
	if (newdev) {
		tick_install_replacement(newdev);
		list_del_init(&ced->list);
	}
	return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
	/* Fast track. Device is unused */
	if (ced->mode == CLOCK_EVT_MODE_UNUSED) {
		list_del_init(&ced->list);
		return 0;
	}

	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
	struct ce_unbind *cu = arg;
	int res;

	raw_spin_lock(&clockevents_lock);
	res = __clockevents_try_unbind(cu->ce, smp_processor_id());
	if (res == -EAGAIN)
		res = clockevents_replace(cu->ce);
	cu->res = res;
	raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
	struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
	return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
	int ret;

	mutex_lock(&clockevents_mutex);
	ret = clockevents_unbind(ced, cpu);
	mutex_unlock(&clockevents_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/**
 * clockevents_register_device - register a clock event device
 * @dev: device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
	dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
}
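/*
 * Example of the max-sleep computation above (hypothetical devices): a
 * 32 bit timer running at 1 MHz has max_delta_ticks = 0xffffffff, so
 * sec = 0xffffffff / 1000000 ~= 4294 seconds, and mult/shift are scaled
 * for that full range. A 64 bit timer at the same frequency would instead
 * be capped at sec = 600, keeping the mult/shift conversion factors
 * reasonable.
 */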
/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev: device to register
 * @freq: The clock frequency
 * @min_delta: The minimum clock ticks to program in oneshot mode
 * @max_delta: The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
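/*
 * Minimal registration sketch for a hypothetical driver (all names and
 * numbers below are made up for illustration, not taken from this file):
 *
 *	static struct clock_event_device foo_clockevent = {
 *		.name		= "foo-timer",
 *		.features	= CLOCK_EVT_FEAT_ONESHOT,
 *		.rating		= 300,
 *		.set_mode	= foo_set_mode,
 *		.set_next_event	= foo_set_next_event,
 *	};
 *
 *	foo_clockevent.cpumask = cpumask_of(0);
 *	clockevents_config_and_register(&foo_clockevent, 1000000, 2, 0xffffffff);
 *
 * This registers a 1 MHz oneshot device programmable between 2 and
 * 2^32 - 1 ticks ahead; mult, shift, min_delta_ns and max_delta_ns are
 * derived by clockevents_config() above.
 */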
/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev: device to modify
 * @freq: new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events with interrupts disabled! Returns 0 on success,
 * -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return 0;

	return clockevents_program_event(dev, dev->next_event, false);
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old: device to release (can be NULL)
 * @new: device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
		clockevents_shutdown(new);
	}
	local_irq_restore(flags);
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend)
			dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume)
			dev->resume(dev);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 */
void clockevents_notify(unsigned long reason, void *arg)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, arg);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DYING:
		tick_handover_do_timer(arg);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		tick_shutdown_broadcast_oneshot(arg);
		tick_shutdown_broadcast(arg);
		tick_shutdown(arg);
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
			list_del(&dev->list);
		/*
		 * Now check whether the CPU has left unused per cpu devices
		 */
		cpu = *((int *)arg);
		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
			if (cpumask_test_cpu(cpu, dev->cpumask) &&
			    cpumask_weight(dev->cpumask) == 1 &&
			    !tick_is_broadcast_device(dev)) {
				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
				list_del(&dev->list);
			}
		}
		break;
	default:
		break;
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);

#ifdef CONFIG_SYSFS
struct bus_type clockevents_subsys = {
	.name = "clockevents",
	.dev_name = "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct tick_device *td;
	ssize_t count = 0;

	raw_spin_lock_irq(&clockevents_lock);
	td = tick_get_tick_dev(dev);
	if (td && td->evtdev)
		count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
	raw_spin_unlock_irq(&clockevents_lock);
	return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char name[CS_NAME_LEN];
	ssize_t ret = sysfs_get_uname(buf, name, count);
	struct clock_event_device *ce;

	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clockevents_mutex);
	raw_spin_lock_irq(&clockevents_lock);
	list_for_each_entry(ce, &clockevent_devices, list) {
		if (!strcmp(ce->name, name)) {
			ret = __clockevents_try_unbind(ce, dev->id);
			break;
		}
	}
	raw_spin_unlock_irq(&clockevents_lock);
	/*
	 * We hold clockevents_mutex, so ce can't go away
	 */
	if (ret == -EAGAIN)
		ret = clockevents_unbind(ce, dev->id);
	mutex_unlock(&clockevents_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);
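/*
 * Resulting sysfs layout (illustrative, assuming the subsystem registers
 * under /sys/devices/system as set up in clockevents_init_sysfs() below;
 * "foo-timer" is the made-up device from the registration sketch above):
 *
 *	# cat /sys/devices/system/clockevents/clockevent0/current_device
 *	foo-timer
 *	# echo foo-timer > /sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * The write unbinds the named device from CPU 0, installing a replacement
 * via clockevents_replace() if the device is currently in use.
 */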
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
	.init_name = "broadcast",
	.id = 0,
	.bus = &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return dev == &tick_bc_dev ? tick_get_broadcast_device() :
		&per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
	int err = device_register(&tick_bc_dev);

	if (!err)
		err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
	return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
		int err;

		dev->id = cpu;
		dev->bus = &clockevents_subsys;
		err = device_register(dev);
		if (!err)
			err = device_create_file(dev, &dev_attr_current_device);
		if (!err)
			err = device_create_file(dev, &dev_attr_unbind_device);
		if (err)
			return err;
	}
	return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
	int err = subsys_system_register(&clockevents_subsys, NULL);

	if (!err)
		err = tick_init_sysfs();
	return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */

#endif /* GENERIC_CLOCK_EVENTS */