/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
	struct clock_event_device *ce;
	int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
			bool ismax)
{
	u64 clc = (u64) latch << evt->shift;
	u64 rnd;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}
	rnd = (u64) evt->mult - 1;

	/*
	 * Upper bound sanity check. If the backwards conversion does
	 * not equal latch, we know that the above shift overflowed.
	 */
	if ((clc >> evt->shift) != (u64)latch)
		clc = ~0ULL;

	/*
	 * Scaled math oddities:
	 *
	 * For mult <= (1 << shift) we can safely add mult - 1 to
	 * prevent integer rounding loss. So the backwards conversion
	 * from nsec to device ticks will be correct.
	 *
	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
	 * need to be careful. Adding mult - 1 will result in a value
	 * which when converted back to device ticks can be larger
	 * than latch by up to (mult - 1) >> shift. For the min_delta
	 * calculation we still want to apply this in order to stay
	 * above the minimum device ticks limit. For the upper limit
	 * we would end up with a latch value larger than the upper
	 * limit of the device, so we omit the add to stay below the
	 * device upper boundary.
	 *
	 * Also omit the add if it would overflow the u64 boundary.
	 */
	if ((~0ULL - clc > rnd) &&
	    (!ismax || evt->mult <= (1U << evt->shift)))
		clc += rnd;

	do_div(clc, evt->mult);

	/* Deltas less than 1usec are pointless noise */
	return clc > 1000 ? clc : 1000;
}
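/*
 * Worked example (illustrative, not part of the original source): for a
 * 10MHz device configured with shift = 32, mult is about
 * 10^7 * 2^32 / 10^9 = 42949673. Converting a latch of 10000 ticks back
 * to nanoseconds then gives
 *
 *	clc = (10000 << 32) / 42949673 ~= 1000000 nsec
 *
 * i.e. 1msec, as expected for 10000 ticks at 10MHz. Since here
 * mult <= (1 << shift), the "clc += rnd" above rounds the division up,
 * so a value converted to nsec and back to device ticks never drops
 * below the original latch.
 */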
/**
 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev:	device to modify
 * @mode:	new mode
 *
 * Must be called with interrupts disabled !
 */
void clockevents_set_mode(struct clock_event_device *dev,
			  enum clock_event_mode mode)
{
	if (dev->mode != mode) {
		dev->set_mode(mode, dev);
		dev->mode = mode;

		/*
		 * A nsec2cyc multiplier of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (mode == CLOCK_EVT_MODE_ONESHOT) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
	dev->next_event.tv64 = KTIME_MAX;
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffy */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:	device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
		dev->next_event.tv64 = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
	       dev->name ? dev->name : "?",
	       (unsigned long long) dev->min_delta_ns);
	return 0;
}
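/*
 * Illustrative progression (assuming HZ=100, i.e. MIN_DELTA_LIMIT is
 * 10000000 nsec): a device which repeatedly fails to program is bumped
 * to 5000 nsec first, then grows by 50% per failed round
 * (7500, 11250, 16875, ...) until the value is clamped at the limit.
 * If programming still fails at the limit, the device is given up on
 * with -ETIME.
 */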
/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta, if that fails as well get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}

#else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;

	delta = dev->min_delta_ns;
	dev->next_event = ktime_add_ns(ktime_get(), delta);

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	dev->retries++;
	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @force:	program minimum delay if expires cannot be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (unlikely(expires.tv64 < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	dev->next_event = expires;

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
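/*
 * Example of the clamping and conversion above (illustrative numbers):
 * with min_delta_ns = 1000 and max_delta_ns = 10^9, an expiry 5msec in
 * the future yields delta = 5000000, which is within both bounds. For
 * the 10MHz/shift=32 device from the cev_delta2ns() example this is
 * handed to ->set_next_event() as
 *
 *	clc = (5000000 * 42949673) >> 32 ~= 50000
 *
 * device ticks, i.e. 5msec worth of 10MHz cycles.
 */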
/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		tick_check_new_device(dev);
	}
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
	struct clock_event_device *dev, *newdev = NULL;

	list_for_each_entry(dev, &clockevent_devices, list) {
		if (dev == ced || dev->mode != CLOCK_EVT_MODE_UNUSED)
			continue;

		if (!tick_check_replacement(newdev, dev))
			continue;

		if (!try_module_get(dev->owner))
			continue;

		if (newdev)
			module_put(newdev->owner);
		newdev = dev;
	}
	if (newdev) {
		tick_install_replacement(newdev);
		list_del_init(&ced->list);
	}
	return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
	/* Fast track. Device is unused */
	if (ced->mode == CLOCK_EVT_MODE_UNUSED) {
		list_del_init(&ced->list);
		return 0;
	}

	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}
/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
	struct ce_unbind *cu = arg;
	int res;

	raw_spin_lock(&clockevents_lock);
	res = __clockevents_try_unbind(cu->ce, smp_processor_id());
	if (res == -EAGAIN)
		res = clockevents_replace(cu->ce);
	cu->res = res;
	raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
	struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
	return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
	int ret;

	mutex_lock(&clockevents_mutex);
	ret = clockevents_unbind(ced, cpu);
	mutex_unlock(&clockevents_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
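/*
 * Example (illustrative): a 1MHz device with 32bit max_delta_ticks
 * (0xffffffff) gives sec = 0xffffffff / 10^6 = 4294. The 600 second cap
 * only applies to hardware which can program more than 32bit ticks, so
 * here mult/shift are sized for the full ~71 minute range.
 */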
/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
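/*
 * Minimal sketch (illustrative only, not part of this file) of how a
 * hypothetical timer driver would use the above. All foo_* names and
 * values are made up for the example.
 */
#if 0
static void foo_timer_set_mode(enum clock_event_mode mode,
			       struct clock_event_device *evt)
{
	/* Program the hardware for periodic/oneshot/shutdown here. */
}

static int foo_timer_set_next_event(unsigned long ticks,
				    struct clock_event_device *evt)
{
	/* Write 'ticks' into the hardware comparator, return 0 on success. */
	return 0;
}

static struct clock_event_device foo_timer_ce = {
	.name		= "foo-timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 300,
	.set_mode	= foo_timer_set_mode,
	.set_next_event	= foo_timer_set_next_event,
};

static void __init foo_timer_init(void)
{
	foo_timer_ce.cpumask = cpumask_of(smp_processor_id());
	/* 24MHz clock, at least 2 ticks, at most 32bit ticks */
	clockevents_config_and_register(&foo_timer_ce, 24000000, 2,
					0xffffffff);
}
#endif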
int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (dev->mode == CLOCK_EVT_MODE_ONESHOT)
		return clockevents_program_event(dev, dev->next_event, false);

	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		dev->set_mode(CLOCK_EVT_MODE_PERIODIC, dev);

	return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = tick_broadcast_update_freq(dev, freq);
	if (ret == -ENODEV)
		ret = __clockevents_update_freq(dev, freq);
	local_irq_restore(flags);
	return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
		clockevents_shutdown(new);
	}
	local_irq_restore(flags);
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend)
			dev->suspend(dev);
}
/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume)
			dev->resume(dev);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 * @reason:	reason code (CLOCK_EVT_NOTIFY_*)
 * @arg:	reason specific argument
 *
 * Returns 0 on success, any other value on error
 */
int clockevents_notify(unsigned long reason, void *arg)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;
	int cpu, ret = 0;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, arg);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		ret = tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DYING:
		tick_handover_do_timer(arg);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		tick_shutdown_broadcast_oneshot(arg);
		tick_shutdown_broadcast(arg);
		tick_shutdown(arg);
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
			list_del(&dev->list);
		/*
		 * Now check whether the CPU has left unused per cpu devices
		 */
		cpu = *((int *)arg);
		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
			if (cpumask_test_cpu(cpu, dev->cpumask) &&
			    cpumask_weight(dev->cpumask) == 1 &&
			    !tick_is_broadcast_device(dev)) {
				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
				list_del(&dev->list);
			}
		}
		break;
	default:
		break;
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(clockevents_notify);
#ifdef CONFIG_SYSFS
struct bus_type clockevents_subsys = {
	.name		= "clockevents",
	.dev_name	= "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct tick_device *td;
	ssize_t count = 0;

	raw_spin_lock_irq(&clockevents_lock);
	td = tick_get_tick_dev(dev);
	if (td && td->evtdev)
		count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
	raw_spin_unlock_irq(&clockevents_lock);
	return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	char name[CS_NAME_LEN];
	ssize_t ret = sysfs_get_uname(buf, name, count);
	struct clock_event_device *ce;

	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clockevents_mutex);
	raw_spin_lock_irq(&clockevents_lock);
	list_for_each_entry(ce, &clockevent_devices, list) {
		if (!strcmp(ce->name, name)) {
			ret = __clockevents_try_unbind(ce, dev->id);
			break;
		}
	}
	raw_spin_unlock_irq(&clockevents_lock);
	/*
	 * We hold clockevents_mutex, so ce can't go away
	 */
	if (ret == -EAGAIN)
		ret = clockevents_unbind(ce, dev->id);
	mutex_unlock(&clockevents_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);
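/*
 * Illustrative shell usage (path per the subsystem layout registered
 * below, device name made up):
 *
 *	echo foo-timer > /sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * would unbind the device named "foo-timer" from CPU0 and install the
 * best rated replacement via clockevents_replace().
 */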
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
	.init_name	= "broadcast",
	.id		= 0,
	.bus		= &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return dev == &tick_bc_dev ? tick_get_broadcast_device() :
		&per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
	int err = device_register(&tick_bc_dev);

	if (!err)
		err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
	return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */

static int __init tick_init_sysfs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
		int err;

		dev->id = cpu;
		dev->bus = &clockevents_subsys;
		err = device_register(dev);
		if (!err)
			err = device_create_file(dev, &dev_attr_current_device);
		if (!err)
			err = device_create_file(dev, &dev_attr_unbind_device);
		if (err)
			return err;
	}
	return tick_broadcast_init_sysfs();
}
static int __init clockevents_init_sysfs(void)
{
	int err = subsys_system_register(&clockevents_subsys, NULL);

	if (!err)
		err = tick_init_sysfs();
	return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* CONFIG_SYSFS */

#endif /* CONFIG_GENERIC_CLOCKEVENTS */