1d316c57fSThomas Gleixner /* 2d316c57fSThomas Gleixner * linux/kernel/time/clockevents.c 3d316c57fSThomas Gleixner * 4d316c57fSThomas Gleixner * This file contains functions which manage clock event devices. 5d316c57fSThomas Gleixner * 6d316c57fSThomas Gleixner * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> 7d316c57fSThomas Gleixner * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar 8d316c57fSThomas Gleixner * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner 9d316c57fSThomas Gleixner * 10d316c57fSThomas Gleixner * This code is licenced under the GPL version 2. For details see 11d316c57fSThomas Gleixner * kernel-base/COPYING. 12d316c57fSThomas Gleixner */ 13d316c57fSThomas Gleixner 14d316c57fSThomas Gleixner #include <linux/clockchips.h> 15d316c57fSThomas Gleixner #include <linux/hrtimer.h> 16d316c57fSThomas Gleixner #include <linux/init.h> 17d316c57fSThomas Gleixner #include <linux/module.h> 18d316c57fSThomas Gleixner #include <linux/smp.h> 19501f8670SThomas Gleixner #include <linux/device.h> 20d316c57fSThomas Gleixner 218e1a928aSH Hartley Sweeten #include "tick-internal.h" 228e1a928aSH Hartley Sweeten 23d316c57fSThomas Gleixner /* The registered clock event devices */ 24d316c57fSThomas Gleixner static LIST_HEAD(clockevent_devices); 25d316c57fSThomas Gleixner static LIST_HEAD(clockevents_released); 26d316c57fSThomas Gleixner /* Protection for the above */ 27b5f91da0SThomas Gleixner static DEFINE_RAW_SPINLOCK(clockevents_lock); 2803e13cf5SThomas Gleixner /* Protection for unbind operations */ 2903e13cf5SThomas Gleixner static DEFINE_MUTEX(clockevents_mutex); 3003e13cf5SThomas Gleixner 3103e13cf5SThomas Gleixner struct ce_unbind { 3203e13cf5SThomas Gleixner struct clock_event_device *ce; 3303e13cf5SThomas Gleixner int res; 3403e13cf5SThomas Gleixner }; 35d316c57fSThomas Gleixner 3697b94106SThomas Gleixner static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt, 3797b94106SThomas Gleixner bool ismax) 3897b94106SThomas Gleixner { 3997b94106SThomas Gleixner u64 clc = (u64) latch << evt->shift; 4097b94106SThomas Gleixner u64 rnd; 4197b94106SThomas Gleixner 4297b94106SThomas Gleixner if (unlikely(!evt->mult)) { 4397b94106SThomas Gleixner evt->mult = 1; 4497b94106SThomas Gleixner WARN_ON(1); 4597b94106SThomas Gleixner } 4697b94106SThomas Gleixner rnd = (u64) evt->mult - 1; 4797b94106SThomas Gleixner 4897b94106SThomas Gleixner /* 4997b94106SThomas Gleixner * Upper bound sanity check. If the backwards conversion is 5097b94106SThomas Gleixner * not equal latch, we know that the above shift overflowed. 5197b94106SThomas Gleixner */ 5297b94106SThomas Gleixner if ((clc >> evt->shift) != (u64)latch) 5397b94106SThomas Gleixner clc = ~0ULL; 5497b94106SThomas Gleixner 5597b94106SThomas Gleixner /* 5697b94106SThomas Gleixner * Scaled math oddities: 5797b94106SThomas Gleixner * 5897b94106SThomas Gleixner * For mult <= (1 << shift) we can safely add mult - 1 to 5997b94106SThomas Gleixner * prevent integer rounding loss. So the backwards conversion 6097b94106SThomas Gleixner * from nsec to device ticks will be correct. 6197b94106SThomas Gleixner * 6297b94106SThomas Gleixner * For mult > (1 << shift), i.e. device frequency is > 1GHz we 6397b94106SThomas Gleixner * need to be careful. Adding mult - 1 will result in a value 6497b94106SThomas Gleixner * which when converted back to device ticks can be larger 6597b94106SThomas Gleixner * than latch by up to (mult - 1) >> shift. For the min_delta 6697b94106SThomas Gleixner * calculation we still want to apply this in order to stay 6797b94106SThomas Gleixner * above the minimum device ticks limit. For the upper limit 6897b94106SThomas Gleixner * we would end up with a latch value larger than the upper 6997b94106SThomas Gleixner * limit of the device, so we omit the add to stay below the 7097b94106SThomas Gleixner * device upper boundary. 7197b94106SThomas Gleixner * 7297b94106SThomas Gleixner * Also omit the add if it would overflow the u64 boundary. 7397b94106SThomas Gleixner */ 7497b94106SThomas Gleixner if ((~0ULL - clc > rnd) && 7510632008SThomas Gleixner (!ismax || evt->mult <= (1ULL << evt->shift))) 7697b94106SThomas Gleixner clc += rnd; 7797b94106SThomas Gleixner 7897b94106SThomas Gleixner do_div(clc, evt->mult); 7997b94106SThomas Gleixner 8097b94106SThomas Gleixner /* Deltas less than 1usec are pointless noise */ 8197b94106SThomas Gleixner return clc > 1000 ? clc : 1000; 8297b94106SThomas Gleixner } 8397b94106SThomas Gleixner 84d316c57fSThomas Gleixner /** 85d316c57fSThomas Gleixner * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds 86d316c57fSThomas Gleixner * @latch: value to convert 87d316c57fSThomas Gleixner * @evt: pointer to clock event device descriptor 88d316c57fSThomas Gleixner * 89d316c57fSThomas Gleixner * Math helper, returns latch value converted to nanoseconds (bound checked) 90d316c57fSThomas Gleixner */ 9197813f2fSJon Hunter u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt) 92d316c57fSThomas Gleixner { 9397b94106SThomas Gleixner return cev_delta2ns(latch, evt, false); 94d316c57fSThomas Gleixner } 95c81fc2c3SMagnus Damm EXPORT_SYMBOL_GPL(clockevent_delta2ns); 96d316c57fSThomas Gleixner 9777e32c89SViresh Kumar static int __clockevents_set_state(struct clock_event_device *dev, 9877e32c89SViresh Kumar enum clock_event_state state) 99bd624d75SViresh Kumar { 100bd624d75SViresh Kumar /* Transition with legacy set_mode() callback */ 101bd624d75SViresh Kumar if (dev->set_mode) { 102bd624d75SViresh Kumar /* Legacy callback doesn't support new modes */ 10377e32c89SViresh Kumar if (state > CLOCK_EVT_STATE_ONESHOT) 104bd624d75SViresh Kumar return -ENOSYS; 10577e32c89SViresh Kumar /* 10677e32c89SViresh Kumar * 'clock_event_state' and 'clock_event_mode' have 1-to-1 10777e32c89SViresh Kumar * mapping until *_ONESHOT, and so a simple cast will work. 10877e32c89SViresh Kumar */ 10977e32c89SViresh Kumar dev->set_mode((enum clock_event_mode)state, dev); 11077e32c89SViresh Kumar dev->mode = (enum clock_event_mode)state; 111bd624d75SViresh Kumar return 0; 112bd624d75SViresh Kumar } 113bd624d75SViresh Kumar 114bd624d75SViresh Kumar if (dev->features & CLOCK_EVT_FEAT_DUMMY) 115bd624d75SViresh Kumar return 0; 116bd624d75SViresh Kumar 11777e32c89SViresh Kumar /* Transition with new state-specific callbacks */ 11877e32c89SViresh Kumar switch (state) { 11977e32c89SViresh Kumar case CLOCK_EVT_STATE_DETACHED: 120149aabccSViresh Kumar /* The clockevent device is getting replaced. Shut it down. */ 121bd624d75SViresh Kumar 12277e32c89SViresh Kumar case CLOCK_EVT_STATE_SHUTDOWN: 12377e32c89SViresh Kumar return dev->set_state_shutdown(dev); 124bd624d75SViresh Kumar 12577e32c89SViresh Kumar case CLOCK_EVT_STATE_PERIODIC: 126bd624d75SViresh Kumar /* Core internal bug */ 127bd624d75SViresh Kumar if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC)) 128bd624d75SViresh Kumar return -ENOSYS; 12977e32c89SViresh Kumar return dev->set_state_periodic(dev); 130bd624d75SViresh Kumar 13177e32c89SViresh Kumar case CLOCK_EVT_STATE_ONESHOT: 132bd624d75SViresh Kumar /* Core internal bug */ 133bd624d75SViresh Kumar if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) 134bd624d75SViresh Kumar return -ENOSYS; 13577e32c89SViresh Kumar return dev->set_state_oneshot(dev); 136bd624d75SViresh Kumar 1378fff52fdSViresh Kumar case CLOCK_EVT_STATE_ONESHOT_STOPPED: 1388fff52fdSViresh Kumar /* Core internal bug */ 1398fff52fdSViresh Kumar if (WARN_ONCE(dev->state != CLOCK_EVT_STATE_ONESHOT, 1408fff52fdSViresh Kumar "Current state: %d\n", dev->state)) 1418fff52fdSViresh Kumar return -EINVAL; 1428fff52fdSViresh Kumar 1438fff52fdSViresh Kumar if (dev->set_state_oneshot_stopped) 1448fff52fdSViresh Kumar return dev->set_state_oneshot_stopped(dev); 1458fff52fdSViresh Kumar else 1468fff52fdSViresh Kumar return -ENOSYS; 1478fff52fdSViresh Kumar 148bd624d75SViresh Kumar default: 149bd624d75SViresh Kumar return -ENOSYS; 150bd624d75SViresh Kumar } 151bd624d75SViresh Kumar } 152bd624d75SViresh Kumar 153d316c57fSThomas Gleixner /** 15477e32c89SViresh Kumar * clockevents_set_state - set the operating state of a clock event device 155d316c57fSThomas Gleixner * @dev: device to modify 15677e32c89SViresh Kumar * @state: new state 157d316c57fSThomas Gleixner * 158d316c57fSThomas Gleixner * Must be called with interrupts disabled ! 159d316c57fSThomas Gleixner */ 16077e32c89SViresh Kumar void clockevents_set_state(struct clock_event_device *dev, 16177e32c89SViresh Kumar enum clock_event_state state) 162d316c57fSThomas Gleixner { 16377e32c89SViresh Kumar if (dev->state != state) { 16477e32c89SViresh Kumar if (__clockevents_set_state(dev, state)) 165bd624d75SViresh Kumar return; 166bd624d75SViresh Kumar 16777e32c89SViresh Kumar dev->state = state; 1682d68259dSMagnus Damm 1692d68259dSMagnus Damm /* 1702d68259dSMagnus Damm * A nsec2cyc multiplicator of 0 is invalid and we'd crash 1712d68259dSMagnus Damm * on it, so fix it up and emit a warning: 1722d68259dSMagnus Damm */ 17377e32c89SViresh Kumar if (state == CLOCK_EVT_STATE_ONESHOT) { 1742d68259dSMagnus Damm if (unlikely(!dev->mult)) { 1752d68259dSMagnus Damm dev->mult = 1; 1762d68259dSMagnus Damm WARN_ON(1); 1772d68259dSMagnus Damm } 1782d68259dSMagnus Damm } 179d316c57fSThomas Gleixner } 180d316c57fSThomas Gleixner } 181d316c57fSThomas Gleixner 182d316c57fSThomas Gleixner /** 1832344abbcSThomas Gleixner * clockevents_shutdown - shutdown the device and clear next_event 1842344abbcSThomas Gleixner * @dev: device to shutdown 1852344abbcSThomas Gleixner */ 1862344abbcSThomas Gleixner void clockevents_shutdown(struct clock_event_device *dev) 1872344abbcSThomas Gleixner { 18877e32c89SViresh Kumar clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN); 1892344abbcSThomas Gleixner dev->next_event.tv64 = KTIME_MAX; 1902344abbcSThomas Gleixner } 1912344abbcSThomas Gleixner 192554ef387SViresh Kumar /** 193554ef387SViresh Kumar * clockevents_tick_resume - Resume the tick device before using it again 194554ef387SViresh Kumar * @dev: device to resume 195554ef387SViresh Kumar */ 196554ef387SViresh Kumar int clockevents_tick_resume(struct clock_event_device *dev) 197554ef387SViresh Kumar { 198554ef387SViresh Kumar int ret = 0; 199554ef387SViresh Kumar 20077e32c89SViresh Kumar if (dev->set_mode) { 201554ef387SViresh Kumar dev->set_mode(CLOCK_EVT_MODE_RESUME, dev); 202554ef387SViresh Kumar dev->mode = CLOCK_EVT_MODE_RESUME; 20377e32c89SViresh Kumar } else if (dev->tick_resume) { 20477e32c89SViresh Kumar ret = dev->tick_resume(dev); 20577e32c89SViresh Kumar } 206554ef387SViresh Kumar 207554ef387SViresh Kumar return ret; 208554ef387SViresh Kumar } 209554ef387SViresh Kumar 210d1748302SMartin Schwidefsky #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST 211d1748302SMartin Schwidefsky 212d1748302SMartin Schwidefsky /* Limit min_delta to a jiffie */ 213d1748302SMartin Schwidefsky #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ) 214d1748302SMartin Schwidefsky 215d1748302SMartin Schwidefsky /** 216d1748302SMartin Schwidefsky * clockevents_increase_min_delta - raise minimum delta of a clock event device 217d1748302SMartin Schwidefsky * @dev: device to increase the minimum delta 218d1748302SMartin Schwidefsky * 219d1748302SMartin Schwidefsky * Returns 0 on success, -ETIME when the minimum delta reached the limit. 220d1748302SMartin Schwidefsky */ 221d1748302SMartin Schwidefsky static int clockevents_increase_min_delta(struct clock_event_device *dev) 222d1748302SMartin Schwidefsky { 223d1748302SMartin Schwidefsky /* Nothing to do if we already reached the limit */ 224d1748302SMartin Schwidefsky if (dev->min_delta_ns >= MIN_DELTA_LIMIT) { 225504d5874SJan Kara printk_deferred(KERN_WARNING 226504d5874SJan Kara "CE: Reprogramming failure. Giving up\n"); 227d1748302SMartin Schwidefsky dev->next_event.tv64 = KTIME_MAX; 228d1748302SMartin Schwidefsky return -ETIME; 229d1748302SMartin Schwidefsky } 230d1748302SMartin Schwidefsky 231d1748302SMartin Schwidefsky if (dev->min_delta_ns < 5000) 232d1748302SMartin Schwidefsky dev->min_delta_ns = 5000; 233d1748302SMartin Schwidefsky else 234d1748302SMartin Schwidefsky dev->min_delta_ns += dev->min_delta_ns >> 1; 235d1748302SMartin Schwidefsky 236d1748302SMartin Schwidefsky if (dev->min_delta_ns > MIN_DELTA_LIMIT) 237d1748302SMartin Schwidefsky dev->min_delta_ns = MIN_DELTA_LIMIT; 238d1748302SMartin Schwidefsky 239504d5874SJan Kara printk_deferred(KERN_WARNING 240504d5874SJan Kara "CE: %s increased min_delta_ns to %llu nsec\n", 241d1748302SMartin Schwidefsky dev->name ? dev->name : "?", 242d1748302SMartin Schwidefsky (unsigned long long) dev->min_delta_ns); 243d1748302SMartin Schwidefsky return 0; 244d1748302SMartin Schwidefsky } 245d1748302SMartin Schwidefsky 246d1748302SMartin Schwidefsky /** 247d1748302SMartin Schwidefsky * clockevents_program_min_delta - Set clock event device to the minimum delay. 248d1748302SMartin Schwidefsky * @dev: device to program 249d1748302SMartin Schwidefsky * 250d1748302SMartin Schwidefsky * Returns 0 on success, -ETIME when the retry loop failed. 251d1748302SMartin Schwidefsky */ 252d1748302SMartin Schwidefsky static int clockevents_program_min_delta(struct clock_event_device *dev) 253d1748302SMartin Schwidefsky { 254d1748302SMartin Schwidefsky unsigned long long clc; 255d1748302SMartin Schwidefsky int64_t delta; 256d1748302SMartin Schwidefsky int i; 257d1748302SMartin Schwidefsky 258d1748302SMartin Schwidefsky for (i = 0;;) { 259d1748302SMartin Schwidefsky delta = dev->min_delta_ns; 260d1748302SMartin Schwidefsky dev->next_event = ktime_add_ns(ktime_get(), delta); 261d1748302SMartin Schwidefsky 26277e32c89SViresh Kumar if (dev->state == CLOCK_EVT_STATE_SHUTDOWN) 263d1748302SMartin Schwidefsky return 0; 264d1748302SMartin Schwidefsky 265d1748302SMartin Schwidefsky dev->retries++; 266d1748302SMartin Schwidefsky clc = ((unsigned long long) delta * dev->mult) >> dev->shift; 267d1748302SMartin Schwidefsky if (dev->set_next_event((unsigned long) clc, dev) == 0) 268d1748302SMartin Schwidefsky return 0; 269d1748302SMartin Schwidefsky 270d1748302SMartin Schwidefsky if (++i > 2) { 271d1748302SMartin Schwidefsky /* 272d1748302SMartin Schwidefsky * We tried 3 times to program the device with the 273d1748302SMartin Schwidefsky * given min_delta_ns. Try to increase the minimum 274d1748302SMartin Schwidefsky * delta, if that fails as well get out of here. 275d1748302SMartin Schwidefsky */ 276d1748302SMartin Schwidefsky if (clockevents_increase_min_delta(dev)) 277d1748302SMartin Schwidefsky return -ETIME; 278d1748302SMartin Schwidefsky i = 0; 279d1748302SMartin Schwidefsky } 280d1748302SMartin Schwidefsky } 281d1748302SMartin Schwidefsky } 282d1748302SMartin Schwidefsky 283d1748302SMartin Schwidefsky #else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */ 284d1748302SMartin Schwidefsky 285d1748302SMartin Schwidefsky /** 286d1748302SMartin Schwidefsky * clockevents_program_min_delta - Set clock event device to the minimum delay. 287d1748302SMartin Schwidefsky * @dev: device to program 288d1748302SMartin Schwidefsky * 289d1748302SMartin Schwidefsky * Returns 0 on success, -ETIME when the retry loop failed. 290d1748302SMartin Schwidefsky */ 291d1748302SMartin Schwidefsky static int clockevents_program_min_delta(struct clock_event_device *dev) 292d1748302SMartin Schwidefsky { 293d1748302SMartin Schwidefsky unsigned long long clc; 294d1748302SMartin Schwidefsky int64_t delta; 295d1748302SMartin Schwidefsky 296d1748302SMartin Schwidefsky delta = dev->min_delta_ns; 297d1748302SMartin Schwidefsky dev->next_event = ktime_add_ns(ktime_get(), delta); 298d1748302SMartin Schwidefsky 29977e32c89SViresh Kumar if (dev->state == CLOCK_EVT_STATE_SHUTDOWN) 300d1748302SMartin Schwidefsky return 0; 301d1748302SMartin Schwidefsky 302d1748302SMartin Schwidefsky dev->retries++; 303d1748302SMartin Schwidefsky clc = ((unsigned long long) delta * dev->mult) >> dev->shift; 304d1748302SMartin Schwidefsky return dev->set_next_event((unsigned long) clc, dev); 305d1748302SMartin Schwidefsky } 306d1748302SMartin Schwidefsky 307d1748302SMartin Schwidefsky #endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */ 308d1748302SMartin Schwidefsky 3092344abbcSThomas Gleixner /** 310d316c57fSThomas Gleixner * clockevents_program_event - Reprogram the clock event device. 311d1748302SMartin Schwidefsky * @dev: device to program 312d316c57fSThomas Gleixner * @expires: absolute expiry time (monotonic clock) 313d1748302SMartin Schwidefsky * @force: program minimum delay if expires can not be set 314d316c57fSThomas Gleixner * 315d316c57fSThomas Gleixner * Returns 0 on success, -ETIME when the event is in the past. 316d316c57fSThomas Gleixner */ 317d316c57fSThomas Gleixner int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, 318d1748302SMartin Schwidefsky bool force) 319d316c57fSThomas Gleixner { 320d316c57fSThomas Gleixner unsigned long long clc; 321d316c57fSThomas Gleixner int64_t delta; 322d1748302SMartin Schwidefsky int rc; 323d316c57fSThomas Gleixner 324167b1de3SThomas Gleixner if (unlikely(expires.tv64 < 0)) { 325167b1de3SThomas Gleixner WARN_ON_ONCE(1); 326167b1de3SThomas Gleixner return -ETIME; 327167b1de3SThomas Gleixner } 328167b1de3SThomas Gleixner 329d316c57fSThomas Gleixner dev->next_event = expires; 330d316c57fSThomas Gleixner 33177e32c89SViresh Kumar if (dev->state == CLOCK_EVT_STATE_SHUTDOWN) 332d316c57fSThomas Gleixner return 0; 333d316c57fSThomas Gleixner 33465516f8aSMartin Schwidefsky /* Shortcut for clockevent devices that can deal with ktime. */ 33565516f8aSMartin Schwidefsky if (dev->features & CLOCK_EVT_FEAT_KTIME) 33665516f8aSMartin Schwidefsky return dev->set_next_ktime(expires, dev); 33765516f8aSMartin Schwidefsky 338d1748302SMartin Schwidefsky delta = ktime_to_ns(ktime_sub(expires, ktime_get())); 339d1748302SMartin Schwidefsky if (delta <= 0) 340d1748302SMartin Schwidefsky return force ? clockevents_program_min_delta(dev) : -ETIME; 341d316c57fSThomas Gleixner 342d1748302SMartin Schwidefsky delta = min(delta, (int64_t) dev->max_delta_ns); 343d1748302SMartin Schwidefsky delta = max(delta, (int64_t) dev->min_delta_ns); 344d316c57fSThomas Gleixner 345d1748302SMartin Schwidefsky clc = ((unsigned long long) delta * dev->mult) >> dev->shift; 346d1748302SMartin Schwidefsky rc = dev->set_next_event((unsigned long) clc, dev); 347d1748302SMartin Schwidefsky 348d1748302SMartin Schwidefsky return (rc && force) ? clockevents_program_min_delta(dev) : rc; 349d316c57fSThomas Gleixner } 350d316c57fSThomas Gleixner 351d316c57fSThomas Gleixner /* 3523eb05676SLi Zefan * Called after a notify add to make devices available which were 353d316c57fSThomas Gleixner * released from the notifier call. 354d316c57fSThomas Gleixner */ 355d316c57fSThomas Gleixner static void clockevents_notify_released(void) 356d316c57fSThomas Gleixner { 357d316c57fSThomas Gleixner struct clock_event_device *dev; 358d316c57fSThomas Gleixner 359d316c57fSThomas Gleixner while (!list_empty(&clockevents_released)) { 360d316c57fSThomas Gleixner dev = list_entry(clockevents_released.next, 361d316c57fSThomas Gleixner struct clock_event_device, list); 362d316c57fSThomas Gleixner list_del(&dev->list); 363d316c57fSThomas Gleixner list_add(&dev->list, &clockevent_devices); 3647172a286SThomas Gleixner tick_check_new_device(dev); 365d316c57fSThomas Gleixner } 366d316c57fSThomas Gleixner } 367d316c57fSThomas Gleixner 36803e13cf5SThomas Gleixner /* 36903e13cf5SThomas Gleixner * Try to install a replacement clock event device 37003e13cf5SThomas Gleixner */ 37103e13cf5SThomas Gleixner static int clockevents_replace(struct clock_event_device *ced) 37203e13cf5SThomas Gleixner { 37303e13cf5SThomas Gleixner struct clock_event_device *dev, *newdev = NULL; 37403e13cf5SThomas Gleixner 37503e13cf5SThomas Gleixner list_for_each_entry(dev, &clockevent_devices, list) { 37677e32c89SViresh Kumar if (dev == ced || dev->state != CLOCK_EVT_STATE_DETACHED) 37703e13cf5SThomas Gleixner continue; 37803e13cf5SThomas Gleixner 37903e13cf5SThomas Gleixner if (!tick_check_replacement(newdev, dev)) 38003e13cf5SThomas Gleixner continue; 38103e13cf5SThomas Gleixner 38203e13cf5SThomas Gleixner if (!try_module_get(dev->owner)) 38303e13cf5SThomas Gleixner continue; 38403e13cf5SThomas Gleixner 38503e13cf5SThomas Gleixner if (newdev) 38603e13cf5SThomas Gleixner module_put(newdev->owner); 38703e13cf5SThomas Gleixner newdev = dev; 38803e13cf5SThomas Gleixner } 38903e13cf5SThomas Gleixner if (newdev) { 39003e13cf5SThomas Gleixner tick_install_replacement(newdev); 39103e13cf5SThomas Gleixner list_del_init(&ced->list); 39203e13cf5SThomas Gleixner } 39303e13cf5SThomas Gleixner return newdev ? 0 : -EBUSY; 39403e13cf5SThomas Gleixner } 39503e13cf5SThomas Gleixner 39603e13cf5SThomas Gleixner /* 39703e13cf5SThomas Gleixner * Called with clockevents_mutex and clockevents_lock held 39803e13cf5SThomas Gleixner */ 39903e13cf5SThomas Gleixner static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu) 40003e13cf5SThomas Gleixner { 40103e13cf5SThomas Gleixner /* Fast track. Device is unused */ 40277e32c89SViresh Kumar if (ced->state == CLOCK_EVT_STATE_DETACHED) { 40303e13cf5SThomas Gleixner list_del_init(&ced->list); 40403e13cf5SThomas Gleixner return 0; 40503e13cf5SThomas Gleixner } 40603e13cf5SThomas Gleixner 40703e13cf5SThomas Gleixner return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY; 40803e13cf5SThomas Gleixner } 40903e13cf5SThomas Gleixner 41003e13cf5SThomas Gleixner /* 41103e13cf5SThomas Gleixner * SMP function call to unbind a device 41203e13cf5SThomas Gleixner */ 41303e13cf5SThomas Gleixner static void __clockevents_unbind(void *arg) 41403e13cf5SThomas Gleixner { 41503e13cf5SThomas Gleixner struct ce_unbind *cu = arg; 41603e13cf5SThomas Gleixner int res; 41703e13cf5SThomas Gleixner 41803e13cf5SThomas Gleixner raw_spin_lock(&clockevents_lock); 41903e13cf5SThomas Gleixner res = __clockevents_try_unbind(cu->ce, smp_processor_id()); 42003e13cf5SThomas Gleixner if (res == -EAGAIN) 42103e13cf5SThomas Gleixner res = clockevents_replace(cu->ce); 42203e13cf5SThomas Gleixner cu->res = res; 42303e13cf5SThomas Gleixner raw_spin_unlock(&clockevents_lock); 42403e13cf5SThomas Gleixner } 42503e13cf5SThomas Gleixner 42603e13cf5SThomas Gleixner /* 42703e13cf5SThomas Gleixner * Issues smp function call to unbind a per cpu device. Called with 42803e13cf5SThomas Gleixner * clockevents_mutex held. 42903e13cf5SThomas Gleixner */ 43003e13cf5SThomas Gleixner static int clockevents_unbind(struct clock_event_device *ced, int cpu) 43103e13cf5SThomas Gleixner { 43203e13cf5SThomas Gleixner struct ce_unbind cu = { .ce = ced, .res = -ENODEV }; 43303e13cf5SThomas Gleixner 43403e13cf5SThomas Gleixner smp_call_function_single(cpu, __clockevents_unbind, &cu, 1); 43503e13cf5SThomas Gleixner return cu.res; 43603e13cf5SThomas Gleixner } 43703e13cf5SThomas Gleixner 43803e13cf5SThomas Gleixner /* 43903e13cf5SThomas Gleixner * Unbind a clockevents device. 44003e13cf5SThomas Gleixner */ 44103e13cf5SThomas Gleixner int clockevents_unbind_device(struct clock_event_device *ced, int cpu) 44203e13cf5SThomas Gleixner { 44303e13cf5SThomas Gleixner int ret; 44403e13cf5SThomas Gleixner 44503e13cf5SThomas Gleixner mutex_lock(&clockevents_mutex); 44603e13cf5SThomas Gleixner ret = clockevents_unbind(ced, cpu); 44703e13cf5SThomas Gleixner mutex_unlock(&clockevents_mutex); 44803e13cf5SThomas Gleixner return ret; 44903e13cf5SThomas Gleixner } 45032a15832SVitaly Kuznetsov EXPORT_SYMBOL_GPL(clockevents_unbind_device); 45103e13cf5SThomas Gleixner 45277e32c89SViresh Kumar /* Sanity check of state transition callbacks */ 453bd624d75SViresh Kumar static int clockevents_sanity_check(struct clock_event_device *dev) 454bd624d75SViresh Kumar { 455bd624d75SViresh Kumar /* Legacy set_mode() callback */ 456bd624d75SViresh Kumar if (dev->set_mode) { 457bd624d75SViresh Kumar /* We shouldn't be supporting new modes now */ 45877e32c89SViresh Kumar WARN_ON(dev->set_state_periodic || dev->set_state_oneshot || 4598fff52fdSViresh Kumar dev->set_state_shutdown || dev->tick_resume || 4608fff52fdSViresh Kumar dev->set_state_oneshot_stopped); 461de81e64bSViresh Kumar 462de81e64bSViresh Kumar BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); 463bd624d75SViresh Kumar return 0; 464bd624d75SViresh Kumar } 465bd624d75SViresh Kumar 466bd624d75SViresh Kumar if (dev->features & CLOCK_EVT_FEAT_DUMMY) 467bd624d75SViresh Kumar return 0; 468bd624d75SViresh Kumar 46977e32c89SViresh Kumar /* New state-specific callbacks */ 47077e32c89SViresh Kumar if (!dev->set_state_shutdown) 471bd624d75SViresh Kumar return -EINVAL; 472bd624d75SViresh Kumar 473bd624d75SViresh Kumar if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) && 47477e32c89SViresh Kumar !dev->set_state_periodic) 475bd624d75SViresh Kumar return -EINVAL; 476bd624d75SViresh Kumar 477bd624d75SViresh Kumar if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) && 47877e32c89SViresh Kumar !dev->set_state_oneshot) 479bd624d75SViresh Kumar return -EINVAL; 480bd624d75SViresh Kumar 481bd624d75SViresh Kumar return 0; 482bd624d75SViresh Kumar } 483bd624d75SViresh Kumar 484d316c57fSThomas Gleixner /** 485d316c57fSThomas Gleixner * clockevents_register_device - register a clock event device 486d316c57fSThomas Gleixner * @dev: device to register 487d316c57fSThomas Gleixner */ 488d316c57fSThomas Gleixner void clockevents_register_device(struct clock_event_device *dev) 489d316c57fSThomas Gleixner { 490f833bab8SSuresh Siddha unsigned long flags; 491f833bab8SSuresh Siddha 492bd624d75SViresh Kumar BUG_ON(clockevents_sanity_check(dev)); 493bd624d75SViresh Kumar 49477e32c89SViresh Kumar /* Initialize state to DETACHED */ 49577e32c89SViresh Kumar dev->state = CLOCK_EVT_STATE_DETACHED; 49677e32c89SViresh Kumar 4971b054b67SThomas Gleixner if (!dev->cpumask) { 4981b054b67SThomas Gleixner WARN_ON(num_possible_cpus() > 1); 4991b054b67SThomas Gleixner dev->cpumask = cpumask_of(smp_processor_id()); 5001b054b67SThomas Gleixner } 501320ab2b0SRusty Russell 502b5f91da0SThomas Gleixner raw_spin_lock_irqsave(&clockevents_lock, flags); 503d316c57fSThomas Gleixner 504d316c57fSThomas Gleixner list_add(&dev->list, &clockevent_devices); 5057172a286SThomas Gleixner tick_check_new_device(dev); 506d316c57fSThomas Gleixner clockevents_notify_released(); 507d316c57fSThomas Gleixner 508b5f91da0SThomas Gleixner raw_spin_unlock_irqrestore(&clockevents_lock, flags); 509d316c57fSThomas Gleixner } 510c81fc2c3SMagnus Damm EXPORT_SYMBOL_GPL(clockevents_register_device); 511d316c57fSThomas Gleixner 512e5400321SMagnus Damm void clockevents_config(struct clock_event_device *dev, u32 freq) 51357f0fcbeSThomas Gleixner { 514c0e299b1SThomas Gleixner u64 sec; 51557f0fcbeSThomas Gleixner 51657f0fcbeSThomas Gleixner if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) 51757f0fcbeSThomas Gleixner return; 51857f0fcbeSThomas Gleixner 51957f0fcbeSThomas Gleixner /* 52057f0fcbeSThomas Gleixner * Calculate the maximum number of seconds we can sleep. Limit 52157f0fcbeSThomas Gleixner * to 10 minutes for hardware which can program more than 52257f0fcbeSThomas Gleixner * 32bit ticks so we still get reasonable conversion values. 52357f0fcbeSThomas Gleixner */ 52457f0fcbeSThomas Gleixner sec = dev->max_delta_ticks; 52557f0fcbeSThomas Gleixner do_div(sec, freq); 52657f0fcbeSThomas Gleixner if (!sec) 52757f0fcbeSThomas Gleixner sec = 1; 52857f0fcbeSThomas Gleixner else if (sec > 600 && dev->max_delta_ticks > UINT_MAX) 52957f0fcbeSThomas Gleixner sec = 600; 53057f0fcbeSThomas Gleixner 53157f0fcbeSThomas Gleixner clockevents_calc_mult_shift(dev, freq, sec); 53297b94106SThomas Gleixner dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false); 53397b94106SThomas Gleixner dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true); 53457f0fcbeSThomas Gleixner } 53557f0fcbeSThomas Gleixner 53657f0fcbeSThomas Gleixner /** 53757f0fcbeSThomas Gleixner * clockevents_config_and_register - Configure and register a clock event device 53857f0fcbeSThomas Gleixner * @dev: device to register 53957f0fcbeSThomas Gleixner * @freq: The clock frequency 54057f0fcbeSThomas Gleixner * @min_delta: The minimum clock ticks to program in oneshot mode 54157f0fcbeSThomas Gleixner * @max_delta: The maximum clock ticks to program in oneshot mode 54257f0fcbeSThomas Gleixner * 54357f0fcbeSThomas Gleixner * min/max_delta can be 0 for devices which do not support oneshot mode. 54457f0fcbeSThomas Gleixner */ 54557f0fcbeSThomas Gleixner void clockevents_config_and_register(struct clock_event_device *dev, 54657f0fcbeSThomas Gleixner u32 freq, unsigned long min_delta, 54757f0fcbeSThomas Gleixner unsigned long max_delta) 54857f0fcbeSThomas Gleixner { 54957f0fcbeSThomas Gleixner dev->min_delta_ticks = min_delta; 55057f0fcbeSThomas Gleixner dev->max_delta_ticks = max_delta; 55157f0fcbeSThomas Gleixner clockevents_config(dev, freq); 55257f0fcbeSThomas Gleixner clockevents_register_device(dev); 55357f0fcbeSThomas Gleixner } 554c35ef95cSShawn Guo EXPORT_SYMBOL_GPL(clockevents_config_and_register); 55557f0fcbeSThomas Gleixner 556627ee794SThomas Gleixner int __clockevents_update_freq(struct clock_event_device *dev, u32 freq) 55780b816b7SThomas Gleixner { 55880b816b7SThomas Gleixner clockevents_config(dev, freq); 55980b816b7SThomas Gleixner 56077e32c89SViresh Kumar if (dev->state == CLOCK_EVT_STATE_ONESHOT) 561d1748302SMartin Schwidefsky return clockevents_program_event(dev, dev->next_event, false); 562fe79a9baSSoren Brinkmann 56377e32c89SViresh Kumar if (dev->state == CLOCK_EVT_STATE_PERIODIC) 56477e32c89SViresh Kumar return __clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC); 565fe79a9baSSoren Brinkmann 566fe79a9baSSoren Brinkmann return 0; 56780b816b7SThomas Gleixner } 56880b816b7SThomas Gleixner 569627ee794SThomas Gleixner /** 570627ee794SThomas Gleixner * clockevents_update_freq - Update frequency and reprogram a clock event device. 571627ee794SThomas Gleixner * @dev: device to modify 572627ee794SThomas Gleixner * @freq: new device frequency 573627ee794SThomas Gleixner * 574627ee794SThomas Gleixner * Reconfigure and reprogram a clock event device in oneshot 575627ee794SThomas Gleixner * mode. Must be called on the cpu for which the device delivers per 576627ee794SThomas Gleixner * cpu timer events. If called for the broadcast device the core takes 577627ee794SThomas Gleixner * care of serialization. 578627ee794SThomas Gleixner * 579627ee794SThomas Gleixner * Returns 0 on success, -ETIME when the event is in the past. 580627ee794SThomas Gleixner */ 581627ee794SThomas Gleixner int clockevents_update_freq(struct clock_event_device *dev, u32 freq) 582627ee794SThomas Gleixner { 583627ee794SThomas Gleixner unsigned long flags; 584627ee794SThomas Gleixner int ret; 585627ee794SThomas Gleixner 586627ee794SThomas Gleixner local_irq_save(flags); 587627ee794SThomas Gleixner ret = tick_broadcast_update_freq(dev, freq); 588627ee794SThomas Gleixner if (ret == -ENODEV) 589627ee794SThomas Gleixner ret = __clockevents_update_freq(dev, freq); 590627ee794SThomas Gleixner local_irq_restore(flags); 591627ee794SThomas Gleixner return ret; 592627ee794SThomas Gleixner } 593627ee794SThomas Gleixner 594d316c57fSThomas Gleixner /* 595d316c57fSThomas Gleixner * Noop handler when we shut down an event device 596d316c57fSThomas Gleixner */ 5977c1e7689SVenkatesh Pallipadi void clockevents_handle_noop(struct clock_event_device *dev) 598d316c57fSThomas Gleixner { 599d316c57fSThomas Gleixner } 600d316c57fSThomas Gleixner 601d316c57fSThomas Gleixner /** 602d316c57fSThomas Gleixner * clockevents_exchange_device - release and request clock devices 603d316c57fSThomas Gleixner * @old: device to release (can be NULL) 604d316c57fSThomas Gleixner * @new: device to request (can be NULL) 605d316c57fSThomas Gleixner * 606db6f672eSThomas Gleixner * Called from various tick functions with clockevents_lock held and 607db6f672eSThomas Gleixner * interrupts disabled. 608d316c57fSThomas Gleixner */ 609d316c57fSThomas Gleixner void clockevents_exchange_device(struct clock_event_device *old, 610d316c57fSThomas Gleixner struct clock_event_device *new) 611d316c57fSThomas Gleixner { 612d316c57fSThomas Gleixner /* 613d316c57fSThomas Gleixner * Caller releases a clock event device. We queue it into the 614d316c57fSThomas Gleixner * released list and do a notify add later. 615d316c57fSThomas Gleixner */ 616d316c57fSThomas Gleixner if (old) { 617ccf33d68SThomas Gleixner module_put(old->owner); 61877e32c89SViresh Kumar clockevents_set_state(old, CLOCK_EVT_STATE_DETACHED); 619d316c57fSThomas Gleixner list_del(&old->list); 620d316c57fSThomas Gleixner list_add(&old->list, &clockevents_released); 621d316c57fSThomas Gleixner } 622d316c57fSThomas Gleixner 623d316c57fSThomas Gleixner if (new) { 62477e32c89SViresh Kumar BUG_ON(new->state != CLOCK_EVT_STATE_DETACHED); 6252344abbcSThomas Gleixner clockevents_shutdown(new); 626d316c57fSThomas Gleixner } 627d316c57fSThomas Gleixner } 628d316c57fSThomas Gleixner 629adc78e6bSRafael J. Wysocki /** 630adc78e6bSRafael J. Wysocki * clockevents_suspend - suspend clock devices 631adc78e6bSRafael J. Wysocki */ 632adc78e6bSRafael J. Wysocki void clockevents_suspend(void) 633adc78e6bSRafael J. Wysocki { 634adc78e6bSRafael J. Wysocki struct clock_event_device *dev; 635adc78e6bSRafael J. Wysocki 636adc78e6bSRafael J. Wysocki list_for_each_entry_reverse(dev, &clockevent_devices, list) 637adc78e6bSRafael J. Wysocki if (dev->suspend) 638adc78e6bSRafael J. Wysocki dev->suspend(dev); 639adc78e6bSRafael J. Wysocki } 640adc78e6bSRafael J. Wysocki 641adc78e6bSRafael J. Wysocki /** 642adc78e6bSRafael J. Wysocki * clockevents_resume - resume clock devices 643adc78e6bSRafael J. Wysocki */ 644adc78e6bSRafael J. Wysocki void clockevents_resume(void) 645adc78e6bSRafael J. Wysocki { 646adc78e6bSRafael J. Wysocki struct clock_event_device *dev; 647adc78e6bSRafael J. Wysocki 648adc78e6bSRafael J. Wysocki list_for_each_entry(dev, &clockevent_devices, list) 649adc78e6bSRafael J. Wysocki if (dev->resume) 650adc78e6bSRafael J. Wysocki dev->resume(dev); 651adc78e6bSRafael J. Wysocki } 652adc78e6bSRafael J. Wysocki 653a49b116dSThomas Gleixner #ifdef CONFIG_HOTPLUG_CPU 654d316c57fSThomas Gleixner /** 655a49b116dSThomas Gleixner * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu 656d316c57fSThomas Gleixner */ 657a49b116dSThomas Gleixner void tick_cleanup_dead_cpu(int cpu) 658d316c57fSThomas Gleixner { 659bb6eddf7SThomas Gleixner struct clock_event_device *dev, *tmp; 660f833bab8SSuresh Siddha unsigned long flags; 6610b858e6fSLi Zefan 662b5f91da0SThomas Gleixner raw_spin_lock_irqsave(&clockevents_lock, flags); 663d316c57fSThomas Gleixner 664a49b116dSThomas Gleixner tick_shutdown_broadcast_oneshot(cpu); 665a49b116dSThomas Gleixner tick_shutdown_broadcast(cpu); 666a49b116dSThomas Gleixner tick_shutdown(cpu); 667d316c57fSThomas Gleixner /* 668d316c57fSThomas Gleixner * Unregister the clock event devices which were 669d316c57fSThomas Gleixner * released from the users in the notify chain. 670d316c57fSThomas Gleixner */ 671bb6eddf7SThomas Gleixner list_for_each_entry_safe(dev, tmp, &clockevents_released, list) 672bb6eddf7SThomas Gleixner list_del(&dev->list); 673bb6eddf7SThomas Gleixner /* 674bb6eddf7SThomas Gleixner * Now check whether the CPU has left unused per cpu devices 675bb6eddf7SThomas Gleixner */ 676bb6eddf7SThomas Gleixner list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) { 677bb6eddf7SThomas Gleixner if (cpumask_test_cpu(cpu, dev->cpumask) && 678ea9d8e3fSXiaotian Feng cpumask_weight(dev->cpumask) == 1 && 679ea9d8e3fSXiaotian Feng !tick_is_broadcast_device(dev)) { 68077e32c89SViresh Kumar BUG_ON(dev->state != CLOCK_EVT_STATE_DETACHED); 681bb6eddf7SThomas Gleixner list_del(&dev->list); 682bb6eddf7SThomas Gleixner } 683bb6eddf7SThomas Gleixner } 684b5f91da0SThomas Gleixner raw_spin_unlock_irqrestore(&clockevents_lock, flags); 685d316c57fSThomas Gleixner } 686a49b116dSThomas Gleixner #endif 687501f8670SThomas Gleixner 688501f8670SThomas Gleixner #ifdef CONFIG_SYSFS 689501f8670SThomas Gleixner struct bus_type clockevents_subsys = { 690501f8670SThomas Gleixner .name = "clockevents", 691501f8670SThomas Gleixner .dev_name = "clockevent", 692501f8670SThomas Gleixner }; 693501f8670SThomas Gleixner 694501f8670SThomas Gleixner static DEFINE_PER_CPU(struct device, tick_percpu_dev); 695501f8670SThomas Gleixner static struct tick_device *tick_get_tick_dev(struct device *dev); 696501f8670SThomas Gleixner 697501f8670SThomas Gleixner static ssize_t sysfs_show_current_tick_dev(struct device *dev, 698501f8670SThomas Gleixner struct device_attribute *attr, 699501f8670SThomas Gleixner char *buf) 700501f8670SThomas Gleixner { 701501f8670SThomas Gleixner struct tick_device *td; 702501f8670SThomas Gleixner ssize_t count = 0; 703501f8670SThomas Gleixner 704501f8670SThomas Gleixner raw_spin_lock_irq(&clockevents_lock); 705501f8670SThomas Gleixner td = tick_get_tick_dev(dev); 706501f8670SThomas Gleixner if (td && td->evtdev) 707501f8670SThomas Gleixner count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name); 708501f8670SThomas Gleixner raw_spin_unlock_irq(&clockevents_lock); 709501f8670SThomas Gleixner return count; 710501f8670SThomas Gleixner } 711501f8670SThomas Gleixner static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL); 712501f8670SThomas Gleixner 71303e13cf5SThomas Gleixner /* We don't support the abomination of removable broadcast devices */ 71403e13cf5SThomas Gleixner static ssize_t sysfs_unbind_tick_dev(struct device *dev, 71503e13cf5SThomas Gleixner struct device_attribute *attr, 71603e13cf5SThomas Gleixner const char *buf, size_t count) 71703e13cf5SThomas Gleixner { 71803e13cf5SThomas Gleixner char name[CS_NAME_LEN]; 719891292a7SPatrick Palka ssize_t ret = sysfs_get_uname(buf, name, count); 72003e13cf5SThomas Gleixner struct clock_event_device *ce; 72103e13cf5SThomas Gleixner 72203e13cf5SThomas Gleixner if (ret < 0) 72303e13cf5SThomas Gleixner return ret; 72403e13cf5SThomas Gleixner 72503e13cf5SThomas Gleixner ret = -ENODEV; 72603e13cf5SThomas Gleixner mutex_lock(&clockevents_mutex); 72703e13cf5SThomas Gleixner raw_spin_lock_irq(&clockevents_lock); 72803e13cf5SThomas Gleixner list_for_each_entry(ce, &clockevent_devices, list) { 72903e13cf5SThomas Gleixner if (!strcmp(ce->name, name)) { 73003e13cf5SThomas Gleixner ret = __clockevents_try_unbind(ce, dev->id); 73103e13cf5SThomas Gleixner break; 73203e13cf5SThomas Gleixner } 73303e13cf5SThomas Gleixner } 73403e13cf5SThomas Gleixner raw_spin_unlock_irq(&clockevents_lock); 73503e13cf5SThomas Gleixner /* 73603e13cf5SThomas Gleixner * We hold clockevents_mutex, so ce can't go away 73703e13cf5SThomas Gleixner */ 73803e13cf5SThomas Gleixner if (ret == -EAGAIN) 73903e13cf5SThomas Gleixner ret = clockevents_unbind(ce, dev->id); 74003e13cf5SThomas Gleixner mutex_unlock(&clockevents_mutex); 74103e13cf5SThomas Gleixner return ret ? ret : count; 74203e13cf5SThomas Gleixner } 74303e13cf5SThomas Gleixner static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev); 74403e13cf5SThomas Gleixner 745501f8670SThomas Gleixner #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 746501f8670SThomas Gleixner static struct device tick_bc_dev = { 747501f8670SThomas Gleixner .init_name = "broadcast", 748501f8670SThomas Gleixner .id = 0, 749501f8670SThomas Gleixner .bus = &clockevents_subsys, 750501f8670SThomas Gleixner }; 751501f8670SThomas Gleixner 752501f8670SThomas Gleixner static struct tick_device *tick_get_tick_dev(struct device *dev) 753501f8670SThomas Gleixner { 754501f8670SThomas Gleixner return dev == &tick_bc_dev ? tick_get_broadcast_device() : 755501f8670SThomas Gleixner &per_cpu(tick_cpu_device, dev->id); 756501f8670SThomas Gleixner } 757501f8670SThomas Gleixner 758501f8670SThomas Gleixner static __init int tick_broadcast_init_sysfs(void) 759501f8670SThomas Gleixner { 760501f8670SThomas Gleixner int err = device_register(&tick_bc_dev); 761501f8670SThomas Gleixner 762501f8670SThomas Gleixner if (!err) 763501f8670SThomas Gleixner err = device_create_file(&tick_bc_dev, &dev_attr_current_device); 764501f8670SThomas Gleixner return err; 765501f8670SThomas Gleixner } 766501f8670SThomas Gleixner #else 767501f8670SThomas Gleixner static struct tick_device *tick_get_tick_dev(struct device *dev) 768501f8670SThomas Gleixner { 769501f8670SThomas Gleixner return &per_cpu(tick_cpu_device, dev->id); 770501f8670SThomas Gleixner } 771501f8670SThomas Gleixner static inline int tick_broadcast_init_sysfs(void) { return 0; } 772de68d9b1SThomas Gleixner #endif 773501f8670SThomas Gleixner 774501f8670SThomas Gleixner static int __init tick_init_sysfs(void) 775501f8670SThomas Gleixner { 776501f8670SThomas Gleixner int cpu; 777501f8670SThomas Gleixner 778501f8670SThomas Gleixner for_each_possible_cpu(cpu) { 779501f8670SThomas Gleixner struct device *dev = &per_cpu(tick_percpu_dev, cpu); 780501f8670SThomas Gleixner int err; 781501f8670SThomas Gleixner 782501f8670SThomas Gleixner dev->id = cpu; 783501f8670SThomas Gleixner dev->bus = &clockevents_subsys; 784501f8670SThomas Gleixner err = device_register(dev); 785501f8670SThomas Gleixner if (!err) 786501f8670SThomas Gleixner err = device_create_file(dev, &dev_attr_current_device); 78703e13cf5SThomas Gleixner if (!err) 78803e13cf5SThomas Gleixner err = device_create_file(dev, &dev_attr_unbind_device); 789501f8670SThomas Gleixner if (err) 790501f8670SThomas Gleixner return err; 791501f8670SThomas Gleixner } 792501f8670SThomas Gleixner return tick_broadcast_init_sysfs(); 793501f8670SThomas Gleixner } 794501f8670SThomas Gleixner 795501f8670SThomas Gleixner static int __init clockevents_init_sysfs(void) 796501f8670SThomas Gleixner { 797501f8670SThomas Gleixner int err = subsys_system_register(&clockevents_subsys, NULL); 798501f8670SThomas Gleixner 799501f8670SThomas Gleixner if (!err) 800501f8670SThomas Gleixner err = tick_init_sysfs(); 801501f8670SThomas Gleixner return err; 802501f8670SThomas Gleixner } 803501f8670SThomas Gleixner device_initcall(clockevents_init_sysfs); 804501f8670SThomas Gleixner #endif /* SYSFS */ 805