1 /* 2 * linux/kernel/time/clockevents.c 3 * 4 * This file contains functions which manage clock event devices. 5 * 6 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> 7 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar 8 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner 9 * 10 * This code is licenced under the GPL version 2. For details see 11 * kernel-base/COPYING. 12 */ 13 14 #include <linux/clockchips.h> 15 #include <linux/hrtimer.h> 16 #include <linux/init.h> 17 #include <linux/module.h> 18 #include <linux/notifier.h> 19 #include <linux/smp.h> 20 21 #include "tick-internal.h" 22 23 /* The registered clock event devices */ 24 static LIST_HEAD(clockevent_devices); 25 static LIST_HEAD(clockevents_released); 26 27 /* Notification for clock events */ 28 static RAW_NOTIFIER_HEAD(clockevents_chain); 29 30 /* Protection for the above */ 31 static DEFINE_RAW_SPINLOCK(clockevents_lock); 32 33 /** 34 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds 35 * @latch: value to convert 36 * @evt: pointer to clock event device descriptor 37 * 38 * Math helper, returns latch value converted to nanoseconds (bound checked) 39 */ 40 u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt) 41 { 42 u64 clc = (u64) latch << evt->shift; 43 44 if (unlikely(!evt->mult)) { 45 evt->mult = 1; 46 WARN_ON(1); 47 } 48 49 do_div(clc, evt->mult); 50 if (clc < 1000) 51 clc = 1000; 52 if (clc > KTIME_MAX) 53 clc = KTIME_MAX; 54 55 return clc; 56 } 57 EXPORT_SYMBOL_GPL(clockevent_delta2ns); 58 59 /** 60 * clockevents_set_mode - set the operating mode of a clock event device 61 * @dev: device to modify 62 * @mode: new mode 63 * 64 * Must be called with interrupts disabled ! 65 */ 66 void clockevents_set_mode(struct clock_event_device *dev, 67 enum clock_event_mode mode) 68 { 69 if (dev->mode != mode) { 70 dev->set_mode(mode, dev); 71 dev->mode = mode; 72 73 /* 74 * A nsec2cyc multiplicator of 0 is invalid and we'd crash 75 * on it, so fix it up and emit a warning: 76 */ 77 if (mode == CLOCK_EVT_MODE_ONESHOT) { 78 if (unlikely(!dev->mult)) { 79 dev->mult = 1; 80 WARN_ON(1); 81 } 82 } 83 } 84 } 85 86 /** 87 * clockevents_shutdown - shutdown the device and clear next_event 88 * @dev: device to shutdown 89 */ 90 void clockevents_shutdown(struct clock_event_device *dev) 91 { 92 clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); 93 dev->next_event.tv64 = KTIME_MAX; 94 } 95 96 #ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST 97 98 /* Limit min_delta to a jiffie */ 99 #define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ) 100 101 /** 102 * clockevents_increase_min_delta - raise minimum delta of a clock event device 103 * @dev: device to increase the minimum delta 104 * 105 * Returns 0 on success, -ETIME when the minimum delta reached the limit. 106 */ 107 static int clockevents_increase_min_delta(struct clock_event_device *dev) 108 { 109 /* Nothing to do if we already reached the limit */ 110 if (dev->min_delta_ns >= MIN_DELTA_LIMIT) { 111 printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n"); 112 dev->next_event.tv64 = KTIME_MAX; 113 return -ETIME; 114 } 115 116 if (dev->min_delta_ns < 5000) 117 dev->min_delta_ns = 5000; 118 else 119 dev->min_delta_ns += dev->min_delta_ns >> 1; 120 121 if (dev->min_delta_ns > MIN_DELTA_LIMIT) 122 dev->min_delta_ns = MIN_DELTA_LIMIT; 123 124 printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n", 125 dev->name ? dev->name : "?", 126 (unsigned long long) dev->min_delta_ns); 127 return 0; 128 } 129 130 /** 131 * clockevents_program_min_delta - Set clock event device to the minimum delay. 132 * @dev: device to program 133 * 134 * Returns 0 on success, -ETIME when the retry loop failed. 135 */ 136 static int clockevents_program_min_delta(struct clock_event_device *dev) 137 { 138 unsigned long long clc; 139 int64_t delta; 140 int i; 141 142 for (i = 0;;) { 143 delta = dev->min_delta_ns; 144 dev->next_event = ktime_add_ns(ktime_get(), delta); 145 146 if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) 147 return 0; 148 149 dev->retries++; 150 clc = ((unsigned long long) delta * dev->mult) >> dev->shift; 151 if (dev->set_next_event((unsigned long) clc, dev) == 0) 152 return 0; 153 154 if (++i > 2) { 155 /* 156 * We tried 3 times to program the device with the 157 * given min_delta_ns. Try to increase the minimum 158 * delta, if that fails as well get out of here. 159 */ 160 if (clockevents_increase_min_delta(dev)) 161 return -ETIME; 162 i = 0; 163 } 164 } 165 } 166 167 #else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */ 168 169 /** 170 * clockevents_program_min_delta - Set clock event device to the minimum delay. 171 * @dev: device to program 172 * 173 * Returns 0 on success, -ETIME when the retry loop failed. 174 */ 175 static int clockevents_program_min_delta(struct clock_event_device *dev) 176 { 177 unsigned long long clc; 178 int64_t delta; 179 180 delta = dev->min_delta_ns; 181 dev->next_event = ktime_add_ns(ktime_get(), delta); 182 183 if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) 184 return 0; 185 186 dev->retries++; 187 clc = ((unsigned long long) delta * dev->mult) >> dev->shift; 188 return dev->set_next_event((unsigned long) clc, dev); 189 } 190 191 #endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */ 192 193 /** 194 * clockevents_program_event - Reprogram the clock event device. 195 * @dev: device to program 196 * @expires: absolute expiry time (monotonic clock) 197 * @force: program minimum delay if expires can not be set 198 * 199 * Returns 0 on success, -ETIME when the event is in the past. 200 */ 201 int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, 202 bool force) 203 { 204 unsigned long long clc; 205 int64_t delta; 206 int rc; 207 208 if (unlikely(expires.tv64 < 0)) { 209 WARN_ON_ONCE(1); 210 return -ETIME; 211 } 212 213 dev->next_event = expires; 214 215 if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN) 216 return 0; 217 218 /* Shortcut for clockevent devices that can deal with ktime. */ 219 if (dev->features & CLOCK_EVT_FEAT_KTIME) 220 return dev->set_next_ktime(expires, dev); 221 222 delta = ktime_to_ns(ktime_sub(expires, ktime_get())); 223 if (delta <= 0) 224 return force ? clockevents_program_min_delta(dev) : -ETIME; 225 226 delta = min(delta, (int64_t) dev->max_delta_ns); 227 delta = max(delta, (int64_t) dev->min_delta_ns); 228 229 clc = ((unsigned long long) delta * dev->mult) >> dev->shift; 230 rc = dev->set_next_event((unsigned long) clc, dev); 231 232 return (rc && force) ? clockevents_program_min_delta(dev) : rc; 233 } 234 235 /** 236 * clockevents_register_notifier - register a clock events change listener 237 */ 238 int clockevents_register_notifier(struct notifier_block *nb) 239 { 240 unsigned long flags; 241 int ret; 242 243 raw_spin_lock_irqsave(&clockevents_lock, flags); 244 ret = raw_notifier_chain_register(&clockevents_chain, nb); 245 raw_spin_unlock_irqrestore(&clockevents_lock, flags); 246 247 return ret; 248 } 249 250 /* 251 * Notify about a clock event change. Called with clockevents_lock 252 * held. 253 */ 254 static void clockevents_do_notify(unsigned long reason, void *dev) 255 { 256 raw_notifier_call_chain(&clockevents_chain, reason, dev); 257 } 258 259 /* 260 * Called after a notify add to make devices available which were 261 * released from the notifier call. 262 */ 263 static void clockevents_notify_released(void) 264 { 265 struct clock_event_device *dev; 266 267 while (!list_empty(&clockevents_released)) { 268 dev = list_entry(clockevents_released.next, 269 struct clock_event_device, list); 270 list_del(&dev->list); 271 list_add(&dev->list, &clockevent_devices); 272 clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); 273 } 274 } 275 276 /** 277 * clockevents_register_device - register a clock event device 278 * @dev: device to register 279 */ 280 void clockevents_register_device(struct clock_event_device *dev) 281 { 282 unsigned long flags; 283 284 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); 285 if (!dev->cpumask) { 286 WARN_ON(num_possible_cpus() > 1); 287 dev->cpumask = cpumask_of(smp_processor_id()); 288 } 289 290 raw_spin_lock_irqsave(&clockevents_lock, flags); 291 292 list_add(&dev->list, &clockevent_devices); 293 clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); 294 clockevents_notify_released(); 295 296 raw_spin_unlock_irqrestore(&clockevents_lock, flags); 297 } 298 EXPORT_SYMBOL_GPL(clockevents_register_device); 299 300 void clockevents_config(struct clock_event_device *dev, u32 freq) 301 { 302 u64 sec; 303 304 if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) 305 return; 306 307 /* 308 * Calculate the maximum number of seconds we can sleep. Limit 309 * to 10 minutes for hardware which can program more than 310 * 32bit ticks so we still get reasonable conversion values. 311 */ 312 sec = dev->max_delta_ticks; 313 do_div(sec, freq); 314 if (!sec) 315 sec = 1; 316 else if (sec > 600 && dev->max_delta_ticks > UINT_MAX) 317 sec = 600; 318 319 clockevents_calc_mult_shift(dev, freq, sec); 320 dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev); 321 dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev); 322 } 323 324 /** 325 * clockevents_config_and_register - Configure and register a clock event device 326 * @dev: device to register 327 * @freq: The clock frequency 328 * @min_delta: The minimum clock ticks to program in oneshot mode 329 * @max_delta: The maximum clock ticks to program in oneshot mode 330 * 331 * min/max_delta can be 0 for devices which do not support oneshot mode. 332 */ 333 void clockevents_config_and_register(struct clock_event_device *dev, 334 u32 freq, unsigned long min_delta, 335 unsigned long max_delta) 336 { 337 dev->min_delta_ticks = min_delta; 338 dev->max_delta_ticks = max_delta; 339 clockevents_config(dev, freq); 340 clockevents_register_device(dev); 341 } 342 EXPORT_SYMBOL_GPL(clockevents_config_and_register); 343 344 /** 345 * clockevents_update_freq - Update frequency and reprogram a clock event device. 346 * @dev: device to modify 347 * @freq: new device frequency 348 * 349 * Reconfigure and reprogram a clock event device in oneshot 350 * mode. Must be called on the cpu for which the device delivers per 351 * cpu timer events with interrupts disabled! Returns 0 on success, 352 * -ETIME when the event is in the past. 353 */ 354 int clockevents_update_freq(struct clock_event_device *dev, u32 freq) 355 { 356 clockevents_config(dev, freq); 357 358 if (dev->mode != CLOCK_EVT_MODE_ONESHOT) 359 return 0; 360 361 return clockevents_program_event(dev, dev->next_event, false); 362 } 363 364 /* 365 * Noop handler when we shut down an event device 366 */ 367 void clockevents_handle_noop(struct clock_event_device *dev) 368 { 369 } 370 371 /** 372 * clockevents_exchange_device - release and request clock devices 373 * @old: device to release (can be NULL) 374 * @new: device to request (can be NULL) 375 * 376 * Called from the notifier chain. clockevents_lock is held already 377 */ 378 void clockevents_exchange_device(struct clock_event_device *old, 379 struct clock_event_device *new) 380 { 381 unsigned long flags; 382 383 local_irq_save(flags); 384 /* 385 * Caller releases a clock event device. We queue it into the 386 * released list and do a notify add later. 387 */ 388 if (old) { 389 clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); 390 list_del(&old->list); 391 list_add(&old->list, &clockevents_released); 392 } 393 394 if (new) { 395 BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED); 396 clockevents_shutdown(new); 397 } 398 local_irq_restore(flags); 399 } 400 401 /** 402 * clockevents_suspend - suspend clock devices 403 */ 404 void clockevents_suspend(void) 405 { 406 struct clock_event_device *dev; 407 408 list_for_each_entry_reverse(dev, &clockevent_devices, list) 409 if (dev->suspend) 410 dev->suspend(dev); 411 } 412 413 /** 414 * clockevents_resume - resume clock devices 415 */ 416 void clockevents_resume(void) 417 { 418 struct clock_event_device *dev; 419 420 list_for_each_entry(dev, &clockevent_devices, list) 421 if (dev->resume) 422 dev->resume(dev); 423 } 424 425 #ifdef CONFIG_GENERIC_CLOCKEVENTS 426 /** 427 * clockevents_notify - notification about relevant events 428 */ 429 void clockevents_notify(unsigned long reason, void *arg) 430 { 431 struct clock_event_device *dev, *tmp; 432 unsigned long flags; 433 int cpu; 434 435 raw_spin_lock_irqsave(&clockevents_lock, flags); 436 clockevents_do_notify(reason, arg); 437 438 switch (reason) { 439 case CLOCK_EVT_NOTIFY_CPU_DEAD: 440 /* 441 * Unregister the clock event devices which were 442 * released from the users in the notify chain. 443 */ 444 list_for_each_entry_safe(dev, tmp, &clockevents_released, list) 445 list_del(&dev->list); 446 /* 447 * Now check whether the CPU has left unused per cpu devices 448 */ 449 cpu = *((int *)arg); 450 list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) { 451 if (cpumask_test_cpu(cpu, dev->cpumask) && 452 cpumask_weight(dev->cpumask) == 1 && 453 !tick_is_broadcast_device(dev)) { 454 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); 455 list_del(&dev->list); 456 } 457 } 458 break; 459 default: 460 break; 461 } 462 raw_spin_unlock_irqrestore(&clockevents_lock, flags); 463 } 464 EXPORT_SYMBOL_GPL(clockevents_notify); 465 #endif 466