/*
 * linux/kernel/time/tick-common.c
 *
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;
int tick_do_timer_cpu __read_mostly = -1;
DEFINE_SPINLOCK(tick_device_lock);

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}

/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;

	return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
}

/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&xtime_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&xtime_lock);
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next;

	tick_periodic(cpu);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Set up the next period for devices which do not have
	 * periodic mode:
	 */
	next = ktime_add(dev->next_event, tick_period);
	for (;;) {
		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_periodic(cpu);
		next = ktime_add(next, tick_period);
	}
}

/*
 * Set up the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup? */
	if (!tick_device_is_functional(dev))
		return;

	if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		do {
			seq = read_seqbegin(&xtime_lock);
			next = tick_next_period;
		} while (read_seqretry(&xtime_lock, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		for (;;) {
			if (!clockevents_program_event(dev, next, ktime_get()))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}
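
/*
 * Illustrative sketch, not part of this file: how a minimal clockevent
 * driver registration would feed into this layer. Registration raises
 * the CLOCK_EVT_NOTIFY_ADD notification, which ends up in
 * tick_check_new_device() below. All "example_" names are hypothetical;
 * only clockevents_register_device() and the struct fields used here
 * are real. A complete driver would also fill in mult/shift and the
 * min/max delta limits.
 */
#if 0
static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	/* Program the hardware comparator 'delta' clocks ahead */
	return 0;
}

static void example_set_mode(enum clock_event_mode mode,
			     struct clock_event_device *evt)
{
	/* Switch the hardware between periodic/oneshot/shutdown */
}

static struct clock_event_device example_evt = {
	.name		= "example",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 300,
	.irq		= -1,	/* per cpu device, no affinity setup */
	.set_next_event	= example_set_next_event,
	.set_mode	= example_set_mode,
};

static void __init example_clockevent_init(void)
{
	/* A per cpu device advertises exactly the current cpu: */
	example_evt.cpumask = cpumask_of_cpu(smp_processor_id());
	clockevents_register_device(&example_evt);
}
#endif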

/*
 * Set up the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      cpumask_t cpumask)
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == -1) {
			tick_do_timer_cpu = cpu;
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Start up in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpus_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}

/*
 * Check whether the newly registered device should be used.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;
	cpumask_t cpumask;

	spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	if (!cpu_isset(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;
	cpumask = cpumask_of_cpu(cpu);

	/* cpu local device? */
	if (!cpus_equal(newdev->cpumask, cpumask)) {

		/*
		 * If the cpu affinity of the device interrupt cannot
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we have a cpu local device already, do not replace
		 * it with a non cpu local device.
		 */
		if (curdev && cpus_equal(curdev->cpumask, cpumask))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the
	 * oneshot feature.
	 */
	if (curdev) {
		/*
		 * Prefer oneshot capable devices!
		 */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
		/*
		 * Check the rating
		 */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the existing device, if any, with the new device. If
	 * the current device is the broadcast device, do not give it
	 * back to the clockevents layer!
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask);
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;

	spin_unlock_irqrestore(&tick_device_lock, flags);

	return ret;
}
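
/*
 * Illustrative sketch, not part of this file: the acceptance rule of
 * tick_check_new_device() above, restated as a hypothetical standalone
 * predicate. A oneshot capable device is kept even if a higher rated
 * periodic-only device shows up; among equally capable devices the
 * higher rating wins.
 */
#if 0
static int example_prefer_new(struct clock_event_device *cur,
			      struct clock_event_device *new)
{
	/* No current device: take whatever we get */
	if (!cur)
		return 1;
	/* Never trade oneshot capability for a better rating */
	if ((cur->features & CLOCK_EVT_FEAT_ONESHOT) &&
	    !(new->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
	/* Otherwise the higher rated device wins */
	return new->rating > cur->rating;
}
#endif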

/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU after another CPU has died, so we
 * cannot access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
static void tick_shutdown(unsigned int *cpup)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
	struct clock_event_device *dev = td->evtdev;
	unsigned long flags;

	spin_lock_irqsave(&tick_device_lock, flags);
	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent the clockevents layer from trying to call
		 * the set mode function of the dead device!
		 */
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		td->evtdev = NULL;
	}
	/* Transfer the do_timer job away from this cpu */
	if (*cpup == tick_do_timer_cpu) {
		int cpu = first_cpu(cpu_online_map);

		tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
	}
	spin_unlock_irqrestore(&tick_device_lock, flags);
}

static void tick_suspend(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;

	spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
	spin_unlock_irqrestore(&tick_device_lock, flags);
}

static void tick_resume(void)
{
	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
	unsigned long flags;
	int broadcast = tick_resume_broadcast();

	spin_lock_irqsave(&tick_device_lock, flags);
	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);

	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
	spin_unlock_irqrestore(&tick_device_lock, flags);
}

/*
 * Notification about clock event devices
 */
static int tick_notify(struct notifier_block *nb, unsigned long reason,
		       void *dev)
{
	switch (reason) {

	case CLOCK_EVT_NOTIFY_ADD:
		return tick_check_new_device(dev);

	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		tick_broadcast_on_off(reason, dev);
		break;

	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
		tick_broadcast_oneshot_control(reason);
		break;

	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		tick_shutdown_broadcast_oneshot(dev);
		tick_shutdown_broadcast(dev);
		tick_shutdown(dev);
		break;

	case CLOCK_EVT_NOTIFY_SUSPEND:
		tick_suspend();
		tick_suspend_broadcast();
		break;

	case CLOCK_EVT_NOTIFY_RESUME:
		tick_resume();
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block tick_notifier = {
	.notifier_call = tick_notify,
};

/**
 * tick_init - initialize the tick control
 *
 * Register the notifier with the clockevents framework
 */
void __init tick_init(void)
{
	clockevents_register_notifier(&tick_notifier);
}
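
/*
 * Illustrative sketch, not part of this file: a simplified
 * reconstruction of the delivery path in kernel/time/clockevents.c
 * which invokes the notifier registered by tick_init() above. Locking
 * and error handling are omitted; treat this as orientation, not as
 * the real implementation.
 */
#if 0
static RAW_NOTIFIER_HEAD(clockevents_chain);

int clockevents_register_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&clockevents_chain, nb);
}

/* Called e.g. with CLOCK_EVT_NOTIFY_ADD when a new device registers */
static void clockevents_do_notify(unsigned long reason, void *dev)
{
	raw_notifier_call_chain(&clockevents_chain, reason, dev);
}
#endif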