/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

struct tick_device tick_broadcast_device;
static cpumask_t tick_broadcast_mask;
static DEFINE_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

cpumask_t *tick_get_broadcast_mask(void)
{
	return &tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as the broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	if ((tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	     (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(NULL, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpus_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	return 1;
}

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

/*
 * Check if the device is dysfunctional and a placeholder which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpu_set(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			cpu_clear(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		}
	}
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
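/*
 * Illustrative sketch (not part of this file): a "placeholder" device
 * as handled above is one whose CLOCK_EVT_FEAT_DUMMY feature bit is
 * set, so tick_device_is_functional() fails for it.  An architecture
 * whose per-cpu timer cannot run on its own might register a device
 * roughly like the hypothetical one below; the names and the rating
 * value are made up for illustration:
 *
 *	static struct clock_event_device dummy_clockevent = {
 *		.name		= "dummy",
 *		.features	= CLOCK_EVT_FEAT_DUMMY,
 *		.rating		= 100,
 *	};
 *
 *	static void register_dummy_timer(void)
 *	{
 *		dummy_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
 *		clockevents_register_device(&dummy_clockevent);
 *	}
 *
 * Registration ends up in tick_device_uses_broadcast(), which sets
 * the cpu in tick_broadcast_mask and lets the broadcast device
 * deliver the periodic tick on its behalf.
 */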
/*
 * Broadcast the event to the cpus which are set in the mask
 */
static void tick_do_broadcast(cpumask_t mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check if the current cpu is in the mask
	 */
	if (cpu_isset(cpu, mask)) {
		cpu_clear(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpus_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		cpu = first_cpu(mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	cpumask_t mask;

	spin_lock(&tick_broadcast_lock);

	cpus_and(mask, cpu_online_map, tick_broadcast_mask);
	tick_do_broadcast(mask);

	spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Set up the next period for devices which do not have
	 * periodic mode:
	 */
	for (;;) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_do_periodic_broadcast();
	}
}
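/*
 * Illustrative sketch (not from this file): tick_do_broadcast() above
 * relies on the per-cpu device's ->broadcast() callback to kick the
 * remote cpus.  On x86 this is an IPI to the local apic timer vector;
 * a hypothetical implementation could look like the following, where
 * send_IPI_mask() and LOCAL_TIMER_VECTOR stand in for the arch
 * specific primitives:
 *
 *	static void lapic_timer_broadcast(cpumask_t mask)
 *	{
 *		send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 *	}
 *
 * Each cpu receiving the IPI runs its local timer interrupt, which in
 * turn invokes the event_handler of its (stopped) per-cpu device.
 */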
/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(void *why)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags, *reason = why;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_set(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				clockevents_set_mode(dev,
						     CLOCK_EVT_MODE_SHUTDOWN);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_clear(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpus_empty(tick_broadcast_mask))
		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	else {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpu_isset(*oncpu, cpu_online_map))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
					 &reason, 1);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpu_clear(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpus_empty(tick_broadcast_mask))
			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpus_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpu_isset(smp_processor_id(),
					      tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}
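/*
 * Usage sketch (assumed caller, not part of this file): idle code
 * about to enter a C-state which stops the local apic timer switches
 * the cpu to broadcast mode via the clockevents notify chain, which
 * ends up in tick_broadcast_on_off() above:
 *
 *	int cpu = smp_processor_id();
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
 *	... enter the deep C-state ...
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_OFF, &cpu);
 *
 * CLOCK_EVT_NOTIFY_BROADCAST_FORCE keeps the cpu in the broadcast
 * mask even after an OFF notification, for hardware where the local
 * timer is unusable regardless of the C-state.
 */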
#ifdef CONFIG_TICK_ONESHOT

static cpumask_t tick_broadcast_oneshot_mask;

/*
 * Debugging: see timer_list.c
 */
cpumask_t *tick_get_broadcast_oneshot_mask(void)
{
	return &tick_broadcast_oneshot_mask;
}

static int tick_broadcast_set_event(ktime_t expires, int force)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	ktime_t now = ktime_get();
	int res;

	for (;;) {
		res = clockevents_program_event(bc, expires, now);
		if (!res || !force)
			return res;
		now = ktime_get();
		expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
	}
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	cpumask_t mask;
	ktime_t now, next_event;
	int cpu;

	spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	mask = CPU_MASK_NONE;
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu_mask_nr(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64)
			cpu_set(cpu, mask);
		else if (td->evtdev->next_event.tv64 < next_event.tv64)
			next_event.tv64 = td->evtdev->next_event.tv64;
	}

	/*
	 * Wake up the cpus which have an expired event.
	 */
	tick_do_broadcast(mask);

	/*
	 * Two reasons for a reprogram:
	 *
	 * - The global event did not expire any CPU local
	 *   events. This happens in dyntick mode, as the maximum PIT
	 *   delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 *   in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If the event expired,
		 * repeat the above.
		 */
		if (tick_broadcast_set_event(next_event, 0))
			goto again;
	}
	spin_unlock(&tick_broadcast_lock);
}
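/*
 * Worked example for the handler above (numbers made up): assume cpu0
 * sleeps with a local event due at t=100us, cpu1 with one due at
 * t=250us, and the broadcast device fires at t=110us.  The scan puts
 * cpu0 into the wakeup mask (100 <= 110) and records 250us as
 * next_event for cpu1.  tick_do_broadcast() wakes cpu0, then the
 * broadcast device is reprogrammed for t=250us.  Should that time
 * already have passed when tick_broadcast_set_event() runs, the
 * programming fails and the handler jumps back to "again" to pick up
 * the now-expired event.
 */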
/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		goto out;

	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
			cpu_set(cpu, tick_broadcast_oneshot_mask);
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			if (dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(dev->next_event, 1);
		}
	} else {
		if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
			cpu_clear(cpu, tick_broadcast_oneshot_mask);
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 != KTIME_MAX)
				tick_program_event(dev->next_event, 1);
		}
	}

out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the one-shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpu_clear(cpu, tick_broadcast_oneshot_mask);
}

/**
 * tick_broadcast_setup_oneshot - set up the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	bc->event_handler = tick_handle_oneshot_broadcast;
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	bc->next_event.tv64 = KTIME_MAX;
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpu_clear(cpu, tick_broadcast_oneshot_mask);

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#endif
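/*
 * Usage sketch for the oneshot path (assumed caller, not part of this
 * file): with NOHZ/highres enabled the idle loop brackets a deep
 * C-state with the ENTER/EXIT notifications, which end up in
 * tick_broadcast_oneshot_control():
 *
 *	int cpu = smp_processor_id();
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 *	... local apic timer is stopped, cpu sleeps ...
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 *
 * ENTER shuts the local device down and makes sure the broadcast
 * device fires no later than the cpu's next_event; EXIT reactivates
 * the local device and reprograms its pending event, if any.
 */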