/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

struct tick_device tick_broadcast_device;
static cpumask_t tick_broadcast_mask;
static DEFINE_SPINLOCK(tick_broadcast_lock);

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

cpumask_t *tick_get_broadcast_mask(void)
{
	return &tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc && bc->mode == CLOCK_EVT_MODE_SHUTDOWN)
		tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as the broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	if (tick_broadcast_device.evtdev ||
	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(NULL, dev);
	tick_broadcast_device.evtdev = dev;
	if (!cpus_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	return 1;
}

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

/*
 * Check if the device is dysfunctional and a placeholder which needs
 * to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpu_set(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

/*
 * Broadcast the event to the cpus which are set in the mask
 */
int tick_do_broadcast(cpumask_t mask)
{
	int ret = 0, cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check if the current cpu is in the mask
	 */
	if (cpu_isset(cpu, mask)) {
		cpu_clear(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
		ret = 1;
	}

	if (!cpus_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * broadcast function of the first device. This works as long
		 * as we have this misfeature only on x86 (lapic).
		 */
		cpu = first_cpu(mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->broadcast(mask);
		ret = 1;
	}
	return ret;
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	cpumask_t mask;

	spin_lock(&tick_broadcast_lock);

	cpus_and(mask, cpu_online_map, tick_broadcast_mask);
	tick_do_broadcast(mask);

	spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	dev->next_event.tv64 = KTIME_MAX;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Set up the next period for devices which do not have
	 * periodic mode:
	 */
	for (;;) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(void *why)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags, *reason = why;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device in broadcast mode forever, or is it not
	 * affected by the powerstate?
	 */
	if (!dev || !tick_device_is_functional(dev) ||
	    !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_ON) {
		if (!cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_set(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				clockevents_set_mode(dev,
						     CLOCK_EVT_MODE_SHUTDOWN);
		}
	} else {
		if (cpu_isset(cpu, tick_broadcast_mask)) {
			cpu_clear(cpu, tick_broadcast_mask);
			if (td->mode == TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
	}

	if (cpus_empty(tick_broadcast_mask))
		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	else {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	int cpu = get_cpu();

	if (!cpu_isset(*oncpu, cpu_online_map)) {
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	} else {
		if (cpu == *oncpu)
			tick_do_broadcast_on_off(&reason);
		else
			smp_call_function_single(*oncpu,
						 tick_do_broadcast_on_off,
						 &reason, 1, 1);
	}
	put_cpu();
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpu_clear(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpus_empty(tick_broadcast_mask))
			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Shut down the broadcast device on suspend when it runs in periodic mode.
 */
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc && tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Restart the broadcast device on resume. Returns nonzero when the
 * current cpu needs to be serviced by the broadcast device.
 */
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpus_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpu_isset(smp_processor_id(),
					      tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}


#ifdef CONFIG_TICK_ONESHOT

static cpumask_t tick_broadcast_oneshot_mask;

/*
 * Debugging: see timer_list.c
 */
cpumask_t *tick_get_broadcast_oneshot_mask(void)
{
	return &tick_broadcast_oneshot_mask;
}

/*
 * Program the broadcast device for the given expiry time. When @force is
 * set, retry with the minimum delta until the programming succeeds.
 */
static int tick_broadcast_set_event(ktime_t expires, int force)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	ktime_t now = ktime_get();
	int res;

	for (;;) {
		res = clockevents_program_event(bc, expires, now);
		if (!res || !force)
			return res;
		now = ktime_get();
		expires = ktime_add(now, ktime_set(0, bc->min_delta_ns));
	}
}

/*
 * Restore oneshot mode on the broadcast device after resume. Returns
 * nonzero when the current cpu depends on the broadcast device.
 */
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	if (!cpus_empty(tick_broadcast_oneshot_mask))
		tick_broadcast_set_event(ktime_get(), 1);

	return cpu_isset(smp_processor_id(), tick_broadcast_oneshot_mask);
}

/*
 * Reprogram the broadcast device:
 *
 * Called with tick_broadcast_lock held and interrupts disabled.
 */
static int tick_broadcast_reprogram(void)
{
	ktime_t expires = { .tv64 = KTIME_MAX };
	struct tick_device *td;
	int cpu;

	/*
	 * Find the event which expires next:
	 */
	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 < expires.tv64)
			expires = td->evtdev->next_event;
	}

	if (expires.tv64 == KTIME_MAX)
		return 0;

	return tick_broadcast_set_event(expires, 0);
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	cpumask_t mask;
	ktime_t now;
	int cpu;

	spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	mask = CPU_MASK_NONE;
	now = ktime_get();
	/* Find all expired events */
	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64)
			cpu_set(cpu, mask);
	}

	/*
	 * Wake up the cpus which have an expired event. The broadcast
	 * device is reprogrammed in the return from idle code.
	 */
	if (!tick_do_broadcast(mask)) {
		/*
		 * The global event did not expire any CPU local
		 * events. This happens in dyntick mode, as the
		 * maximum PIT delta is quite small.
		 */
		if (tick_broadcast_reprogram())
			goto again;
	}
	spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		goto out;

	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
			cpu_set(cpu, tick_broadcast_oneshot_mask);
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			if (dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(dev->next_event, 1);
		}
	} else {
		if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
			cpu_clear(cpu, tick_broadcast_oneshot_mask);
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 != KTIME_MAX)
				tick_program_event(dev->next_event, 1);
		}
	}

out:
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device for oneshot mode
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	if (bc->mode != CLOCK_EVT_MODE_ONESHOT) {
		bc->event_handler = tick_handle_oneshot_broadcast;
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
		bc->next_event.tv64 = KTIME_MAX;
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);
	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}


/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpu_clear(cpu, tick_broadcast_oneshot_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT) {
		if (bc && cpus_empty(tick_broadcast_oneshot_mask))
			clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN);
	}

	spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#endif