/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	     (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
	return 1;
}

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

/*
 * Fallback broadcast handler for devices which depend on broadcast
 * but were registered without a broadcast function.
 */
static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		} else {
			tick_device_setup_broadcast_func(dev);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);

	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

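/*
 * Shut the broadcast device down on suspend. The broadcast masks are
 * left untouched, so tick_resume_broadcast() can restart broadcasting.
 */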
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

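/*
 * Restart the broadcast device on resume. Returns nonzero when the
 * periodic tick of the calling cpu is handled by the broadcast device.
 */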
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

/*
 * Program the broadcast device to fire at @expires on behalf of @cpu
 * and, if supported, steer the broadcast interrupt to that cpu.
 */
static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	ktime_t now;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
637 * 638 * We cannot call the handler directly from 639 * here, because we might be in a NOHZ phase 640 * and we did not go through the irq_enter() 641 * nohz fixups. 642 */ 643 now = ktime_get(); 644 if (dev->next_event.tv64 <= now.tv64) { 645 cpumask_set_cpu(cpu, tick_broadcast_force_mask); 646 goto out; 647 } 648 /* 649 * We got woken by something else. Reprogram 650 * the cpu local timer device. 651 */ 652 tick_program_event(dev->next_event, 1); 653 } 654 } 655 out: 656 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); 657 } 658 659 /* 660 * Reset the one shot broadcast for a cpu 661 * 662 * Called with tick_broadcast_lock held 663 */ 664 static void tick_broadcast_clear_oneshot(int cpu) 665 { 666 cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); 667 } 668 669 static void tick_broadcast_init_next_event(struct cpumask *mask, 670 ktime_t expires) 671 { 672 struct tick_device *td; 673 int cpu; 674 675 for_each_cpu(cpu, mask) { 676 td = &per_cpu(tick_cpu_device, cpu); 677 if (td->evtdev) 678 td->evtdev->next_event = expires; 679 } 680 } 681 682 /** 683 * tick_broadcast_setup_oneshot - setup the broadcast device 684 */ 685 void tick_broadcast_setup_oneshot(struct clock_event_device *bc) 686 { 687 int cpu = smp_processor_id(); 688 689 /* Set it up only once ! */ 690 if (bc->event_handler != tick_handle_oneshot_broadcast) { 691 int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; 692 693 bc->event_handler = tick_handle_oneshot_broadcast; 694 695 /* Take the do_timer update */ 696 if (!tick_nohz_full_cpu(cpu)) 697 tick_do_timer_cpu = cpu; 698 699 /* 700 * We must be careful here. There might be other CPUs 701 * waiting for periodic broadcast. We need to set the 702 * oneshot_mask bits for those and program the 703 * broadcast device to fire. 704 */ 705 cpumask_copy(tmpmask, tick_broadcast_mask); 706 cpumask_clear_cpu(cpu, tmpmask); 707 cpumask_or(tick_broadcast_oneshot_mask, 708 tick_broadcast_oneshot_mask, tmpmask); 709 710 if (was_periodic && !cpumask_empty(tmpmask)) { 711 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); 712 tick_broadcast_init_next_event(tmpmask, 713 tick_next_period); 714 tick_broadcast_set_event(bc, cpu, tick_next_period, 1); 715 } else 716 bc->next_event.tv64 = KTIME_MAX; 717 } else { 718 /* 719 * The first cpu which switches to oneshot mode sets 720 * the bit for all other cpus which are in the general 721 * (periodic) broadcast mask. So the bit is set and 722 * would prevent the first broadcast enter after this 723 * to program the bc device. 724 */ 725 tick_broadcast_clear_oneshot(cpu); 726 } 727 } 728 729 /* 730 * Select oneshot operating mode for the broadcast device 731 */ 732 void tick_broadcast_switch_to_oneshot(void) 733 { 734 struct clock_event_device *bc; 735 unsigned long flags; 736 737 raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 738 739 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT; 740 bc = tick_broadcast_device.evtdev; 741 if (bc) 742 tick_broadcast_setup_oneshot(bc); 743 744 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); 745 } 746 747 748 /* 749 * Remove a dead CPU from broadcasting 750 */ 751 void tick_shutdown_broadcast_oneshot(unsigned int *cpup) 752 { 753 unsigned long flags; 754 unsigned int cpu = *cpup; 755 756 raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 757 758 /* 759 * Clear the broadcast mask flag for the dead cpu, but do not 760 * stop the broadcast device! 
761 */ 762 cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); 763 764 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); 765 } 766 767 /* 768 * Check, whether the broadcast device is in one shot mode 769 */ 770 int tick_broadcast_oneshot_active(void) 771 { 772 return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; 773 } 774 775 /* 776 * Check whether the broadcast device supports oneshot. 777 */ 778 bool tick_broadcast_oneshot_available(void) 779 { 780 struct clock_event_device *bc = tick_broadcast_device.evtdev; 781 782 return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false; 783 } 784 785 #endif 786 787 void __init tick_broadcast_init(void) 788 { 789 zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT); 790 zalloc_cpumask_var(&tmpmask, GFP_NOWAIT); 791 #ifdef CONFIG_TICK_ONESHOT 792 zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT); 793 zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT); 794 zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT); 795 #endif 796 } 797