/*
 * HPET (High Precision Event Timer) support for x86.
 *
 * Provides the HPET based clocksource, the legacy-replacement clock
 * event device (timer 0), optional per-cpu MSI based clock event
 * devices, and RTC interrupt emulation for the case where the HPET in
 * LegacyReplacement mode takes over the RTC interrupt line.
 */
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/sysdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hpet.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/i8253.h>
#include <asm/hpet.h>

/* The main counter is 32 bit wide (we run the HPET timers in 32 bit mode) */
#define HPET_MASK			CLOCKSOURCE_MASK(32)
#define HPET_SHIFT			22

/* FSEC = 10^-15
   NSEC = 10^-9 */
#define FSEC_PER_NSEC			1000000L

/* Flag bits for hpet_dev::flags */
#define HPET_DEV_USED_BIT		2
#define HPET_DEV_USED			(1 << HPET_DEV_USED_BIT)
#define HPET_DEV_VALID			0x8
#define HPET_DEV_FSB_CAP		0x1000
#define HPET_DEV_PERI_CAP		0x2000

#define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)

/*
 * HPET address is set in acpi/boot.c, when an ACPI entry exists
 */
unsigned long				hpet_address;
#ifdef CONFIG_PCI_MSI
static unsigned long			hpet_num_timers;
#endif
static void __iomem			*hpet_virt_address;

/* Per MSI-capable HPET comparator clock event device state */
struct hpet_dev {
	struct clock_event_device	evt;
	unsigned int			num;	/* HPET comparator number */
	int				cpu;	/* cpu this timer is bound to */
	unsigned int			irq;
	unsigned int			flags;	/* HPET_DEV_* bits */
	char				name[10];
};

/* MMIO read of an HPET register; @a is the byte offset from the HPET base */
unsigned long hpet_readl(unsigned long a)
{
	return readl(hpet_virt_address + a);
}

/* MMIO write of an HPET register; @a is the byte offset from the HPET base */
static inline void hpet_writel(unsigned long d, unsigned long a)
{
	writel(d, hpet_virt_address + a);
}

#ifdef CONFIG_X86_64
#include <asm/pgtable.h>
#endif

/*
 * Map the HPET MMIO range; on 64 bit also install the fixmap used by
 * the vsyscall/vread path.
 */
static inline void hpet_set_mapping(void)
{
	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
#ifdef CONFIG_X86_64
	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
#endif
}

static inline void hpet_clear_mapping(void)
{
	iounmap(hpet_virt_address);
	hpet_virt_address = NULL;
}

/*
 * HPET command line enable / disable
 */
static int boot_hpet_disable;
int hpet_force_user;
static int hpet_verbose;

/* Parse "hpet=" boot options: disable, force, verbose */
static int __init hpet_setup(char *str)
{
	if (str) {
		if (!strncmp("disable", str, 7))
			boot_hpet_disable = 1;
		if (!strncmp("force", str, 5))
			hpet_force_user = 1;
		if (!strncmp("verbose", str, 7))
			hpet_verbose = 1;
	}
	return 1;
}
__setup("hpet=", hpet_setup);

static int __init disable_hpet(char *str)
{
	boot_hpet_disable = 1;
	return 1;
}
__setup("nohpet", disable_hpet);

static inline int is_hpet_capable(void)
{
	return !boot_hpet_disable && hpet_address;
}

/*
 * HPET timer interrupt enable / disable
 */
static int hpet_legacy_int_enabled;

/**
 * is_hpet_enabled - check whether the hpet timer interrupt is enabled
 */
int is_hpet_enabled(void)
{
	return is_hpet_capable() && hpet_legacy_int_enabled;
}
EXPORT_SYMBOL_GPL(is_hpet_enabled);

/* Dump the global and per-comparator HPET registers (hpet=verbose only) */
static void _hpet_print_config(const char *function, int line)
{
	u32 i, timers, l, h;
	printk(KERN_INFO "hpet: %s(%d):\n", function, line);
	l = hpet_readl(HPET_ID);
	h = hpet_readl(HPET_PERIOD);
	timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
	printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
	l = hpet_readl(HPET_CFG);
	h = hpet_readl(HPET_STATUS);
	printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
	l = hpet_readl(HPET_COUNTER);
	h = hpet_readl(HPET_COUNTER+4);
	printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);

	for (i = 0; i < timers; i++) {
		l = hpet_readl(HPET_Tn_CFG(i));
		h = hpet_readl(HPET_Tn_CFG(i)+4);
		printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_CMP(i));
		h = hpet_readl(HPET_Tn_CMP(i)+4);
		printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_ROUTE(i));
		h = hpet_readl(HPET_Tn_ROUTE(i)+4);
		printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n",
		       i, l, h);
	}
}

#define hpet_print_config()					\
do {								\
	if (hpet_verbose)					\
		_hpet_print_config(__FUNCTION__, __LINE__);	\
} while (0)

/*
 * When the hpet driver (/dev/hpet) is enabled, we need to reserve
 * timer 0 and timer 1 in case of RTC emulation.
 */
#ifdef CONFIG_HPET

static void hpet_reserve_msi_timers(struct hpet_data *hd);

static void hpet_reserve_platform_timers(unsigned long id)
{
	struct hpet __iomem *hpet = hpet_virt_address;
	struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
	unsigned int nrtimers, i;
	struct hpet_data hd;

	nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	memset(&hd, 0, sizeof(hd));
	hd.hd_phys_address	= hpet_address;
	hd.hd_address		= hpet;
	hd.hd_nirqs		= nrtimers;
	hpet_reserve_timer(&hd, 0);

#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif

	/*
	 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
	 * is wrong for i8259!) not the output IRQ.  Many BIOS writers
	 * don't bother configuring *any* comparator interrupts.
	 */
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

	for (i = 2; i < nrtimers; timer++, i++) {
		hd.hd_irq[i] = (readl(&timer->hpet_config) &
			Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_reserve_msi_timers(&hd);

	hpet_alloc(&hd);

}
#else
static void hpet_reserve_platform_timers(unsigned long id) { }
#endif

/*
 * Common hpet info
 */
static unsigned long hpet_period;	/* main counter period in femtoseconds */

static void hpet_legacy_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt);
static int hpet_legacy_next_event(unsigned long delta,
			   struct clock_event_device *evt);

/*
 * The hpet clock event device
 */
static struct clock_event_device hpet_clockevent = {
	.name		= "hpet",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= hpet_legacy_set_mode,
	.set_next_event = hpet_legacy_next_event,
	.shift		= 32,
	.irq		= 0,
	.rating		= 50,
};

/* Halt the main counter (clear the overall enable bit) */
static void hpet_stop_counter(void)
{
	unsigned long cfg = hpet_readl(HPET_CFG);
	cfg &= ~HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

/* Zero both halves of the 64 bit main counter */
static void hpet_reset_counter(void)
{
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);
}

/* Let the main counter run again (set the overall enable bit) */
static void hpet_start_counter(void)
{
	unsigned long cfg = hpet_readl(HPET_CFG);
	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_restart_counter(void)
{
	hpet_stop_counter();
	hpet_reset_counter();
	hpet_start_counter();
}

/* Undo BIOS/chipset quirks that may have disabled the HPET over suspend */
static void hpet_resume_device(void)
{
	force_hpet_resume();
}

static void hpet_resume_counter(void)
{
	hpet_resume_device();
	hpet_restart_counter();
}

/*
 * Route timer 0/1 to the legacy IRQ0/IRQ8 lines (LegacyReplacement mode)
 */
static void hpet_enable_legacy_int(void)
{
	unsigned long cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);
	hpet_legacy_int_enabled = 1;
}

static void hpet_legacy_clockevent_register(void)
{
	/* Start HPET legacy interrupts */
	hpet_enable_legacy_int();

	/*
	 * The mult factor is defined as (include/linux/clockchips.h)
	 *  mult/2^shift = cyc/ns (in contrast to ns/cyc in clocksource.h)
	 * hpet_period is in units of femtoseconds (per cycle), so
	 *  mult/2^shift = cyc/ns = 10^6/hpet_period
	 *  mult = (10^6 * 2^shift)/hpet_period
	 *  mult = (FSEC_PER_NSEC << hpet_clockevent.shift)/hpet_period
	 */
	hpet_clockevent.mult = div_sc((unsigned long) FSEC_PER_NSEC,
				      hpet_period, hpet_clockevent.shift);
	/* Calculate the min / max delta */
	hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
							   &hpet_clockevent);
	/* 5 usec minimum reprogramming delta. */
	hpet_clockevent.min_delta_ns = 5000;

	/*
	 * Start hpet with the boot cpu mask and make it
	 * global after the IO_APIC has been initialized.
	 */
	hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(&hpet_clockevent);
	global_clock_event = &hpet_clockevent;
	printk(KERN_DEBUG "hpet clockevent registered\n");
}

static int hpet_setup_msi_irq(unsigned int irq);

/*
 * Common mode switch for the legacy (timer 0) and MSI clock event
 * devices. @timer selects the HPET comparator to program.
 */
static void hpet_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt, int timer)
{
	unsigned long cfg, cmp, now;
	uint64_t delta;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		hpet_stop_counter();
		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
		delta >>= evt->shift;
		now = hpet_readl(HPET_COUNTER);
		cmp = now + (unsigned long) delta;
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		/* Make sure we use edge triggered interrupts */
		cfg &= ~HPET_TN_LEVEL;
		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
		       HPET_TN_SETVAL | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		hpet_writel(cmp, HPET_Tn_CMP(timer));
		udelay(1);
		/*
		 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
		 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
		 * bit is automatically cleared after the first write.
		 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
		 * Publication # 24674)
		 */
		hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
		hpet_start_counter();
		hpet_print_config();
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_PERIODIC;
		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_RESUME:
		if (timer == 0) {
			hpet_enable_legacy_int();
		} else {
			struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
			hpet_setup_msi_irq(hdev->irq);
			disable_irq(hdev->irq);
			irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
			enable_irq(hdev->irq);
		}
		hpet_print_config();
		break;
	}
}

/*
 * Program the next oneshot event @delta counter ticks ahead on the
 * given comparator. Returns -ETIME when the deadline already passed.
 */
static int hpet_next_event(unsigned long delta,
			   struct clock_event_device *evt, int timer)
{
	u32 cnt;

	cnt = hpet_readl(HPET_COUNTER);
	cnt += (u32) delta;
	hpet_writel(cnt, HPET_Tn_CMP(timer));

	/*
	 * We need to read back the CMP register to make sure that
	 * what we wrote hit the chip before we compare it to the
	 * counter.
	 */
	WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);

	return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
}

static void hpet_legacy_set_mode(enum clock_event_mode mode,
			struct clock_event_device *evt)
{
	hpet_set_mode(mode, evt, 0);
}

static int hpet_legacy_next_event(unsigned long delta,
			struct clock_event_device *evt)
{
	return hpet_next_event(delta, evt, 0);
}

/*
 * HPET MSI Support
 */
#ifdef CONFIG_PCI_MSI

static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
static struct hpet_dev	*hpet_devs;

/* irq_chip callback: enable FSB (MSI style) delivery for this comparator */
void hpet_msi_unmask(unsigned int irq)
{
	struct hpet_dev *hdev = get_irq_data(irq);
	unsigned long cfg;

	/* unmask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg |= HPET_TN_FSB;
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

/* irq_chip callback: disable FSB delivery for this comparator */
void hpet_msi_mask(unsigned int irq)
{
	unsigned long cfg;
	struct hpet_dev *hdev = get_irq_data(irq);

	/* mask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg &= ~HPET_TN_FSB;
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

/* Write the MSI message (data + address) into the comparator route regs */
void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
{
	struct hpet_dev *hdev = get_irq_data(irq);

	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
}

/* Read back the MSI message; HPET route registers are 32 bit, so hi == 0 */
void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
{
	struct hpet_dev *hdev = get_irq_data(irq);

	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
	msg->address_hi = 0;
}

static void hpet_msi_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
	hpet_set_mode(mode, evt, hdev->num);
}

static int hpet_msi_next_event(unsigned long delta,
				struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
	return hpet_next_event(delta, evt, hdev->num);
}

static int hpet_setup_msi_irq(unsigned int irq)
{
	if (arch_setup_hpet_msi(irq)) {
		destroy_irq(irq);
		return -EINVAL;
	}
	return 0;
}

/* Allocate an irq for @dev and bind the two together */
static int hpet_assign_irq(struct hpet_dev *dev)
{
	unsigned int irq;

	irq = create_irq();
	if (!irq)
		return -EINVAL;

	set_irq_data(irq, dev);

	if (hpet_setup_msi_irq(irq))
		return -EINVAL;

	dev->irq = irq;
	return 0;
}

static irqreturn_t hpet_interrupt_handler(int irq, void *data)
{
	struct hpet_dev *dev = (struct hpet_dev *)data;
	struct clock_event_device *hevt = &dev->evt;

	if (!hevt->event_handler) {
		printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
				dev->num);
		return IRQ_HANDLED;
	}

	hevt->event_handler(hevt);
	return IRQ_HANDLED;
}

/* Request the irq and pin it to the cpu this timer serves */
static int hpet_setup_irq(struct hpet_dev *dev)
{

	if (request_irq(dev->irq, hpet_interrupt_handler,
			IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
			dev->name, dev))
		return -1;

	disable_irq(dev->irq);
	irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
	enable_irq(dev->irq);

	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
			 dev->name, dev->irq);

	return 0;
}

/* This should be called in specific @cpu */
static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
{
	struct clock_event_device *evt = &hdev->evt;
	uint64_t hpet_freq;

	WARN_ON(cpu != smp_processor_id());
	if (!(hdev->flags & HPET_DEV_VALID))
		return;

	if (hpet_setup_msi_irq(hdev->irq))
		return;

	hdev->cpu = cpu;
	per_cpu(cpu_hpet_dev, cpu) = hdev;
	evt->name = hdev->name;
	hpet_setup_irq(hdev);
	evt->irq = hdev->irq;

	evt->rating = 110;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	if (hdev->flags & HPET_DEV_PERI_CAP)
		evt->features |= CLOCK_EVT_FEAT_PERIODIC;

	evt->set_mode = hpet_msi_set_mode;
	evt->set_next_event = hpet_msi_next_event;
	evt->shift = 32;

	/*
	 * The period is a femto seconds value. We need to calculate the
	 * scaled math multiplication factor for nanosecond to hpet tick
	 * conversion.
	 */
	hpet_freq = 1000000000000000ULL;
	do_div(hpet_freq, hpet_period);
	evt->mult = div_sc((unsigned long) hpet_freq,
				      NSEC_PER_SEC, evt->shift);
	/* Calculate the max delta */
	evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt);
	/* 5 usec minimum reprogramming delta. */
	evt->min_delta_ns = 5000;

	evt->cpumask = cpumask_of(hdev->cpu);
	clockevents_register_device(evt);
}

#ifdef CONFIG_HPET
/* Reserve at least one timer for userspace (/dev/hpet) */
#define RESERVE_TIMERS 1
#else
#define RESERVE_TIMERS 0
#endif

/*
 * Scan comparators starting at @start_timer for FSB/MSI capable ones
 * and record them in hpet_devs for use as per-cpu clock event devices.
 */
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	unsigned int id;
	unsigned int num_timers;
	unsigned int num_timers_used = 0;
	int i;

	id = hpet_readl(HPET_ID);

	num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
	num_timers++; /* Value read out starts from 0 */
	hpet_print_config();

	hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
	if (!hpet_devs)
		return;

	hpet_num_timers = num_timers;

	for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
		struct hpet_dev *hdev = &hpet_devs[num_timers_used];
		unsigned long cfg = hpet_readl(HPET_Tn_CFG(i));

		/* Only consider HPET timer with MSI support */
		if (!(cfg & HPET_TN_FSB_CAP))
			continue;

		hdev->flags = 0;
		if (cfg & HPET_TN_PERIODIC_CAP)
			hdev->flags |= HPET_DEV_PERI_CAP;
		hdev->num = i;

		sprintf(hdev->name, "hpet%d", i);
		if (hpet_assign_irq(hdev))
			continue;

		hdev->flags |= HPET_DEV_FSB_CAP;
		hdev->flags |= HPET_DEV_VALID;
		num_timers_used++;
		if (num_timers_used == num_possible_cpus())
			break;
	}

	printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
		num_timers, num_timers_used);
}

#ifdef CONFIG_HPET
/* Mark the MSI timers we picked as reserved towards the /dev/hpet driver */
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	int i;

	if (!hpet_devs)
		return;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;

		hd->hd_irq[hdev->num] = hdev->irq;
		hpet_reserve_timer(hd, hdev->num);
	}
}
#endif

/*
 * Find a valid, not yet claimed MSI timer and atomically mark it used.
 * Returns NULL when none is available.
 */
static struct hpet_dev *hpet_get_unused_timer(void)
{
	int i;

	if (!hpet_devs)
		return NULL;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;
		if (test_and_set_bit(HPET_DEV_USED_BIT,
			(unsigned long *)&hdev->flags))
			continue;
		return hdev;
	}
	return NULL;
}

struct hpet_work_struct {
	struct delayed_work work;
	struct completion complete;
};

/*
 * Runs on the target cpu (scheduled there by hpet_cpuhp_notify) so the
 * clock event device gets registered on the cpu it serves.
 */
static void hpet_work(struct work_struct *w)
{
	struct hpet_dev *hdev;
	int cpu = smp_processor_id();
	struct hpet_work_struct *hpet_work;

	hpet_work = container_of(w, struct hpet_work_struct, work.work);

	hdev = hpet_get_unused_timer();
	if (hdev)
		init_one_hpet_msi_clockevent(hdev, cpu);

	complete(&hpet_work->complete);
}

/* CPU hotplug: hand a timer to an onlined cpu, reclaim it from a dead one */
static int hpet_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
	struct hpet_work_struct work;
	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);

	switch (action & 0xf) {
	case CPU_ONLINE:
		INIT_DELAYED_WORK_ON_STACK(&work.work, hpet_work);
		init_completion(&work.complete);
		/* FIXME: add schedule_work_on() */
		schedule_delayed_work_on(cpu, &work.work, 0);
		wait_for_completion(&work.complete);
		destroy_timer_on_stack(&work.work.timer);
		break;
	case CPU_DEAD:
		if (hdev) {
			free_irq(hdev->irq, hdev);
			hdev->flags &= ~HPET_DEV_USED;
			per_cpu(cpu_hpet_dev, cpu) = NULL;
		}
		break;
	}
	return NOTIFY_OK;
}
#else

/* !CONFIG_PCI_MSI stubs */
static int hpet_setup_msi_irq(unsigned int irq)
{
	return 0;
}
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	return;
}

#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	return;
}
#endif

static int hpet_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	return NOTIFY_OK;
}

#endif

/*
 * Clock source related code
 */
static cycle_t read_hpet(struct clocksource *cs)
{
	return (cycle_t)hpet_readl(HPET_COUNTER);
}

#ifdef CONFIG_X86_64
static cycle_t __vsyscall_fn vread_hpet(void)
{
	/* 0xf0 is the main counter offset (HPET_COUNTER) within the fixmap */
	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}
#endif

static struct clocksource clocksource_hpet = {
	.name		= "hpet",
	.rating		= 250,
	.read		= read_hpet,
	.mask		= HPET_MASK,
	.shift		= HPET_SHIFT,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= hpet_resume_counter,
#ifdef CONFIG_X86_64
	.vread		= vread_hpet,
#endif
};

/*
 * Start the counter, verify it actually counts, then register it as a
 * clocksource. Returns 0 on success, -ENODEV when the counter is dead.
 */
static int hpet_clocksource_register(void)
{
	u64 start, now;
	cycle_t t1;

	/* Start the counter */
	hpet_restart_counter();

	/* Verify whether hpet counter works */
	t1 = hpet_readl(HPET_COUNTER);
	rdtscll(start);

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		rep_nop();
		rdtscll(now);
	} while ((now - start) < 200000UL);

	if (t1 == hpet_readl(HPET_COUNTER)) {
		printk(KERN_WARNING
		       "HPET counter not counting. HPET disabled\n");
		return -ENODEV;
	}

	/*
	 * The definition of mult is (include/linux/clocksource.h)
	 * mult/2^shift = ns/cyc and hpet_period is in units of fsec/cyc
	 * so we first need to convert hpet_period to ns/cyc units:
	 *  mult/2^shift = ns/cyc = hpet_period/10^6
	 *  mult = (hpet_period * 2^shift)/10^6
	 *  mult = (hpet_period << shift)/FSEC_PER_NSEC
	 */
	clocksource_hpet.mult = div_sc(hpet_period, FSEC_PER_NSEC, HPET_SHIFT);

	clocksource_register(&clocksource_hpet);

	return 0;
}

/**
 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 */
int __init hpet_enable(void)
{
	unsigned long id;
	int i;

	if (!is_hpet_capable())
		return 0;

	hpet_set_mapping();

	/*
	 * Read the period and check for a sane value:
	 */
	hpet_period = hpet_readl(HPET_PERIOD);

	/*
	 * AMD SB700 based systems with spread spectrum enabled use a
	 * SMM based HPET emulation to provide proper frequency
	 * setting. The SMM code is initialized with the first HPET
	 * register access and takes some time to complete. During
	 * this time the config register reads 0xffffffff. We check
	 * for max. 1000 loops whether the config register reads a non
	 * 0xffffffff value to make sure that HPET is up and running
	 * before we go further. A counting loop is safe, as the HPET
	 * access takes thousands of CPU cycles. On non SB700 based
	 * machines this check is only done once and has no side
	 * effects.
	 */
	for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
		if (i == 1000) {
			printk(KERN_WARNING
			       "HPET config register value = 0xFFFFFFFF. "
			       "Disabling HPET\n");
			goto out_nohpet;
		}
	}

	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
		goto out_nohpet;

	/*
	 * Read the HPET ID register to retrieve the IRQ routing
	 * information and the number of channels
	 */
	id = hpet_readl(HPET_ID);
	hpet_print_config();

#ifdef CONFIG_HPET_EMULATE_RTC
	/*
	 * The legacy routing mode needs at least two channels, tick timer
	 * and the rtc emulation channel.
	 */
	if (!(id & HPET_ID_NUMBER))
		goto out_nohpet;
#endif

	if (hpet_clocksource_register())
		goto out_nohpet;

	if (id & HPET_ID_LEGSUP) {
		hpet_legacy_clockevent_register();
		hpet_msi_capability_lookup(2);
		return 1;
	}
	hpet_msi_capability_lookup(0);
	return 0;

out_nohpet:
	hpet_clear_mapping();
	hpet_address = 0;
	return 0;
}

/*
 * Needs to be late, as the reserve_timer code calls kalloc !
 *
 * Not a problem on i386 as hpet_enable is called from late_time_init,
 * but on x86_64 it is necessary !
 */
static __init int hpet_late_init(void)
{
	int cpu;

	if (boot_hpet_disable)
		return -ENODEV;

	if (!hpet_address) {
		if (!force_hpet_address)
			return -ENODEV;

		hpet_address = force_hpet_address;
		hpet_enable();
	}

	if (!hpet_virt_address)
		return -ENODEV;

	hpet_reserve_platform_timers(hpet_readl(HPET_ID));
	hpet_print_config();

	for_each_online_cpu(cpu) {
		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
	}

	/* This notifier should be called after workqueue is ready */
	hotcpu_notifier(hpet_cpuhp_notify, -20);

	return 0;
}
fs_initcall(hpet_late_init);

/* Disable the counter and, if active, the legacy interrupt routing */
void hpet_disable(void)
{
	if (is_hpet_capable()) {
		unsigned long cfg = hpet_readl(HPET_CFG);

		if (hpet_legacy_int_enabled) {
			cfg &= ~HPET_CFG_LEGACY;
			hpet_legacy_int_enabled = 0;
		}
		cfg &= ~HPET_CFG_ENABLE;
		hpet_writel(cfg, HPET_CFG);
	}
}

#ifdef CONFIG_HPET_EMULATE_RTC

/* HPET in LegacyReplacement Mode eats up RTC interrupt line. When, HPET
 * is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
 *    is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64Hz or user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <asm/rtc.h>

#define DEFAULT_RTC_INT_FREQ	64
#define DEFAULT_RTC_SHIFT	6
#define RTC_NUM_INTS		1

static unsigned long hpet_rtc_flags;	/* RTC_PIE / RTC_UIE / RTC_AIE bits */
static int hpet_prev_update_sec;
static struct rtc_time hpet_alarm_time;
static unsigned long hpet_pie_count;
static u32 hpet_t1_cmp;			/* last programmed T1 comparator */
static unsigned long hpet_default_delta; /* T1 delta for 64 Hz polling */
static unsigned long hpet_pie_delta;	/* T1 delta for user periodic freq */
static unsigned long hpet_pie_limit;

static rtc_irq_handler irq_handler;

/*
 * Check that the hpet counter c1 is ahead of the c2
 */
static inline int hpet_cnt_ahead(u32 c1, u32 c2)
{
	return (s32)(c2 - c1) < 0;
}

/*
 * Registers a IRQ handler.
 */
int hpet_register_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return -ENODEV;
	if (irq_handler)
		return -EBUSY;

	irq_handler = handler;

	return 0;
}
EXPORT_SYMBOL_GPL(hpet_register_irq_handler);

/*
 * Deregisters the IRQ handler registered with hpet_register_irq_handler()
 * and does cleanup.
 */
void hpet_unregister_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return;

	irq_handler = NULL;
	hpet_rtc_flags = 0;
}
EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);

/*
 * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
 * is not supported by all HPET implementations for timer 1.
 *
 * hpet_rtc_timer_init() is called when the rtc is initialized.
 */
int hpet_rtc_timer_init(void)
{
	unsigned long cfg, cnt, delta, flags;

	if (!is_hpet_enabled())
		return 0;

	if (!hpet_default_delta) {
		uint64_t clc;

		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
		hpet_default_delta = (unsigned long) clc;
	}

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	local_irq_save(flags);

	cnt = delta + hpet_readl(HPET_COUNTER);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	local_irq_restore(flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);

/*
 * The functions below are called from rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags &= ~bit_mask;
	return 1;
}
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	unsigned long oldbits = hpet_rtc_flags;

	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags |= bit_mask;

	if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
		hpet_prev_update_sec = -1;

	if (!oldbits)
		hpet_rtc_timer_init();

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);

int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
			unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_alarm_time.tm_hour = hrs;
	hpet_alarm_time.tm_min = min;
	hpet_alarm_time.tm_sec = sec;

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_alarm_time);

int hpet_set_periodic_freq(unsigned long freq)
{
	uint64_t clc;

	if (!is_hpet_enabled())
		return 0;

	if (freq <= DEFAULT_RTC_INT_FREQ)
		hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
	else {
		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		do_div(clc, freq);
		clc >>= hpet_clockevent.shift;
		hpet_pie_delta = (unsigned long) clc;
	}
	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);

int hpet_rtc_dropped_irq(void)
{
	return is_hpet_enabled();
}
EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);

/*
 * Re-arm timer 1 for the next tick; counts how many ticks were missed
 * while we were away and accounts them towards the periodic interrupt.
 */
static void hpet_rtc_timer_reinit(void)
{
	unsigned long cfg, delta;
	int lost_ints = -1;

	if (unlikely(!hpet_rtc_flags)) {
		cfg = hpet_readl(HPET_T1_CFG);
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_T1_CFG);
		return;
	}

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * Increment the comparator value until we are ahead of the
	 * current count.
	 */
	do {
		hpet_t1_cmp += delta;
		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
		lost_ints++;
	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));

	if (lost_ints) {
		if (hpet_rtc_flags & RTC_PIE)
			hpet_pie_count += lost_ints;
		if (printk_ratelimit())
			printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
				lost_ints);
	}
}

/*
 * Timer 1 interrupt: synthesize RTC update/alarm/periodic interrupt
 * flags and forward them to the registered rtc irq handler.
 */
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;

	hpet_rtc_timer_reinit();
	memset(&curr_time, 0, sizeof(struct rtc_time));

	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
		get_rtc_time(&curr_time);

	if (hpet_rtc_flags & RTC_UIE &&
	    curr_time.tm_sec != hpet_prev_update_sec) {
		if (hpet_prev_update_sec >= 0)
			rtc_int_flag = RTC_UF;
		hpet_prev_update_sec = curr_time.tm_sec;
	}

	if (hpet_rtc_flags & RTC_PIE &&
	    ++hpet_pie_count >= hpet_pie_limit) {
		rtc_int_flag |= RTC_PF;
		hpet_pie_count = 0;
	}

	if (hpet_rtc_flags & RTC_AIE &&
	    (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
	    (curr_time.tm_min == hpet_alarm_time.tm_min) &&
	    (curr_time.tm_hour == hpet_alarm_time.tm_hour))
			rtc_int_flag |= RTC_AF;

	if (rtc_int_flag) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		if (irq_handler)
			irq_handler(rtc_int_flag, dev_id);
	}
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
#endif