#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/sysdev.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/hpet.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/i8253.h>
#include <asm/hpet.h>

#define HPET_MASK		CLOCKSOURCE_MASK(32)

/* FSEC = 10^-15
   NSEC = 10^-9 */
#define FSEC_PER_NSEC		1000000L

#define HPET_DEV_USED_BIT	2
#define HPET_DEV_USED		(1 << HPET_DEV_USED_BIT)
#define HPET_DEV_VALID		0x8
#define HPET_DEV_FSB_CAP	0x1000
#define HPET_DEV_PERI_CAP	0x2000

#define HPET_MIN_CYCLES		128
#define HPET_MIN_PROG_DELTA	(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))

#define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)

/*
 * HPET address is set in acpi/boot.c, when an ACPI entry exists
 */
unsigned long hpet_address;
u8 hpet_blockid; /* OS timer block num */
u8 hpet_msi_disable;

#ifdef CONFIG_PCI_MSI
static unsigned long hpet_num_timers;
#endif
static void __iomem *hpet_virt_address;

struct hpet_dev {
	struct clock_event_device evt;
	unsigned int num;
	int cpu;
	unsigned int irq;
	unsigned int flags;
	char name[10];
};

inline unsigned int hpet_readl(unsigned int a)
{
	return readl(hpet_virt_address + a);
}

static inline void hpet_writel(unsigned int d, unsigned int a)
{
	writel(d, hpet_virt_address + a);
}

#ifdef CONFIG_X86_64
#include <asm/pgtable.h>
#endif

static inline void hpet_set_mapping(void)
{
	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
#ifdef CONFIG_X86_64
	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
#endif
}

static inline void hpet_clear_mapping(void)
{
	iounmap(hpet_virt_address);
	hpet_virt_address = NULL;
}

/*
 * HPET command line enable / disable
 */
static int boot_hpet_disable;
int hpet_force_user;
static int hpet_verbose;

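/*
 * Command line options handled below:
 *   hpet=disable - never use the HPET, even if the BIOS advertises it
 *   hpet=force   - allow the chipset quirk code to force-enable an HPET
 *                  that the BIOS has hidden (consumers of hpet_force_user)
 *   hpet=verbose - dump the HPET register state via hpet_print_config()
 *   nohpet       - shorthand for hpet=disable
 */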
static int __init hpet_setup(char *str)
{
	if (str) {
		if (!strncmp("disable", str, 7))
			boot_hpet_disable = 1;
		if (!strncmp("force", str, 5))
			hpet_force_user = 1;
		if (!strncmp("verbose", str, 7))
			hpet_verbose = 1;
	}
	return 1;
}
__setup("hpet=", hpet_setup);

static int __init disable_hpet(char *str)
{
	boot_hpet_disable = 1;
	return 1;
}
__setup("nohpet", disable_hpet);

static inline int is_hpet_capable(void)
{
	return !boot_hpet_disable && hpet_address;
}

/*
 * HPET timer interrupt enable / disable
 */
static int hpet_legacy_int_enabled;

/**
 * is_hpet_enabled - check whether the hpet timer interrupt is enabled
 */
int is_hpet_enabled(void)
{
	return is_hpet_capable() && hpet_legacy_int_enabled;
}
EXPORT_SYMBOL_GPL(is_hpet_enabled);

static void _hpet_print_config(const char *function, int line)
{
	u32 i, timers, l, h;
	printk(KERN_INFO "hpet: %s(%d):\n", function, line);
	l = hpet_readl(HPET_ID);
	h = hpet_readl(HPET_PERIOD);
	timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
	printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
	l = hpet_readl(HPET_CFG);
	h = hpet_readl(HPET_STATUS);
	printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
	l = hpet_readl(HPET_COUNTER);
	h = hpet_readl(HPET_COUNTER+4);
	printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);

	for (i = 0; i < timers; i++) {
		l = hpet_readl(HPET_Tn_CFG(i));
		h = hpet_readl(HPET_Tn_CFG(i)+4);
		printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_CMP(i));
		h = hpet_readl(HPET_Tn_CMP(i)+4);
		printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n",
		       i, l, h);
		l = hpet_readl(HPET_Tn_ROUTE(i));
		h = hpet_readl(HPET_Tn_ROUTE(i)+4);
		printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n",
		       i, l, h);
	}
}

#define hpet_print_config()					\
do {								\
	if (hpet_verbose)					\
		_hpet_print_config(__FUNCTION__, __LINE__);	\
} while (0)

/*
 * When the hpet driver (/dev/hpet) is enabled, we need to reserve
 * timer 0 and timer 1 in case of RTC emulation.
 */
#ifdef CONFIG_HPET

static void hpet_reserve_msi_timers(struct hpet_data *hd);

static void hpet_reserve_platform_timers(unsigned int id)
{
	struct hpet __iomem *hpet = hpet_virt_address;
	struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
	unsigned int nrtimers, i;
	struct hpet_data hd;

	nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	memset(&hd, 0, sizeof(hd));
	hd.hd_phys_address = hpet_address;
	hd.hd_address = hpet;
	hd.hd_nirqs = nrtimers;
	hpet_reserve_timer(&hd, 0);

#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif

	/*
	 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
	 * is wrong for i8259!) not the output IRQ.  Many BIOS writers
	 * don't bother configuring *any* comparator interrupts.
	 */
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

	for (i = 2; i < nrtimers; timer++, i++) {
		hd.hd_irq[i] = (readl(&timer->hpet_config) &
			Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_reserve_msi_timers(&hd);

	hpet_alloc(&hd);

}
#else
static void hpet_reserve_platform_timers(unsigned int id) { }
#endif

/*
 * Common hpet info
 */
static unsigned long hpet_period;

static void hpet_legacy_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt);
static int hpet_legacy_next_event(unsigned long delta,
				  struct clock_event_device *evt);

/*
 * The hpet clock event device
 */
static struct clock_event_device hpet_clockevent = {
	.name		= "hpet",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= hpet_legacy_set_mode,
	.set_next_event	= hpet_legacy_next_event,
	.shift		= 32,
	.irq		= 0,
	.rating		= 50,
};

static void hpet_stop_counter(void)
{
	unsigned long cfg = hpet_readl(HPET_CFG);
	cfg &= ~HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_reset_counter(void)
{
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);
}

static void hpet_start_counter(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);
	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}

static void hpet_restart_counter(void)
{
	hpet_stop_counter();
	hpet_reset_counter();
	hpet_start_counter();
}

static void hpet_resume_device(void)
{
	force_hpet_resume();
}

static void hpet_resume_counter(struct clocksource *cs)
{
	hpet_resume_device();
	hpet_restart_counter();
}

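/*
 * Legacy replacement mode (HPET_CFG_LEGACY, per the HPET specification):
 * comparator 0 takes over the PIT interrupt (IRQ0) and comparator 1 takes
 * over the RTC interrupt (IRQ8).  The RTC emulation code further down
 * relies on this routing.
 */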
static void hpet_enable_legacy_int(void)
{
	unsigned int cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);
	hpet_legacy_int_enabled = 1;
}

static void hpet_legacy_clockevent_register(void)
{
	/* Start HPET legacy interrupts */
	hpet_enable_legacy_int();

	/*
	 * The mult factor is defined as (include/linux/clockchips.h)
	 *  mult/2^shift = cyc/ns (in contrast to ns/cyc in clocksource.h)
	 * hpet_period is in units of femtoseconds (per cycle), so
	 *  mult/2^shift = cyc/ns = 10^6/hpet_period
	 *  mult = (10^6 * 2^shift)/hpet_period
	 *  mult = (FSEC_PER_NSEC << hpet_clockevent.shift)/hpet_period
	 */
	hpet_clockevent.mult = div_sc((unsigned long) FSEC_PER_NSEC,
				      hpet_period, hpet_clockevent.shift);
	/* Calculate the min / max delta */
	hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
							   &hpet_clockevent);
	/* Setup minimum reprogramming delta. */
	hpet_clockevent.min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA,
							   &hpet_clockevent);

	/*
	 * Start hpet with the boot cpu mask and make it
	 * global after the IO_APIC has been initialized.
	 */
	hpet_clockevent.cpumask = cpumask_of(smp_processor_id());
	clockevents_register_device(&hpet_clockevent);
	global_clock_event = &hpet_clockevent;
	printk(KERN_DEBUG "hpet clockevent registered\n");
}

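/*
 * Worked example for the scaled math above, assuming the common
 * 14.31818 MHz HPET (hpet_period = 69841279 fs; illustrative value,
 * not read from the hardware here):
 *
 *   mult         = (10^6 << 32) / 69841279               ~= 61496114
 *   max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, ...)  ~= 150 seconds
 *   min_delta_ns = clockevent_delta2ns(192, ...)         ~= 13.4 us
 *
 * (HPET_MIN_PROG_DELTA is 128 + 128/2 = 192 cycles.)
 */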
static int hpet_setup_msi_irq(unsigned int irq);

static void hpet_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt, int timer)
{
	unsigned int cfg, cmp, now;
	uint64_t delta;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		hpet_stop_counter();
		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
		delta >>= evt->shift;
		now = hpet_readl(HPET_COUNTER);
		cmp = now + (unsigned int) delta;
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		/* Make sure we use edge triggered interrupts */
		cfg &= ~HPET_TN_LEVEL;
		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
		       HPET_TN_SETVAL | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		hpet_writel(cmp, HPET_Tn_CMP(timer));
		udelay(1);
		/*
		 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
		 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
		 * bit is automatically cleared after the first write.
		 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
		 * Publication # 24674)
		 */
		hpet_writel((unsigned int) delta, HPET_Tn_CMP(timer));
		hpet_start_counter();
		hpet_print_config();
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_PERIODIC;
		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_RESUME:
		if (timer == 0) {
			hpet_enable_legacy_int();
		} else {
			struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
			hpet_setup_msi_irq(hdev->irq);
			disable_irq(hdev->irq);
			irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
			enable_irq(hdev->irq);
		}
		hpet_print_config();
		break;
	}
}

static int hpet_next_event(unsigned long delta,
			   struct clock_event_device *evt, int timer)
{
	u32 cnt;
	s32 res;

	cnt = hpet_readl(HPET_COUNTER);
	cnt += (u32) delta;
	hpet_writel(cnt, HPET_Tn_CMP(timer));

	/*
	 * HPETs are a complete disaster. The compare register is
	 * based on an equal comparison and neither provides a less
	 * than or equal functionality (which would require taking
	 * the wraparound into account) nor a simple count down event
	 * mode. Further the write to the comparator register is
	 * delayed internally up to two HPET clock cycles in certain
	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
	 * longer delays. We worked around that by reading back the
	 * compare register, but that required another workaround for
	 * ICH9,10 chips where the first readout after write can
	 * return the old stale value. We already had a minimum
	 * programming delta of 5us enforced, but an NMI or SMI hitting
	 * between the counter readout and the comparator write can
	 * move us behind that point easily. Now instead of reading
	 * the compare register back several times, we make the ETIME
	 * decision based on the following: Return ETIME if the
	 * counter value after the write is less than HPET_MIN_CYCLES
	 * away from the event or if the counter is already ahead of
	 * the event. The minimum programming delta for the generic
	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
	 */
	res = (s32)(cnt - hpet_readl(HPET_COUNTER));

	return res < HPET_MIN_CYCLES ? -ETIME : 0;
}

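/*
 * The signed 32-bit difference above stays valid across a counter wrap.
 * Illustration: cnt = 0x00000010 with a counter readback of 0xFFFFFFF0
 * gives (s32)(cnt - counter) = 32, i.e. the event is only 32 cycles away,
 * which is below HPET_MIN_CYCLES and therefore reported as -ETIME, even
 * though the unsigned difference would look like almost a full period.
 */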
static void hpet_legacy_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	hpet_set_mode(mode, evt, 0);
}

static int hpet_legacy_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	return hpet_next_event(delta, evt, 0);
}

/*
 * HPET MSI Support
 */
#ifdef CONFIG_PCI_MSI

static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
static struct hpet_dev *hpet_devs;

void hpet_msi_unmask(struct irq_data *data)
{
	struct hpet_dev *hdev = data->handler_data;
	unsigned int cfg;

	/* unmask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg |= HPET_TN_FSB;
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

void hpet_msi_mask(struct irq_data *data)
{
	struct hpet_dev *hdev = data->handler_data;
	unsigned int cfg;

	/* mask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg &= ~HPET_TN_FSB;
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}

void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg)
{
	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
}

void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg)
{
	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
	msg->address_hi = 0;
}

static void hpet_msi_set_mode(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
	hpet_set_mode(mode, evt, hdev->num);
}

static int hpet_msi_next_event(unsigned long delta,
			       struct clock_event_device *evt)
{
	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
	return hpet_next_event(delta, evt, hdev->num);
}

static int hpet_setup_msi_irq(unsigned int irq)
{
	if (arch_setup_hpet_msi(irq, hpet_blockid)) {
		destroy_irq(irq);
		return -EINVAL;
	}
	return 0;
}

static int hpet_assign_irq(struct hpet_dev *dev)
{
	unsigned int irq;

	irq = create_irq_nr(0, -1);
	if (!irq)
		return -EINVAL;

	irq_set_handler_data(irq, dev);

	if (hpet_setup_msi_irq(irq))
		return -EINVAL;

	dev->irq = irq;
	return 0;
}

static irqreturn_t hpet_interrupt_handler(int irq, void *data)
{
	struct hpet_dev *dev = (struct hpet_dev *)data;
	struct clock_event_device *hevt = &dev->evt;

	if (!hevt->event_handler) {
		printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
		       dev->num);
		return IRQ_HANDLED;
	}

	hevt->event_handler(hevt);
	return IRQ_HANDLED;
}

static int hpet_setup_irq(struct hpet_dev *dev)
{
	if (request_irq(dev->irq, hpet_interrupt_handler,
			IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
			dev->name, dev))
		return -1;

	disable_irq(dev->irq);
	irq_set_affinity(dev->irq, cpumask_of(dev->cpu));
	enable_irq(dev->irq);

	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
	       dev->name, dev->irq);

	return 0;
}

/* This should be called on the specific @cpu */
static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
{
	struct clock_event_device *evt = &hdev->evt;
	uint64_t hpet_freq;

	WARN_ON(cpu != smp_processor_id());
	if (!(hdev->flags & HPET_DEV_VALID))
		return;

	if (hpet_setup_msi_irq(hdev->irq))
		return;

	hdev->cpu = cpu;
	per_cpu(cpu_hpet_dev, cpu) = hdev;
	evt->name = hdev->name;
	hpet_setup_irq(hdev);
	evt->irq = hdev->irq;

	evt->rating = 110;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	if (hdev->flags & HPET_DEV_PERI_CAP)
		evt->features |= CLOCK_EVT_FEAT_PERIODIC;

	evt->set_mode = hpet_msi_set_mode;
	evt->set_next_event = hpet_msi_next_event;
	evt->shift = 32;

	/*
	 * The period is a femto seconds value. We need to calculate the
	 * scaled math multiplication factor for nanosecond to hpet tick
	 * conversion.
	 */
	hpet_freq = FSEC_PER_SEC;
	do_div(hpet_freq, hpet_period);
	evt->mult = div_sc((unsigned long) hpet_freq,
			   NSEC_PER_SEC, evt->shift);
	/* Calculate the max delta */
	evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt);
	/* 5 usec minimum reprogramming delta. */
	evt->min_delta_ns = 5000;

	evt->cpumask = cpumask_of(hdev->cpu);
	clockevents_register_device(evt);
}

#ifdef CONFIG_HPET
/* Reserve at least one timer for userspace (/dev/hpet) */
#define RESERVE_TIMERS 1
#else
#define RESERVE_TIMERS 0
#endif

static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	unsigned int id;
	unsigned int num_timers;
	unsigned int num_timers_used = 0;
	int i;

	if (hpet_msi_disable)
		return;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return;
	id = hpet_readl(HPET_ID);

	num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
	num_timers++; /* Value read out starts from 0 */
	hpet_print_config();

	hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
	if (!hpet_devs)
		return;

	hpet_num_timers = num_timers;

	for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
		struct hpet_dev *hdev = &hpet_devs[num_timers_used];
		unsigned int cfg = hpet_readl(HPET_Tn_CFG(i));

		/* Only consider HPET timer with MSI support */
		if (!(cfg & HPET_TN_FSB_CAP))
			continue;

		hdev->flags = 0;
		if (cfg & HPET_TN_PERIODIC_CAP)
			hdev->flags |= HPET_DEV_PERI_CAP;
		hdev->num = i;

		sprintf(hdev->name, "hpet%d", i);
		if (hpet_assign_irq(hdev))
			continue;

		hdev->flags |= HPET_DEV_FSB_CAP;
		hdev->flags |= HPET_DEV_VALID;
		num_timers_used++;
		if (num_timers_used == num_possible_cpus())
			break;
	}

	printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
	       num_timers, num_timers_used);
}

#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	int i;

	if (!hpet_devs)
		return;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;

		hd->hd_irq[hdev->num] = hdev->irq;
		hpet_reserve_timer(hd, hdev->num);
	}
}
#endif

static struct hpet_dev *hpet_get_unused_timer(void)
{
	int i;

	if (!hpet_devs)
		return NULL;

	for (i = 0; i < hpet_num_timers; i++) {
		struct hpet_dev *hdev = &hpet_devs[i];

		if (!(hdev->flags & HPET_DEV_VALID))
			continue;
		if (test_and_set_bit(HPET_DEV_USED_BIT,
				     (unsigned long *)&hdev->flags))
			continue;
		return hdev;
	}
	return NULL;
}

struct hpet_work_struct {
	struct delayed_work work;
	struct completion complete;
};

static void hpet_work(struct work_struct *w)
{
	struct hpet_dev *hdev;
	int cpu = smp_processor_id();
	struct hpet_work_struct *hpet_work;

	hpet_work = container_of(w, struct hpet_work_struct, work.work);

	hdev = hpet_get_unused_timer();
	if (hdev)
		init_one_hpet_msi_clockevent(hdev, cpu);

	complete(&hpet_work->complete);
}

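/*
 * CPU hotplug handling: on CPU_ONLINE a delayed work item is scheduled on
 * the new CPU so that hpet_work() - and thus the clockevent registration
 * in init_one_hpet_msi_clockevent() - runs on that CPU.  On CPU_DEAD the
 * timer is handed back by freeing its interrupt and clearing
 * HPET_DEV_USED.
 */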
static int hpet_cpuhp_notify(struct notifier_block *n,
			     unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
	struct hpet_work_struct work;
	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);

	switch (action & 0xf) {
	case CPU_ONLINE:
		INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
		init_completion(&work.complete);
		/* FIXME: add schedule_work_on() */
		schedule_delayed_work_on(cpu, &work.work, 0);
		wait_for_completion(&work.complete);
		destroy_timer_on_stack(&work.work.timer);
		break;
	case CPU_DEAD:
		if (hdev) {
			free_irq(hdev->irq, hdev);
			hdev->flags &= ~HPET_DEV_USED;
			per_cpu(cpu_hpet_dev, cpu) = NULL;
		}
		break;
	}
	return NOTIFY_OK;
}
#else

static int hpet_setup_msi_irq(unsigned int irq)
{
	return 0;
}
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	return;
}

#ifdef CONFIG_HPET
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
	return;
}
#endif

static int hpet_cpuhp_notify(struct notifier_block *n,
			     unsigned long action, void *hcpu)
{
	return NOTIFY_OK;
}

#endif

/*
 * Clock source related code
 */
static cycle_t read_hpet(struct clocksource *cs)
{
	return (cycle_t)hpet_readl(HPET_COUNTER);
}

#ifdef CONFIG_X86_64
static cycle_t __vsyscall_fn vread_hpet(void)
{
	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}
#endif

static struct clocksource clocksource_hpet = {
	.name		= "hpet",
	.rating		= 250,
	.read		= read_hpet,
	.mask		= HPET_MASK,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= hpet_resume_counter,
#ifdef CONFIG_X86_64
	.vread		= vread_hpet,
#endif
};

static int hpet_clocksource_register(void)
{
	u64 start, now;
	u64 hpet_freq;
	cycle_t t1;

	/* Start the counter */
	hpet_restart_counter();

	/* Verify whether hpet counter works */
	t1 = hpet_readl(HPET_COUNTER);
	rdtscll(start);

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		rep_nop();
		rdtscll(now);
	} while ((now - start) < 200000UL);

	if (t1 == hpet_readl(HPET_COUNTER)) {
		printk(KERN_WARNING
		       "HPET counter not counting. HPET disabled\n");
		return -ENODEV;
	}

	/*
	 * The definition of mult is (include/linux/clocksource.h)
	 *  mult/2^shift = ns/cyc and hpet_period is in units of fsec/cyc
	 * so we first need to convert hpet_period to ns/cyc units:
	 *  mult/2^shift = ns/cyc = hpet_period/10^6
	 *  mult = (hpet_period * 2^shift)/10^6
	 *  mult = (hpet_period << shift)/FSEC_PER_NSEC
	 */

	/* Need to convert hpet_period (fsec/cyc) to cyc/sec:
	 *
	 * cyc/sec = FSEC_PER_SEC/hpet_period(fsec/cyc)
	 * cyc/sec = (FSEC_PER_NSEC * NSEC_PER_SEC)/hpet_period
	 */
	hpet_freq = FSEC_PER_SEC;
	do_div(hpet_freq, hpet_period);
	clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);

	return 0;
}

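/*
 * Example of the fsec -> Hz conversion above, assuming the common
 * 69841279 fs period (illustrative value, not read from the hardware):
 *
 *   hpet_freq = 10^15 / 69841279 ~= 14318180 Hz (14.31818 MHz)
 *
 * which is the frequency handed to clocksource_register_hz().
 */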
/**
 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 */
int __init hpet_enable(void)
{
	unsigned int id;
	int i;

	if (!is_hpet_capable())
		return 0;

	hpet_set_mapping();

	/*
	 * Read the period and check for a sane value:
	 */
	hpet_period = hpet_readl(HPET_PERIOD);

	/*
	 * AMD SB700 based systems with spread spectrum enabled use a
	 * SMM based HPET emulation to provide proper frequency
	 * setting. The SMM code is initialized with the first HPET
	 * register access and takes some time to complete. During
	 * this time the config register reads 0xffffffff. We check
	 * for max. 1000 loops whether the config register reads a non
	 * 0xffffffff value to make sure that HPET is up and running
	 * before we go further. A counting loop is safe, as the HPET
	 * access takes thousands of CPU cycles. On non SB700 based
	 * machines this check is only done once and has no side
	 * effects.
	 */
	for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
		if (i == 1000) {
			printk(KERN_WARNING
			       "HPET config register value = 0xFFFFFFFF. "
			       "Disabling HPET\n");
			goto out_nohpet;
		}
	}

	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
		goto out_nohpet;

	/*
	 * Read the HPET ID register to retrieve the IRQ routing
	 * information and the number of channels
	 */
	id = hpet_readl(HPET_ID);
	hpet_print_config();

#ifdef CONFIG_HPET_EMULATE_RTC
	/*
	 * The legacy routing mode needs at least two channels, tick timer
	 * and the rtc emulation channel.
	 */
	if (!(id & HPET_ID_NUMBER))
		goto out_nohpet;
#endif

	if (hpet_clocksource_register())
		goto out_nohpet;

	if (id & HPET_ID_LEGSUP) {
		hpet_legacy_clockevent_register();
		return 1;
	}
	return 0;

out_nohpet:
	hpet_clear_mapping();
	hpet_address = 0;
	return 0;
}

/*
 * Needs to be late, as the reserve_timer code calls kmalloc!
 *
 * Not a problem on i386 as hpet_enable is called from late_time_init,
 * but on x86_64 it is necessary!
 */
static __init int hpet_late_init(void)
{
	int cpu;

	if (boot_hpet_disable)
		return -ENODEV;

	if (!hpet_address) {
		if (!force_hpet_address)
			return -ENODEV;

		hpet_address = force_hpet_address;
		hpet_enable();
	}

	if (!hpet_virt_address)
		return -ENODEV;

	if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP)
		hpet_msi_capability_lookup(2);
	else
		hpet_msi_capability_lookup(0);

	hpet_reserve_platform_timers(hpet_readl(HPET_ID));
	hpet_print_config();

	if (hpet_msi_disable)
		return 0;

	if (boot_cpu_has(X86_FEATURE_ARAT))
		return 0;

	for_each_online_cpu(cpu) {
		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
	}

	/* This notifier should be called after workqueue is ready */
	hotcpu_notifier(hpet_cpuhp_notify, -20);

	return 0;
}
fs_initcall(hpet_late_init);

void hpet_disable(void)
{
	if (is_hpet_capable() && hpet_virt_address) {
		unsigned int cfg = hpet_readl(HPET_CFG);

		if (hpet_legacy_int_enabled) {
			cfg &= ~HPET_CFG_LEGACY;
			hpet_legacy_int_enabled = 0;
		}
		cfg &= ~HPET_CFG_ENABLE;
		hpet_writel(cfg, HPET_CFG);
	}
}

#ifdef CONFIG_HPET_EMULATE_RTC

/*
 * HPET in LegacyReplacement Mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 *
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every second, when the
 *    RTC clock is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
 *
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 *
 * For (3), we use interrupts at 64 Hz or the user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/mc146818rtc.h>
#include <linux/rtc.h>
#include <asm/rtc.h>

#define DEFAULT_RTC_INT_FREQ	64
#define DEFAULT_RTC_SHIFT	6
#define RTC_NUM_INTS		1

static unsigned long hpet_rtc_flags;
static int hpet_prev_update_sec;
static struct rtc_time hpet_alarm_time;
static unsigned long hpet_pie_count;
static u32 hpet_t1_cmp;
static u32 hpet_default_delta;
static u32 hpet_pie_delta;
static unsigned long hpet_pie_limit;

static rtc_irq_handler irq_handler;

/*
 * Check that the hpet counter c1 is ahead of c2
 */
static inline int hpet_cnt_ahead(u32 c1, u32 c2)
{
	return (s32)(c2 - c1) < 0;
}

/*
 * Registers an IRQ handler.
 */
int hpet_register_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return -ENODEV;
	if (irq_handler)
		return -EBUSY;

	irq_handler = handler;

	return 0;
}
EXPORT_SYMBOL_GPL(hpet_register_irq_handler);

/*
 * Deregisters the IRQ handler registered with hpet_register_irq_handler()
 * and does cleanup.
 */
void hpet_unregister_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return;

	irq_handler = NULL;
	hpet_rtc_flags = 0;
}
EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);

/*
 * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
 * is not supported by all HPET implementations for timer 1.
 *
 * hpet_rtc_timer_init() is called when the rtc is initialized.
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt, delta;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;

	if (!hpet_default_delta) {
		uint64_t clc;

		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
		hpet_default_delta = clc;
	}

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	local_irq_save(flags);

	cnt = delta + hpet_readl(HPET_COUNTER);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	local_irq_restore(flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);

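/*
 * With DEFAULT_RTC_SHIFT = 6, hpet_default_delta above is the number of
 * HPET cycles in 1/64 of a second: mult * NSEC_PER_SEC >> shift yields
 * cycles per second, and the additional shift by 6 divides that by 64.
 * On a typical 14.31818 MHz HPET this is roughly 223700 cycles
 * (illustrative value only, not read from the hardware here).
 */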
/*
 * The functions below are called from the rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags &= ~bit_mask;
	return 1;
}
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);

int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	unsigned long oldbits = hpet_rtc_flags;

	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags |= bit_mask;

	if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
		hpet_prev_update_sec = -1;

	if (!oldbits)
		hpet_rtc_timer_init();

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);

int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
			unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	hpet_alarm_time.tm_hour = hrs;
	hpet_alarm_time.tm_min = min;
	hpet_alarm_time.tm_sec = sec;

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_alarm_time);

int hpet_set_periodic_freq(unsigned long freq)
{
	uint64_t clc;

	if (!is_hpet_enabled())
		return 0;

	if (freq <= DEFAULT_RTC_INT_FREQ)
		hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
	else {
		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		do_div(clc, freq);
		clc >>= hpet_clockevent.shift;
		hpet_pie_delta = clc;
		hpet_pie_limit = 0;
	}
	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);

int hpet_rtc_dropped_irq(void)
{
	return is_hpet_enabled();
}
EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);

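/*
 * hpet_rtc_timer_reinit() below reprograms timer 1 after each emulated RTC
 * interrupt.  lost_ints starts at -1 so that the one increment which is
 * always needed to move the comparator ahead of the counter is not
 * reported as a lost interrupt.
 */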
static void hpet_rtc_timer_reinit(void)
{
	unsigned int cfg, delta;
	int lost_ints = -1;

	if (unlikely(!hpet_rtc_flags)) {
		cfg = hpet_readl(HPET_T1_CFG);
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_T1_CFG);
		return;
	}

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * Increment the comparator value until we are ahead of the
	 * current count.
	 */
	do {
		hpet_t1_cmp += delta;
		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
		lost_ints++;
	} while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));

	if (lost_ints) {
		if (hpet_rtc_flags & RTC_PIE)
			hpet_pie_count += lost_ints;
		if (printk_ratelimit())
			printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
			       lost_ints);
	}
}

irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;

	hpet_rtc_timer_reinit();
	memset(&curr_time, 0, sizeof(struct rtc_time));

	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
		get_rtc_time(&curr_time);

	if (hpet_rtc_flags & RTC_UIE &&
	    curr_time.tm_sec != hpet_prev_update_sec) {
		if (hpet_prev_update_sec >= 0)
			rtc_int_flag = RTC_UF;
		hpet_prev_update_sec = curr_time.tm_sec;
	}

	if (hpet_rtc_flags & RTC_PIE &&
	    ++hpet_pie_count >= hpet_pie_limit) {
		rtc_int_flag |= RTC_PF;
		hpet_pie_count = 0;
	}

	if (hpet_rtc_flags & RTC_AIE &&
	    (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
	    (curr_time.tm_min == hpet_alarm_time.tm_min) &&
	    (curr_time.tm_hour == hpet_alarm_time.tm_hour))
		rtc_int_flag |= RTC_AF;

	if (rtc_int_flag) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		if (irq_handler)
			irq_handler(rtc_int_flag, dev_id);
	}
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
#endif