/*
 * Intel & MS High Precision Event Timer Implementation.
 *
 * Copyright (C) 2003 Intel Corporation
 *	Venki Pallipadi
 * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
 *	Bob Picco <robert.picco@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/bcd.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/clocksource.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/acpi.h>
#include <linux/hpet.h>
#include <asm/current.h>
#include <asm/irq.h>
#include <asm/div64.h>

/*
 * The High Precision Event Timer driver.
 * This driver is closely modelled after the rtc.c driver.
 * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
 */
#define	HPET_USER_FREQ	(64)
#define	HPET_DRIFT	(500)

#define HPET_RANGE_SIZE		1024	/* from HPET spec */


/* WARNING -- don't get confused.  These macros are never used
 * to write the (single) counter, and rarely to read it.
 * They're badly named; to fix, someday.
 */
#if BITS_PER_LONG == 64
#define	write_counter(V, MC)	writeq(V, MC)
#define	read_counter(MC)	readq(MC)
#else
#define	write_counter(V, MC)	writel(V, MC)
#define	read_counter(MC)	readl(MC)
#endif

static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;

/* This clocksource driver currently only works on ia64 */
#ifdef CONFIG_IA64
static void __iomem *hpet_mctr;

static cycle_t read_hpet(struct clocksource *cs)
{
	return (cycle_t)read_counter((void __iomem *)hpet_mctr);
}

static struct clocksource clocksource_hpet = {
	.name		= "hpet",
	.rating		= 250,
	.read		= read_hpet,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *hpet_clocksource;
#endif

/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);

#define	HPET_DEV_NAME	(7)

struct hpet_dev {
	struct hpets *hd_hpets;
	struct hpet __iomem *hd_hpet;
	struct hpet_timer __iomem *hd_timer;
	unsigned long hd_ireqfreq;
	unsigned long hd_irqdata;
	wait_queue_head_t hd_waitqueue;
	struct fasync_struct *hd_async_queue;
	unsigned int hd_flags;
	unsigned int hd_irq;
	unsigned int hd_hdwirq;
	char hd_name[HPET_DEV_NAME];
};

struct hpets {
	struct hpets *hp_next;
	struct hpet __iomem *hp_hpet;
	unsigned long hp_hpet_phys;
	struct clocksource *hp_clocksource;
	unsigned long long hp_tick_freq;
	unsigned long hp_delta;
	unsigned int hp_ntimer;
	unsigned int hp_which;
	struct hpet_dev hp_dev[1];
};

static struct hpets *hpets;

#define	HPET_OPEN		0x0001
#define	HPET_IE			0x0002	/* interrupt enabled */
#define	HPET_PERIODIC		0x0004
#define	HPET_SHARED_IRQ		0x0008


#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
	writel(v & 0xffffffff, addr);
	writel(v >> 32, addr + 4);
}
#endif

static irqreturn_t hpet_interrupt(int irq, void *data)
{
	struct hpet_dev *devp;
	unsigned long isr;

	devp = data;
	isr = 1 << (devp - devp->hd_hpets->hp_dev);

	if ((devp->hd_flags & HPET_SHARED_IRQ) &&
	    !(isr & readl(&devp->hd_hpet->hpet_isr)))
		return IRQ_NONE;

	spin_lock(&hpet_lock);
	devp->hd_irqdata++;

	/*
	 * For non-periodic timers, increment the accumulator.
	 * This has the effect of treating non-periodic like periodic.
	 */
	if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
		unsigned long m, t, mc, base, k;
		struct hpet __iomem *hpet = devp->hd_hpet;
		struct hpets *hpetp = devp->hd_hpets;

		t = devp->hd_ireqfreq;
		m = read_counter(&devp->hd_timer->hpet_compare);
		mc = read_counter(&hpet->hpet_mc);
		/* The time for the next interrupt would logically be t + m,
		 * however, if we are very unlucky and the interrupt is delayed
		 * for longer than t then we will completely miss the next
		 * interrupt if we set t + m and an application will hang.
		 * Therefore we need to make a more complex computation
		 * assuming that there exists a k for which the following
		 * is true:
		 *   k * t + base < mc + delta
		 *   (k + 1) * t + base > mc + delta
		 * where t is the interval in hpet ticks for the given freq,
		 * base is the theoretical start value 0 < base < t,
		 * mc is the main counter value at the time of the interrupt,
		 * delta is the time it takes to write a value to the
		 * comparator.
		 * k may then be computed as (mc - base + delta) / t .
		 */
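		/*
		 * Worked example with made-up numbers: if t = 10000 ticks,
		 * hpetp->hp_delta = 30 and the handler runs late so that
		 * mc = 123456, then base = mc % t = 3456 and
		 * k = (123456 - 3456 + 30) / 10000 = 12.  The comparator is
		 * re-armed at t * (k + 1) + base = 133456, the next multiple
		 * of t (offset by base) that is still safely in the future,
		 * rather than at m + t, which may already have been missed.
		 */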
		base = mc % t;
		k = (mc - base + hpetp->hp_delta) / t;
		write_counter(t * (k + 1) + base,
			      &devp->hd_timer->hpet_compare);
	}

	if (devp->hd_flags & HPET_SHARED_IRQ)
		writel(isr, &devp->hd_hpet->hpet_isr);
	spin_unlock(&hpet_lock);

	wake_up_interruptible(&devp->hd_waitqueue);

	kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);

	return IRQ_HANDLED;
}

static void hpet_timer_set_irq(struct hpet_dev *devp)
{
	unsigned long v;
	int irq, gsi;
	struct hpet_timer __iomem *timer;

	spin_lock_irq(&hpet_lock);
	if (devp->hd_hdwirq) {
		spin_unlock_irq(&hpet_lock);
		return;
	}

	timer = devp->hd_timer;

	/* we prefer level triggered mode */
	v = readl(&timer->hpet_config);
	if (!(v & Tn_INT_TYPE_CNF_MASK)) {
		v |= Tn_INT_TYPE_CNF_MASK;
		writel(v, &timer->hpet_config);
	}
	spin_unlock_irq(&hpet_lock);

	v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >>
				 Tn_INT_ROUTE_CAP_SHIFT;

	/*
	 * In PIC mode, skip IRQ0-4, IRQ6-9, IRQ12-15 which are always used
	 * by legacy devices.  In IO APIC mode, we skip all the legacy IRQs.
	 */
	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC)
		v &= ~0xf3df;
	else
		v &= ~0xffff;

	for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
		if (irq >= nr_irqs) {
			irq = HPET_MAX_IRQ;
			break;
		}

		gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
					ACPI_ACTIVE_LOW);
		if (gsi > 0)
			break;

		/* FIXME: Setup interrupt source table */
	}

	if (irq < HPET_MAX_IRQ) {
		spin_lock_irq(&hpet_lock);
		v = readl(&timer->hpet_config);
		v |= irq << Tn_INT_ROUTE_CNF_SHIFT;
		writel(v, &timer->hpet_config);
		devp->hd_hdwirq = gsi;
		spin_unlock_irq(&hpet_lock);
	}
	return;
}

static int hpet_open(struct inode *inode, struct file *file)
{
	struct hpet_dev *devp;
	struct hpets *hpetp;
	int i;

	if (file->f_mode & FMODE_WRITE)
		return -EINVAL;

	mutex_lock(&hpet_mutex);
	spin_lock_irq(&hpet_lock);

	for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
		for (i = 0; i < hpetp->hp_ntimer; i++)
			if (hpetp->hp_dev[i].hd_flags & HPET_OPEN)
				continue;
			else {
				devp = &hpetp->hp_dev[i];
				break;
			}

	if (!devp) {
		spin_unlock_irq(&hpet_lock);
		mutex_unlock(&hpet_mutex);
		return -EBUSY;
	}

	file->private_data = devp;
	devp->hd_irqdata = 0;
	devp->hd_flags |= HPET_OPEN;
	spin_unlock_irq(&hpet_lock);
	mutex_unlock(&hpet_mutex);

	hpet_timer_set_irq(devp);

	return 0;
}

static ssize_t
hpet_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long data;
	ssize_t retval;
	struct hpet_dev *devp;

	devp = file->private_data;
	if (!devp->hd_ireqfreq)
		return -EIO;

	if (count < sizeof(unsigned long))
		return -EINVAL;

	add_wait_queue(&devp->hd_waitqueue, &wait);

	for ( ; ; ) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&hpet_lock);
		data = devp->hd_irqdata;
		devp->hd_irqdata = 0;
		spin_unlock_irq(&hpet_lock);

		if (data)
			break;
		else if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		} else if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
		schedule();
	}

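	/*
	 * data now holds the number of timer interrupts that accumulated
	 * since the previous read; hand it to userspace as a single
	 * unsigned long.
	 */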
	retval = put_user(data, (unsigned long __user *)buf);
	if (!retval)
		retval = sizeof(unsigned long);
out:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&devp->hd_waitqueue, &wait);

	return retval;
}

static unsigned int hpet_poll(struct file *file, poll_table *wait)
{
	unsigned long v;
	struct hpet_dev *devp;

	devp = file->private_data;

	if (!devp->hd_ireqfreq)
		return 0;

	poll_wait(file, &devp->hd_waitqueue, wait);

	spin_lock_irq(&hpet_lock);
	v = devp->hd_irqdata;
	spin_unlock_irq(&hpet_lock);

	if (v != 0)
		return POLLIN | POLLRDNORM;

	return 0;
}

#ifdef CONFIG_HPET_MMAP
#ifdef CONFIG_HPET_MMAP_DEFAULT
static int hpet_mmap_enabled = 1;
#else
static int hpet_mmap_enabled = 0;
#endif

static __init int hpet_mmap_enable(char *str)
{
	get_option(&str, &hpet_mmap_enabled);
	pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
	return 1;
}
__setup("hpet_mmap", hpet_mmap_enable);

static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct hpet_dev *devp;
	unsigned long addr;

	if (!hpet_mmap_enabled)
		return -EACCES;

	devp = file->private_data;
	addr = devp->hd_hpets->hp_hpet_phys;

	if (addr & (PAGE_SIZE - 1))
		return -ENOSYS;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return vm_iomap_memory(vma, addr, PAGE_SIZE);
}
#else
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static int hpet_fasync(int fd, struct file *file, int on)
{
	struct hpet_dev *devp;

	devp = file->private_data;

	if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0)
		return 0;
	else
		return -EIO;
}

static int hpet_release(struct inode *inode, struct file *file)
{
	struct hpet_dev *devp;
	struct hpet_timer __iomem *timer;
	int irq = 0;

	devp = file->private_data;
	timer = devp->hd_timer;

	spin_lock_irq(&hpet_lock);

	writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
	       &timer->hpet_config);

	irq = devp->hd_irq;
	devp->hd_irq = 0;

	devp->hd_ireqfreq = 0;

	if (devp->hd_flags & HPET_PERIODIC
	    && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
		unsigned long v;

		v = readq(&timer->hpet_config);
		v ^= Tn_TYPE_CNF_MASK;
		writeq(v, &timer->hpet_config);
	}

	devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
	spin_unlock_irq(&hpet_lock);

	if (irq)
		free_irq(irq, devp);

	file->private_data = NULL;
	return 0;
}

static int hpet_ioctl_ieon(struct hpet_dev *devp)
{
	struct hpet_timer __iomem *timer;
	struct hpet __iomem *hpet;
	struct hpets *hpetp;
	int irq;
	unsigned long g, v, t, m;
	unsigned long flags, isr;

	timer = devp->hd_timer;
	hpet = devp->hd_hpet;
	hpetp = devp->hd_hpets;

	if (!devp->hd_ireqfreq)
		return -EIO;

	spin_lock_irq(&hpet_lock);

	if (devp->hd_flags & HPET_IE) {
		spin_unlock_irq(&hpet_lock);
		return -EBUSY;
	}

	devp->hd_flags |= HPET_IE;

	if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK)
		devp->hd_flags |= HPET_SHARED_IRQ;
	spin_unlock_irq(&hpet_lock);

	irq = devp->hd_hdwirq;

	if (irq) {
		unsigned long irq_flags;

		if (devp->hd_flags & HPET_SHARED_IRQ) {
			/*
			 * To prevent the interrupt handler from seeing an
			 * unwanted interrupt status bit, program the timer
			 * so that it will not fire in the near future ...
			 */
			writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
			       &timer->hpet_config);
			write_counter(read_counter(&hpet->hpet_mc),
				      &timer->hpet_compare);
			/* ... and clear any left-over status. */
			isr = 1 << (devp - devp->hd_hpets->hp_dev);
			writel(isr, &hpet->hpet_isr);
		}

		sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
		irq_flags = devp->hd_flags & HPET_SHARED_IRQ ? IRQF_SHARED : 0;
		if (request_irq(irq, hpet_interrupt, irq_flags,
				devp->hd_name, (void *)devp)) {
			printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
			irq = 0;
		}
	}

	if (irq == 0) {
		spin_lock_irq(&hpet_lock);
		devp->hd_flags ^= HPET_IE;
		spin_unlock_irq(&hpet_lock);
		return -EIO;
	}

	devp->hd_irq = irq;
	t = devp->hd_ireqfreq;
	v = readq(&timer->hpet_config);

	/* 64-bit comparators are not yet supported through the ioctls,
	 * so force this into 32-bit mode if it supports both modes
	 */
	g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;

	if (devp->hd_flags & HPET_PERIODIC) {
		g |= Tn_TYPE_CNF_MASK;
		v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;
		writeq(v, &timer->hpet_config);
		local_irq_save(flags);

		/*
		 * NOTE: First we modify the hidden accumulator
		 * register supported by periodic-capable comparators.
		 * We never want to modify the (single) counter; that
		 * would affect all the comparators.  The value written
		 * is the counter value when the first interrupt is due.
		 */
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
		/*
		 * Then we modify the comparator, indicating the period
		 * for subsequent interrupts.
		 */
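		/*
		 * Illustrative numbers only: on a 14.318 MHz HPET (a common
		 * rate) a request for 50 Hz gives t of roughly 286364 ticks,
		 * so the first interrupt is scheduled t (plus hp_delta)
		 * ticks after the current main counter value, and the write
		 * below makes the comparator advance by t ticks after every
		 * interrupt thereafter.
		 */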
		write_counter(t, &timer->hpet_compare);
	} else {
		local_irq_save(flags);
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	}

	if (devp->hd_flags & HPET_SHARED_IRQ) {
		isr = 1 << (devp - devp->hd_hpets->hp_dev);
		writel(isr, &hpet->hpet_isr);
	}
	writeq(g, &timer->hpet_config);
	local_irq_restore(flags);

	return 0;
}

/* converts Hz to number of timer ticks */
static inline unsigned long hpet_time_div(struct hpets *hpets,
					  unsigned long dis)
{
	unsigned long long m;

	m = hpets->hp_tick_freq + (dis >> 1);
	do_div(m, dis);
	return (unsigned long)m;
}

static int
hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
		  struct hpet_info *info)
{
	struct hpet_timer __iomem *timer;
	struct hpet __iomem *hpet;
	struct hpets *hpetp;
	int err;
	unsigned long v;

	switch (cmd) {
	case HPET_IE_OFF:
	case HPET_INFO:
	case HPET_EPI:
	case HPET_DPI:
	case HPET_IRQFREQ:
		timer = devp->hd_timer;
		hpet = devp->hd_hpet;
		hpetp = devp->hd_hpets;
		break;
	case HPET_IE_ON:
		return hpet_ioctl_ieon(devp);
	default:
		return -EINVAL;
	}

	err = 0;

	switch (cmd) {
	case HPET_IE_OFF:
		if ((devp->hd_flags & HPET_IE) == 0)
			break;
		v = readq(&timer->hpet_config);
		v &= ~Tn_INT_ENB_CNF_MASK;
		writeq(v, &timer->hpet_config);
		if (devp->hd_irq) {
			free_irq(devp->hd_irq, devp);
			devp->hd_irq = 0;
		}
		devp->hd_flags ^= HPET_IE;
		break;
	case HPET_INFO:
		{
			memset(info, 0, sizeof(*info));
			if (devp->hd_ireqfreq)
				info->hi_ireqfreq =
					hpet_time_div(hpetp, devp->hd_ireqfreq);
			info->hi_flags =
			    readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
			info->hi_hpet = hpetp->hp_which;
			info->hi_timer = devp - hpetp->hp_dev;
			break;
		}
	case HPET_EPI:
		v = readq(&timer->hpet_config);
		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
			err = -ENXIO;
			break;
		}
		devp->hd_flags |= HPET_PERIODIC;
		break;
	case HPET_DPI:
		v = readq(&timer->hpet_config);
		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
			err = -ENXIO;
			break;
		}
		if (devp->hd_flags & HPET_PERIODIC &&
		    readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
			v = readq(&timer->hpet_config);
			v ^= Tn_TYPE_CNF_MASK;
			writeq(v, &timer->hpet_config);
		}
		devp->hd_flags &= ~HPET_PERIODIC;
		break;
	case HPET_IRQFREQ:
		if ((arg > hpet_max_freq) &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EACCES;
			break;
		}

		if (!arg) {
			err = -EINVAL;
			break;
		}

		devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
	}

	return err;
}

static long
hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hpet_info info;
	int err;

	mutex_lock(&hpet_mutex);
	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
	mutex_unlock(&hpet_mutex);

	if ((cmd == HPET_INFO) && !err &&
	    (copy_to_user((void __user *)arg, &info, sizeof(info))))
		err = -EFAULT;

	return err;
}

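/*
 * A minimal sketch (illustrative only, error handling omitted) of how
 * userspace is expected to drive this ioctl interface:
 *
 *	int fd = open("/dev/hpet", O_RDONLY);
 *	unsigned long hits;
 *
 *	ioctl(fd, HPET_IRQFREQ, 50);	request 50 interrupts per second
 *	ioctl(fd, HPET_EPI, 0);		periodic mode, if the comparator
 *					supports it
 *	ioctl(fd, HPET_IE_ON, 0);	arm the timer
 *	read(fd, &hits, sizeof(hits));	blocks, then returns the number of
 *					interrupts since the last read
 *	ioctl(fd, HPET_IE_OFF, 0);
 *	close(fd);
 *
 * HPET_INFO fills in a struct hpet_info with the programmed frequency,
 * the hpet/timer indices and whether the comparator is periodic-capable.
 */
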
#ifdef CONFIG_COMPAT
struct compat_hpet_info {
	compat_ulong_t hi_ireqfreq;	/* Hz */
	compat_ulong_t hi_flags;	/* information */
	unsigned short hi_hpet;
	unsigned short hi_timer;
};

static long
hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hpet_info info;
	int err;

	mutex_lock(&hpet_mutex);
	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
	mutex_unlock(&hpet_mutex);

	if ((cmd == HPET_INFO) && !err) {
		struct compat_hpet_info __user *u = compat_ptr(arg);
		if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) ||
		    put_user(info.hi_flags, &u->hi_flags) ||
		    put_user(info.hi_hpet, &u->hi_hpet) ||
		    put_user(info.hi_timer, &u->hi_timer))
			err = -EFAULT;
	}

	return err;
}
#endif

static const struct file_operations hpet_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = hpet_read,
	.poll = hpet_poll,
	.unlocked_ioctl = hpet_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hpet_compat_ioctl,
#endif
	.open = hpet_open,
	.release = hpet_release,
	.fasync = hpet_fasync,
	.mmap = hpet_mmap,
};

static int hpet_is_known(struct hpet_data *hdp)
{
	struct hpets *hpetp;

	for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
		if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
			return 1;

	return 0;
}

static struct ctl_table hpet_table[] = {
	{
		.procname	= "max-user-freq",
		.data		= &hpet_max_freq,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{}
};

static struct ctl_table hpet_root[] = {
	{
		.procname	= "hpet",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= hpet_table,
	},
	{}
};

static struct ctl_table dev_root[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= hpet_root,
	},
	{}
};

static struct ctl_table_header *sysctl_header;

/*
 * Adjustment for when arming the timer with
 * initial conditions.  That is, main counter
 * ticks expired before interrupts are enabled.
 */
#define	TICK_CALIBRATE	(1000UL)

static unsigned long __hpet_calibrate(struct hpets *hpetp)
{
	struct hpet_timer __iomem *timer = NULL;
	unsigned long t, m, count, i, flags, start;
	struct hpet_dev *devp;
	int j;
	struct hpet __iomem *hpet;

	for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
		if ((devp->hd_flags & HPET_OPEN) == 0) {
			timer = devp->hd_timer;
			break;
		}

	if (!timer)
		return 0;

	hpet = hpetp->hp_hpet;
	t = read_counter(&timer->hpet_compare);

	i = 0;
	count = hpet_time_div(hpetp, TICK_CALIBRATE);

	local_irq_save(flags);

	start = read_counter(&hpet->hpet_mc);

	do {
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	} while (i++, (m - start) < count);

	local_irq_restore(flags);

	return (m - start) / i;
}

static unsigned long hpet_calibrate(struct hpets *hpetp)
{
	unsigned long ret = ~0UL;
	unsigned long tmp;

	/*
	 * Try to calibrate until the return value becomes a stable, small
	 * value.  If an SMI occurs during the calibration loop the result
	 * will be large; iterating avoids that impact.
	 */
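	/*
	 * Illustrative (made-up) sequence: successive __hpet_calibrate()
	 * results of 9000, 35, 34, 34 stop the loop at the first value
	 * that fails to shrink, so the smallest, presumably SMI-free,
	 * measurement (34) is returned.
	 */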
	for ( ; ; ) {
		tmp = __hpet_calibrate(hpetp);
		if (ret <= tmp)
			break;
		ret = tmp;
	}

	return ret;
}

int hpet_alloc(struct hpet_data *hdp)
{
	u64 cap, mcfg;
	struct hpet_dev *devp;
	u32 i, ntimer;
	struct hpets *hpetp;
	size_t siz;
	struct hpet __iomem *hpet;
	static struct hpets *last;
	unsigned long period;
	unsigned long long temp;
	u32 remainder;

	/*
	 * hpet_alloc can be called by platform dependent code.
	 * If platform dependent code has allocated the hpet that
	 * ACPI has also reported, then we catch it here.
	 */
	if (hpet_is_known(hdp)) {
		printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
			__func__);
		return 0;
	}

	siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
				      sizeof(struct hpet_dev));

	hpetp = kzalloc(siz, GFP_KERNEL);

	if (!hpetp)
		return -ENOMEM;

	hpetp->hp_which = hpet_nhpet++;
	hpetp->hp_hpet = hdp->hd_address;
	hpetp->hp_hpet_phys = hdp->hd_phys_address;

	hpetp->hp_ntimer = hdp->hd_nirqs;

	for (i = 0; i < hdp->hd_nirqs; i++)
		hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];

	hpet = hpetp->hp_hpet;

	cap = readq(&hpet->hpet_cap);

	ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;

	if (hpetp->hp_ntimer != ntimer) {
		printk(KERN_WARNING "hpet: number of irqs doesn't agree"
		       " with number of timers\n");
		kfree(hpetp);
		return -ENODEV;
	}

	if (last)
		last->hp_next = hpetp;
	else
		hpets = hpetp;

	last = hpetp;

	period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
		HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
	temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
	temp += period >> 1; /* round */
	do_div(temp, period);
	hpetp->hp_tick_freq = temp; /* ticks per second */

	printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
		hpetp->hp_which, hdp->hd_phys_address,
		hpetp->hp_ntimer > 1 ? "s" : "");
	for (i = 0; i < hpetp->hp_ntimer; i++)
		printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
	printk(KERN_CONT "\n");

	temp = hpetp->hp_tick_freq;
	remainder = do_div(temp, 1000000);
	printk(KERN_INFO
		"hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n",
		hpetp->hp_which, hpetp->hp_ntimer,
		cap & HPET_COUNTER_SIZE_MASK ? 64 : 32,
		(unsigned) temp, remainder);

	mcfg = readq(&hpet->hpet_config);
	if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
		write_counter(0L, &hpet->hpet_mc);
		mcfg |= HPET_ENABLE_CNF_MASK;
		writeq(mcfg, &hpet->hpet_config);
	}

	for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) {
		struct hpet_timer __iomem *timer;

		timer = &hpet->hpet_timers[devp - hpetp->hp_dev];

		devp->hd_hpets = hpetp;
		devp->hd_hpet = hpet;
		devp->hd_timer = timer;

		/*
		 * If the timer was reserved by platform code,
		 * then make the timer unavailable for opens.
		 */
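		/*
		 * (For example, on x86 the architecture code typically
		 * reserves timers 0 and 1 here when they back the legacy
		 * replacement route, so they are never handed out via
		 * /dev/hpet.)
		 */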
		if (hdp->hd_state & (1 << i)) {
			devp->hd_flags = HPET_OPEN;
			continue;
		}

		init_waitqueue_head(&devp->hd_waitqueue);
	}

	hpetp->hp_delta = hpet_calibrate(hpetp);

/* This clocksource driver currently only works on ia64 */
#ifdef CONFIG_IA64
	if (!hpet_clocksource) {
		hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
		clocksource_hpet.archdata.fsys_mmio = hpet_mctr;
		clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq);
		hpetp->hp_clocksource = &clocksource_hpet;
		hpet_clocksource = &clocksource_hpet;
	}
#endif

	return 0;
}

static acpi_status hpet_resources(struct acpi_resource *res, void *data)
{
	struct hpet_data *hdp;
	acpi_status status;
	struct acpi_resource_address64 addr;

	hdp = data;

	status = acpi_resource_to_address64(res, &addr);

	if (ACPI_SUCCESS(status)) {
		hdp->hd_phys_address = addr.address.minimum;
		hdp->hd_address = ioremap(addr.address.minimum,
					  addr.address.address_length);

		if (hpet_is_known(hdp)) {
			iounmap(hdp->hd_address);
			return AE_ALREADY_EXISTS;
		}
	} else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
		struct acpi_resource_fixed_memory32 *fixmem32;

		fixmem32 = &res->data.fixed_memory32;

		hdp->hd_phys_address = fixmem32->address;
		hdp->hd_address = ioremap(fixmem32->address,
					  HPET_RANGE_SIZE);

		if (hpet_is_known(hdp)) {
			iounmap(hdp->hd_address);
			return AE_ALREADY_EXISTS;
		}
	} else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
		struct acpi_resource_extended_irq *irqp;
		int i, irq;

		irqp = &res->data.extended_irq;

		for (i = 0; i < irqp->interrupt_count; i++) {
			if (hdp->hd_nirqs >= HPET_MAX_TIMERS)
				break;

			irq = acpi_register_gsi(NULL, irqp->interrupts[i],
						irqp->triggering,
						irqp->polarity);
			if (irq < 0)
				return AE_ERROR;

			hdp->hd_irq[hdp->hd_nirqs] = irq;
			hdp->hd_nirqs++;
		}
	}

	return AE_OK;
}

static int hpet_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	struct hpet_data data;

	memset(&data, 0, sizeof(data));

	result =
	    acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				hpet_resources, &data);

	if (ACPI_FAILURE(result))
		return -ENODEV;

	if (!data.hd_address || !data.hd_nirqs) {
		if (data.hd_address)
			iounmap(data.hd_address);
		printk("%s: no address or irqs in _CRS\n", __func__);
		return -ENODEV;
	}

	return hpet_alloc(&data);
}

static const struct acpi_device_id hpet_device_ids[] = {
	{"PNP0103", 0},
	{"", 0},
};

static struct acpi_driver hpet_acpi_driver = {
	.name = "hpet",
	.ids = hpet_device_ids,
	.ops = {
		.add = hpet_acpi_add,
		},
};

static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };

static int __init hpet_init(void)
{
	int result;

	result = misc_register(&hpet_misc);
	if (result < 0)
		return -ENODEV;

	sysctl_header = register_sysctl_table(dev_root);

	result = acpi_bus_register_driver(&hpet_acpi_driver);
	if (result < 0) {
		if (sysctl_header)
			unregister_sysctl_table(sysctl_header);
		misc_deregister(&hpet_misc);
		return result;
	}

	return 0;
}
device_initcall(hpet_init);

/*
MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
MODULE_LICENSE("GPL");
*/