1 /* 2 * Intel & MS High Precision Event Timer Implementation. 3 * 4 * Copyright (C) 2003 Intel Corporation 5 * Venki Pallipadi 6 * (c) Copyright 2004 Hewlett-Packard Development Company, L.P. 7 * Bob Picco <robert.picco@hp.com> 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 */ 13 14 #include <linux/interrupt.h> 15 #include <linux/module.h> 16 #include <linux/kernel.h> 17 #include <linux/types.h> 18 #include <linux/miscdevice.h> 19 #include <linux/major.h> 20 #include <linux/ioport.h> 21 #include <linux/fcntl.h> 22 #include <linux/init.h> 23 #include <linux/poll.h> 24 #include <linux/mm.h> 25 #include <linux/proc_fs.h> 26 #include <linux/spinlock.h> 27 #include <linux/sysctl.h> 28 #include <linux/wait.h> 29 #include <linux/bcd.h> 30 #include <linux/seq_file.h> 31 #include <linux/bitops.h> 32 #include <linux/clocksource.h> 33 34 #include <asm/current.h> 35 #include <asm/uaccess.h> 36 #include <asm/system.h> 37 #include <asm/io.h> 38 #include <asm/irq.h> 39 #include <asm/div64.h> 40 41 #include <linux/acpi.h> 42 #include <acpi/acpi_bus.h> 43 #include <linux/hpet.h> 44 45 /* 46 * The High Precision Event Timer driver. 47 * This driver is closely modelled after the rtc.c driver. 48 * http://www.intel.com/hardwaredesign/hpetspec.htm 49 */ 50 #define HPET_USER_FREQ (64) 51 #define HPET_DRIFT (500) 52 53 #define HPET_RANGE_SIZE 1024 /* from HPET spec */ 54 55 #if BITS_PER_LONG == 64 56 #define write_counter(V, MC) writeq(V, MC) 57 #define read_counter(MC) readq(MC) 58 #else 59 #define write_counter(V, MC) writel(V, MC) 60 #define read_counter(MC) readl(MC) 61 #endif 62 63 static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ; 64 65 static void __iomem *hpet_mctr; 66 67 static cycle_t read_hpet(void) 68 { 69 return (cycle_t)read_counter((void __iomem *)hpet_mctr); 70 } 71 72 static struct clocksource clocksource_hpet = { 73 .name = "hpet", 74 .rating = 250, 75 .read = read_hpet, 76 .mask = CLOCKSOURCE_MASK(64), 77 .mult = 0, /*to be caluclated*/ 78 .shift = 10, 79 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 80 }; 81 static struct clocksource *hpet_clocksource; 82 83 /* A lock for concurrent access by app and isr hpet activity. */ 84 static DEFINE_SPINLOCK(hpet_lock); 85 /* A lock for concurrent intermodule access to hpet and isr hpet activity. */ 86 static DEFINE_SPINLOCK(hpet_task_lock); 87 88 #define HPET_DEV_NAME (7) 89 90 struct hpet_dev { 91 struct hpets *hd_hpets; 92 struct hpet __iomem *hd_hpet; 93 struct hpet_timer __iomem *hd_timer; 94 unsigned long hd_ireqfreq; 95 unsigned long hd_irqdata; 96 wait_queue_head_t hd_waitqueue; 97 struct fasync_struct *hd_async_queue; 98 struct hpet_task *hd_task; 99 unsigned int hd_flags; 100 unsigned int hd_irq; 101 unsigned int hd_hdwirq; 102 char hd_name[HPET_DEV_NAME]; 103 }; 104 105 struct hpets { 106 struct hpets *hp_next; 107 struct hpet __iomem *hp_hpet; 108 unsigned long hp_hpet_phys; 109 struct clocksource *hp_clocksource; 110 unsigned long long hp_tick_freq; 111 unsigned long hp_delta; 112 unsigned int hp_ntimer; 113 unsigned int hp_which; 114 struct hpet_dev hp_dev[1]; 115 }; 116 117 static struct hpets *hpets; 118 119 #define HPET_OPEN 0x0001 120 #define HPET_IE 0x0002 /* interrupt enabled */ 121 #define HPET_PERIODIC 0x0004 122 #define HPET_SHARED_IRQ 0x0008 123 124 125 #ifndef readq 126 static inline unsigned long long readq(void __iomem *addr) 127 { 128 return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL); 129 } 130 #endif 131 132 #ifndef writeq 133 static inline void writeq(unsigned long long v, void __iomem *addr) 134 { 135 writel(v & 0xffffffff, addr); 136 writel(v >> 32, addr + 4); 137 } 138 #endif 139 140 static irqreturn_t hpet_interrupt(int irq, void *data) 141 { 142 struct hpet_dev *devp; 143 unsigned long isr; 144 145 devp = data; 146 isr = 1 << (devp - devp->hd_hpets->hp_dev); 147 148 if ((devp->hd_flags & HPET_SHARED_IRQ) && 149 !(isr & readl(&devp->hd_hpet->hpet_isr))) 150 return IRQ_NONE; 151 152 spin_lock(&hpet_lock); 153 devp->hd_irqdata++; 154 155 /* 156 * For non-periodic timers, increment the accumulator. 157 * This has the effect of treating non-periodic like periodic. 158 */ 159 if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) { 160 unsigned long m, t; 161 162 t = devp->hd_ireqfreq; 163 m = read_counter(&devp->hd_hpet->hpet_mc); 164 write_counter(t + m + devp->hd_hpets->hp_delta, 165 &devp->hd_timer->hpet_compare); 166 } 167 168 if (devp->hd_flags & HPET_SHARED_IRQ) 169 writel(isr, &devp->hd_hpet->hpet_isr); 170 spin_unlock(&hpet_lock); 171 172 spin_lock(&hpet_task_lock); 173 if (devp->hd_task) 174 devp->hd_task->ht_func(devp->hd_task->ht_data); 175 spin_unlock(&hpet_task_lock); 176 177 wake_up_interruptible(&devp->hd_waitqueue); 178 179 kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN); 180 181 return IRQ_HANDLED; 182 } 183 184 static int hpet_open(struct inode *inode, struct file *file) 185 { 186 struct hpet_dev *devp; 187 struct hpets *hpetp; 188 int i; 189 190 if (file->f_mode & FMODE_WRITE) 191 return -EINVAL; 192 193 spin_lock_irq(&hpet_lock); 194 195 for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next) 196 for (i = 0; i < hpetp->hp_ntimer; i++) 197 if (hpetp->hp_dev[i].hd_flags & HPET_OPEN 198 || hpetp->hp_dev[i].hd_task) 199 continue; 200 else { 201 devp = &hpetp->hp_dev[i]; 202 break; 203 } 204 205 if (!devp) { 206 spin_unlock_irq(&hpet_lock); 207 return -EBUSY; 208 } 209 210 file->private_data = devp; 211 devp->hd_irqdata = 0; 212 devp->hd_flags |= HPET_OPEN; 213 spin_unlock_irq(&hpet_lock); 214 215 return 0; 216 } 217 218 static ssize_t 219 hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos) 220 { 221 DECLARE_WAITQUEUE(wait, current); 222 unsigned long data; 223 ssize_t retval; 224 struct hpet_dev *devp; 225 226 devp = file->private_data; 227 if (!devp->hd_ireqfreq) 228 return -EIO; 229 230 if (count < sizeof(unsigned long)) 231 return -EINVAL; 232 233 add_wait_queue(&devp->hd_waitqueue, &wait); 234 235 for ( ; ; ) { 236 set_current_state(TASK_INTERRUPTIBLE); 237 238 spin_lock_irq(&hpet_lock); 239 data = devp->hd_irqdata; 240 devp->hd_irqdata = 0; 241 spin_unlock_irq(&hpet_lock); 242 243 if (data) 244 break; 245 else if (file->f_flags & O_NONBLOCK) { 246 retval = -EAGAIN; 247 goto out; 248 } else if (signal_pending(current)) { 249 retval = -ERESTARTSYS; 250 goto out; 251 } 252 schedule(); 253 } 254 255 retval = put_user(data, (unsigned long __user *)buf); 256 if (!retval) 257 retval = sizeof(unsigned long); 258 out: 259 __set_current_state(TASK_RUNNING); 260 remove_wait_queue(&devp->hd_waitqueue, &wait); 261 262 return retval; 263 } 264 265 static unsigned int hpet_poll(struct file *file, poll_table * wait) 266 { 267 unsigned long v; 268 struct hpet_dev *devp; 269 270 devp = file->private_data; 271 272 if (!devp->hd_ireqfreq) 273 return 0; 274 275 poll_wait(file, &devp->hd_waitqueue, wait); 276 277 spin_lock_irq(&hpet_lock); 278 v = devp->hd_irqdata; 279 spin_unlock_irq(&hpet_lock); 280 281 if (v != 0) 282 return POLLIN | POLLRDNORM; 283 284 return 0; 285 } 286 287 static int hpet_mmap(struct file *file, struct vm_area_struct *vma) 288 { 289 #ifdef CONFIG_HPET_MMAP 290 struct hpet_dev *devp; 291 unsigned long addr; 292 293 if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff) 294 return -EINVAL; 295 296 devp = file->private_data; 297 addr = devp->hd_hpets->hp_hpet_phys; 298 299 if (addr & (PAGE_SIZE - 1)) 300 return -ENOSYS; 301 302 vma->vm_flags |= VM_IO; 303 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 304 305 if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, 306 PAGE_SIZE, vma->vm_page_prot)) { 307 printk(KERN_ERR "%s: io_remap_pfn_range failed\n", 308 __FUNCTION__); 309 return -EAGAIN; 310 } 311 312 return 0; 313 #else 314 return -ENOSYS; 315 #endif 316 } 317 318 static int hpet_fasync(int fd, struct file *file, int on) 319 { 320 struct hpet_dev *devp; 321 322 devp = file->private_data; 323 324 if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0) 325 return 0; 326 else 327 return -EIO; 328 } 329 330 static int hpet_release(struct inode *inode, struct file *file) 331 { 332 struct hpet_dev *devp; 333 struct hpet_timer __iomem *timer; 334 int irq = 0; 335 336 devp = file->private_data; 337 timer = devp->hd_timer; 338 339 spin_lock_irq(&hpet_lock); 340 341 writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK), 342 &timer->hpet_config); 343 344 irq = devp->hd_irq; 345 devp->hd_irq = 0; 346 347 devp->hd_ireqfreq = 0; 348 349 if (devp->hd_flags & HPET_PERIODIC 350 && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) { 351 unsigned long v; 352 353 v = readq(&timer->hpet_config); 354 v ^= Tn_TYPE_CNF_MASK; 355 writeq(v, &timer->hpet_config); 356 } 357 358 devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC); 359 spin_unlock_irq(&hpet_lock); 360 361 if (irq) 362 free_irq(irq, devp); 363 364 if (file->f_flags & FASYNC) 365 hpet_fasync(-1, file, 0); 366 367 file->private_data = NULL; 368 return 0; 369 } 370 371 static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int); 372 373 static int 374 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd, 375 unsigned long arg) 376 { 377 struct hpet_dev *devp; 378 379 devp = file->private_data; 380 return hpet_ioctl_common(devp, cmd, arg, 0); 381 } 382 383 static int hpet_ioctl_ieon(struct hpet_dev *devp) 384 { 385 struct hpet_timer __iomem *timer; 386 struct hpet __iomem *hpet; 387 struct hpets *hpetp; 388 int irq; 389 unsigned long g, v, t, m; 390 unsigned long flags, isr; 391 392 timer = devp->hd_timer; 393 hpet = devp->hd_hpet; 394 hpetp = devp->hd_hpets; 395 396 if (!devp->hd_ireqfreq) 397 return -EIO; 398 399 spin_lock_irq(&hpet_lock); 400 401 if (devp->hd_flags & HPET_IE) { 402 spin_unlock_irq(&hpet_lock); 403 return -EBUSY; 404 } 405 406 devp->hd_flags |= HPET_IE; 407 408 if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK) 409 devp->hd_flags |= HPET_SHARED_IRQ; 410 spin_unlock_irq(&hpet_lock); 411 412 irq = devp->hd_hdwirq; 413 414 if (irq) { 415 unsigned long irq_flags; 416 417 sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev)); 418 irq_flags = devp->hd_flags & HPET_SHARED_IRQ 419 ? IRQF_SHARED : IRQF_DISABLED; 420 if (request_irq(irq, hpet_interrupt, irq_flags, 421 devp->hd_name, (void *)devp)) { 422 printk(KERN_ERR "hpet: IRQ %d is not free\n", irq); 423 irq = 0; 424 } 425 } 426 427 if (irq == 0) { 428 spin_lock_irq(&hpet_lock); 429 devp->hd_flags ^= HPET_IE; 430 spin_unlock_irq(&hpet_lock); 431 return -EIO; 432 } 433 434 devp->hd_irq = irq; 435 t = devp->hd_ireqfreq; 436 v = readq(&timer->hpet_config); 437 g = v | Tn_INT_ENB_CNF_MASK; 438 439 if (devp->hd_flags & HPET_PERIODIC) { 440 write_counter(t, &timer->hpet_compare); 441 g |= Tn_TYPE_CNF_MASK; 442 v |= Tn_TYPE_CNF_MASK; 443 writeq(v, &timer->hpet_config); 444 v |= Tn_VAL_SET_CNF_MASK; 445 writeq(v, &timer->hpet_config); 446 local_irq_save(flags); 447 m = read_counter(&hpet->hpet_mc); 448 write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); 449 } else { 450 local_irq_save(flags); 451 m = read_counter(&hpet->hpet_mc); 452 write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); 453 } 454 455 if (devp->hd_flags & HPET_SHARED_IRQ) { 456 isr = 1 << (devp - devp->hd_hpets->hp_dev); 457 writel(isr, &hpet->hpet_isr); 458 } 459 writeq(g, &timer->hpet_config); 460 local_irq_restore(flags); 461 462 return 0; 463 } 464 465 /* converts Hz to number of timer ticks */ 466 static inline unsigned long hpet_time_div(struct hpets *hpets, 467 unsigned long dis) 468 { 469 unsigned long long m; 470 471 m = hpets->hp_tick_freq + (dis >> 1); 472 do_div(m, dis); 473 return (unsigned long)m; 474 } 475 476 static int 477 hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel) 478 { 479 struct hpet_timer __iomem *timer; 480 struct hpet __iomem *hpet; 481 struct hpets *hpetp; 482 int err; 483 unsigned long v; 484 485 switch (cmd) { 486 case HPET_IE_OFF: 487 case HPET_INFO: 488 case HPET_EPI: 489 case HPET_DPI: 490 case HPET_IRQFREQ: 491 timer = devp->hd_timer; 492 hpet = devp->hd_hpet; 493 hpetp = devp->hd_hpets; 494 break; 495 case HPET_IE_ON: 496 return hpet_ioctl_ieon(devp); 497 default: 498 return -EINVAL; 499 } 500 501 err = 0; 502 503 switch (cmd) { 504 case HPET_IE_OFF: 505 if ((devp->hd_flags & HPET_IE) == 0) 506 break; 507 v = readq(&timer->hpet_config); 508 v &= ~Tn_INT_ENB_CNF_MASK; 509 writeq(v, &timer->hpet_config); 510 if (devp->hd_irq) { 511 free_irq(devp->hd_irq, devp); 512 devp->hd_irq = 0; 513 } 514 devp->hd_flags ^= HPET_IE; 515 break; 516 case HPET_INFO: 517 { 518 struct hpet_info info; 519 520 if (devp->hd_ireqfreq) 521 info.hi_ireqfreq = 522 hpet_time_div(hpetp, devp->hd_ireqfreq); 523 else 524 info.hi_ireqfreq = 0; 525 info.hi_flags = 526 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK; 527 info.hi_hpet = hpetp->hp_which; 528 info.hi_timer = devp - hpetp->hp_dev; 529 if (kernel) 530 memcpy((void *)arg, &info, sizeof(info)); 531 else 532 if (copy_to_user((void __user *)arg, &info, 533 sizeof(info))) 534 err = -EFAULT; 535 break; 536 } 537 case HPET_EPI: 538 v = readq(&timer->hpet_config); 539 if ((v & Tn_PER_INT_CAP_MASK) == 0) { 540 err = -ENXIO; 541 break; 542 } 543 devp->hd_flags |= HPET_PERIODIC; 544 break; 545 case HPET_DPI: 546 v = readq(&timer->hpet_config); 547 if ((v & Tn_PER_INT_CAP_MASK) == 0) { 548 err = -ENXIO; 549 break; 550 } 551 if (devp->hd_flags & HPET_PERIODIC && 552 readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) { 553 v = readq(&timer->hpet_config); 554 v ^= Tn_TYPE_CNF_MASK; 555 writeq(v, &timer->hpet_config); 556 } 557 devp->hd_flags &= ~HPET_PERIODIC; 558 break; 559 case HPET_IRQFREQ: 560 if (!kernel && (arg > hpet_max_freq) && 561 !capable(CAP_SYS_RESOURCE)) { 562 err = -EACCES; 563 break; 564 } 565 566 if (!arg) { 567 err = -EINVAL; 568 break; 569 } 570 571 devp->hd_ireqfreq = hpet_time_div(hpetp, arg); 572 } 573 574 return err; 575 } 576 577 static const struct file_operations hpet_fops = { 578 .owner = THIS_MODULE, 579 .llseek = no_llseek, 580 .read = hpet_read, 581 .poll = hpet_poll, 582 .ioctl = hpet_ioctl, 583 .open = hpet_open, 584 .release = hpet_release, 585 .fasync = hpet_fasync, 586 .mmap = hpet_mmap, 587 }; 588 589 static int hpet_is_known(struct hpet_data *hdp) 590 { 591 struct hpets *hpetp; 592 593 for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next) 594 if (hpetp->hp_hpet_phys == hdp->hd_phys_address) 595 return 1; 596 597 return 0; 598 } 599 600 EXPORT_SYMBOL(hpet_alloc); 601 EXPORT_SYMBOL(hpet_register); 602 EXPORT_SYMBOL(hpet_unregister); 603 EXPORT_SYMBOL(hpet_control); 604 605 int hpet_register(struct hpet_task *tp, int periodic) 606 { 607 unsigned int i; 608 u64 mask; 609 struct hpet_timer __iomem *timer; 610 struct hpet_dev *devp; 611 struct hpets *hpetp; 612 613 switch (periodic) { 614 case 1: 615 mask = Tn_PER_INT_CAP_MASK; 616 break; 617 case 0: 618 mask = 0; 619 break; 620 default: 621 return -EINVAL; 622 } 623 624 tp->ht_opaque = NULL; 625 626 spin_lock_irq(&hpet_task_lock); 627 spin_lock(&hpet_lock); 628 629 for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next) 630 for (timer = hpetp->hp_hpet->hpet_timers, i = 0; 631 i < hpetp->hp_ntimer; i++, timer++) { 632 if ((readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK) 633 != mask) 634 continue; 635 636 devp = &hpetp->hp_dev[i]; 637 638 if (devp->hd_flags & HPET_OPEN || devp->hd_task) { 639 devp = NULL; 640 continue; 641 } 642 643 tp->ht_opaque = devp; 644 devp->hd_task = tp; 645 break; 646 } 647 648 spin_unlock(&hpet_lock); 649 spin_unlock_irq(&hpet_task_lock); 650 651 if (tp->ht_opaque) 652 return 0; 653 else 654 return -EBUSY; 655 } 656 657 static inline int hpet_tpcheck(struct hpet_task *tp) 658 { 659 struct hpet_dev *devp; 660 struct hpets *hpetp; 661 662 devp = tp->ht_opaque; 663 664 if (!devp) 665 return -ENXIO; 666 667 for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next) 668 if (devp >= hpetp->hp_dev 669 && devp < (hpetp->hp_dev + hpetp->hp_ntimer) 670 && devp->hd_hpet == hpetp->hp_hpet) 671 return 0; 672 673 return -ENXIO; 674 } 675 676 int hpet_unregister(struct hpet_task *tp) 677 { 678 struct hpet_dev *devp; 679 struct hpet_timer __iomem *timer; 680 int err; 681 682 if ((err = hpet_tpcheck(tp))) 683 return err; 684 685 spin_lock_irq(&hpet_task_lock); 686 spin_lock(&hpet_lock); 687 688 devp = tp->ht_opaque; 689 if (devp->hd_task != tp) { 690 spin_unlock(&hpet_lock); 691 spin_unlock_irq(&hpet_task_lock); 692 return -ENXIO; 693 } 694 695 timer = devp->hd_timer; 696 writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK), 697 &timer->hpet_config); 698 devp->hd_flags &= ~(HPET_IE | HPET_PERIODIC); 699 devp->hd_task = NULL; 700 spin_unlock(&hpet_lock); 701 spin_unlock_irq(&hpet_task_lock); 702 703 return 0; 704 } 705 706 int hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg) 707 { 708 struct hpet_dev *devp; 709 int err; 710 711 if ((err = hpet_tpcheck(tp))) 712 return err; 713 714 spin_lock_irq(&hpet_lock); 715 devp = tp->ht_opaque; 716 if (devp->hd_task != tp) { 717 spin_unlock_irq(&hpet_lock); 718 return -ENXIO; 719 } 720 spin_unlock_irq(&hpet_lock); 721 return hpet_ioctl_common(devp, cmd, arg, 1); 722 } 723 724 static ctl_table hpet_table[] = { 725 { 726 .ctl_name = CTL_UNNUMBERED, 727 .procname = "max-user-freq", 728 .data = &hpet_max_freq, 729 .maxlen = sizeof(int), 730 .mode = 0644, 731 .proc_handler = &proc_dointvec, 732 }, 733 {.ctl_name = 0} 734 }; 735 736 static ctl_table hpet_root[] = { 737 { 738 .ctl_name = CTL_UNNUMBERED, 739 .procname = "hpet", 740 .maxlen = 0, 741 .mode = 0555, 742 .child = hpet_table, 743 }, 744 {.ctl_name = 0} 745 }; 746 747 static ctl_table dev_root[] = { 748 { 749 .ctl_name = CTL_DEV, 750 .procname = "dev", 751 .maxlen = 0, 752 .mode = 0555, 753 .child = hpet_root, 754 }, 755 {.ctl_name = 0} 756 }; 757 758 static struct ctl_table_header *sysctl_header; 759 760 /* 761 * Adjustment for when arming the timer with 762 * initial conditions. That is, main counter 763 * ticks expired before interrupts are enabled. 764 */ 765 #define TICK_CALIBRATE (1000UL) 766 767 static unsigned long hpet_calibrate(struct hpets *hpetp) 768 { 769 struct hpet_timer __iomem *timer = NULL; 770 unsigned long t, m, count, i, flags, start; 771 struct hpet_dev *devp; 772 int j; 773 struct hpet __iomem *hpet; 774 775 for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++) 776 if ((devp->hd_flags & HPET_OPEN) == 0) { 777 timer = devp->hd_timer; 778 break; 779 } 780 781 if (!timer) 782 return 0; 783 784 hpet = hpetp->hp_hpet; 785 t = read_counter(&timer->hpet_compare); 786 787 i = 0; 788 count = hpet_time_div(hpetp, TICK_CALIBRATE); 789 790 local_irq_save(flags); 791 792 start = read_counter(&hpet->hpet_mc); 793 794 do { 795 m = read_counter(&hpet->hpet_mc); 796 write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); 797 } while (i++, (m - start) < count); 798 799 local_irq_restore(flags); 800 801 return (m - start) / i; 802 } 803 804 int hpet_alloc(struct hpet_data *hdp) 805 { 806 u64 cap, mcfg; 807 struct hpet_dev *devp; 808 u32 i, ntimer; 809 struct hpets *hpetp; 810 size_t siz; 811 struct hpet __iomem *hpet; 812 static struct hpets *last = NULL; 813 unsigned long period; 814 unsigned long long temp; 815 816 /* 817 * hpet_alloc can be called by platform dependent code. 818 * If platform dependent code has allocated the hpet that 819 * ACPI has also reported, then we catch it here. 820 */ 821 if (hpet_is_known(hdp)) { 822 printk(KERN_DEBUG "%s: duplicate HPET ignored\n", 823 __FUNCTION__); 824 return 0; 825 } 826 827 siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) * 828 sizeof(struct hpet_dev)); 829 830 hpetp = kzalloc(siz, GFP_KERNEL); 831 832 if (!hpetp) 833 return -ENOMEM; 834 835 hpetp->hp_which = hpet_nhpet++; 836 hpetp->hp_hpet = hdp->hd_address; 837 hpetp->hp_hpet_phys = hdp->hd_phys_address; 838 839 hpetp->hp_ntimer = hdp->hd_nirqs; 840 841 for (i = 0; i < hdp->hd_nirqs; i++) 842 hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i]; 843 844 hpet = hpetp->hp_hpet; 845 846 cap = readq(&hpet->hpet_cap); 847 848 ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1; 849 850 if (hpetp->hp_ntimer != ntimer) { 851 printk(KERN_WARNING "hpet: number irqs doesn't agree" 852 " with number of timers\n"); 853 kfree(hpetp); 854 return -ENODEV; 855 } 856 857 if (last) 858 last->hp_next = hpetp; 859 else 860 hpets = hpetp; 861 862 last = hpetp; 863 864 period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >> 865 HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */ 866 temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */ 867 temp += period >> 1; /* round */ 868 do_div(temp, period); 869 hpetp->hp_tick_freq = temp; /* ticks per second */ 870 871 printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s", 872 hpetp->hp_which, hdp->hd_phys_address, 873 hpetp->hp_ntimer > 1 ? "s" : ""); 874 for (i = 0; i < hpetp->hp_ntimer; i++) 875 printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]); 876 printk("\n"); 877 878 printk(KERN_INFO "hpet%u: %u %d-bit timers, %Lu Hz\n", 879 hpetp->hp_which, hpetp->hp_ntimer, 880 cap & HPET_COUNTER_SIZE_MASK ? 64 : 32, hpetp->hp_tick_freq); 881 882 mcfg = readq(&hpet->hpet_config); 883 if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) { 884 write_counter(0L, &hpet->hpet_mc); 885 mcfg |= HPET_ENABLE_CNF_MASK; 886 writeq(mcfg, &hpet->hpet_config); 887 } 888 889 for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) { 890 struct hpet_timer __iomem *timer; 891 892 timer = &hpet->hpet_timers[devp - hpetp->hp_dev]; 893 894 devp->hd_hpets = hpetp; 895 devp->hd_hpet = hpet; 896 devp->hd_timer = timer; 897 898 /* 899 * If the timer was reserved by platform code, 900 * then make timer unavailable for opens. 901 */ 902 if (hdp->hd_state & (1 << i)) { 903 devp->hd_flags = HPET_OPEN; 904 continue; 905 } 906 907 init_waitqueue_head(&devp->hd_waitqueue); 908 } 909 910 hpetp->hp_delta = hpet_calibrate(hpetp); 911 912 if (!hpet_clocksource) { 913 hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc; 914 CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr); 915 clocksource_hpet.mult = clocksource_hz2mult(hpetp->hp_tick_freq, 916 clocksource_hpet.shift); 917 clocksource_register(&clocksource_hpet); 918 hpetp->hp_clocksource = &clocksource_hpet; 919 hpet_clocksource = &clocksource_hpet; 920 } 921 922 return 0; 923 } 924 925 static acpi_status hpet_resources(struct acpi_resource *res, void *data) 926 { 927 struct hpet_data *hdp; 928 acpi_status status; 929 struct acpi_resource_address64 addr; 930 931 hdp = data; 932 933 status = acpi_resource_to_address64(res, &addr); 934 935 if (ACPI_SUCCESS(status)) { 936 hdp->hd_phys_address = addr.minimum; 937 hdp->hd_address = ioremap(addr.minimum, addr.address_length); 938 939 if (hpet_is_known(hdp)) { 940 printk(KERN_DEBUG "%s: 0x%lx is busy\n", 941 __FUNCTION__, hdp->hd_phys_address); 942 iounmap(hdp->hd_address); 943 return -EBUSY; 944 } 945 } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) { 946 struct acpi_resource_fixed_memory32 *fixmem32; 947 948 fixmem32 = &res->data.fixed_memory32; 949 if (!fixmem32) 950 return -EINVAL; 951 952 hdp->hd_phys_address = fixmem32->address; 953 hdp->hd_address = ioremap(fixmem32->address, 954 HPET_RANGE_SIZE); 955 956 if (hpet_is_known(hdp)) { 957 printk(KERN_DEBUG "%s: 0x%lx is busy\n", 958 __FUNCTION__, hdp->hd_phys_address); 959 iounmap(hdp->hd_address); 960 return -EBUSY; 961 } 962 } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) { 963 struct acpi_resource_extended_irq *irqp; 964 int i, irq; 965 966 irqp = &res->data.extended_irq; 967 968 for (i = 0; i < irqp->interrupt_count; i++) { 969 irq = acpi_register_gsi(irqp->interrupts[i], 970 irqp->triggering, irqp->polarity); 971 if (irq < 0) 972 return AE_ERROR; 973 974 hdp->hd_irq[hdp->hd_nirqs] = irq; 975 hdp->hd_nirqs++; 976 } 977 } 978 979 return AE_OK; 980 } 981 982 static int hpet_acpi_add(struct acpi_device *device) 983 { 984 acpi_status result; 985 struct hpet_data data; 986 987 memset(&data, 0, sizeof(data)); 988 989 result = 990 acpi_walk_resources(device->handle, METHOD_NAME__CRS, 991 hpet_resources, &data); 992 993 if (ACPI_FAILURE(result)) 994 return -ENODEV; 995 996 if (!data.hd_address || !data.hd_nirqs) { 997 printk("%s: no address or irqs in _CRS\n", __FUNCTION__); 998 return -ENODEV; 999 } 1000 1001 return hpet_alloc(&data); 1002 } 1003 1004 static int hpet_acpi_remove(struct acpi_device *device, int type) 1005 { 1006 /* XXX need to unregister clocksource, dealloc mem, etc */ 1007 return -EINVAL; 1008 } 1009 1010 static const struct acpi_device_id hpet_device_ids[] = { 1011 {"PNP0103", 0}, 1012 {"", 0}, 1013 }; 1014 MODULE_DEVICE_TABLE(acpi, hpet_device_ids); 1015 1016 static struct acpi_driver hpet_acpi_driver = { 1017 .name = "hpet", 1018 .ids = hpet_device_ids, 1019 .ops = { 1020 .add = hpet_acpi_add, 1021 .remove = hpet_acpi_remove, 1022 }, 1023 }; 1024 1025 static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops }; 1026 1027 static int __init hpet_init(void) 1028 { 1029 int result; 1030 1031 result = misc_register(&hpet_misc); 1032 if (result < 0) 1033 return -ENODEV; 1034 1035 sysctl_header = register_sysctl_table(dev_root); 1036 1037 result = acpi_bus_register_driver(&hpet_acpi_driver); 1038 if (result < 0) { 1039 if (sysctl_header) 1040 unregister_sysctl_table(sysctl_header); 1041 misc_deregister(&hpet_misc); 1042 return result; 1043 } 1044 1045 return 0; 1046 } 1047 1048 static void __exit hpet_exit(void) 1049 { 1050 acpi_bus_unregister_driver(&hpet_acpi_driver); 1051 1052 if (sysctl_header) 1053 unregister_sysctl_table(sysctl_header); 1054 misc_deregister(&hpet_misc); 1055 1056 return; 1057 } 1058 1059 module_init(hpet_init); 1060 module_exit(hpet_exit); 1061 MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>"); 1062 MODULE_LICENSE("GPL"); 1063