/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#define CNTTIDR         0x08
#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))

#define CNTVCT_LO       0x08
#define CNTVCT_HI       0x0c
#define CNTFRQ          0x10
#define CNTP_TVAL       0x28
#define CNTP_CTL        0x2c
#define CNTV_TVAL       0x38
#define CNTV_CTL        0x3c

#define ARCH_CP15_TIMER BIT(0)
#define ARCH_MEM_TIMER  BIT(1)
static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
        void __iomem *base;
        struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;

enum ppi_nr {
        PHYS_SECURE_PPI,
        PHYS_NONSECURE_PPI,
        VIRT_PPI,
        HYP_PPI,
        MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static bool arch_timer_use_virtual = true;
static bool arch_timer_mem_use_virtual;

/*
 * Architected system timer support.
 */

static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
                          struct clock_event_device *clk)
{
        if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
                struct arch_timer *timer = to_arch_timer(clk);
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        writel_relaxed(val, timer->base + CNTP_CTL);
                        break;
                case ARCH_TIMER_REG_TVAL:
                        writel_relaxed(val, timer->base + CNTP_TVAL);
                        break;
                }
        } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
                struct arch_timer *timer = to_arch_timer(clk);
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        writel_relaxed(val, timer->base + CNTV_CTL);
                        break;
                case ARCH_TIMER_REG_TVAL:
                        writel_relaxed(val, timer->base + CNTV_TVAL);
                        break;
                }
        } else {
                arch_timer_reg_write_cp15(access, reg, val);
        }
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
                        struct clock_event_device *clk)
{
        u32 val;

        if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
                struct arch_timer *timer = to_arch_timer(clk);
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        val = readl_relaxed(timer->base + CNTP_CTL);
                        break;
                case ARCH_TIMER_REG_TVAL:
                        val = readl_relaxed(timer->base + CNTP_TVAL);
                        break;
                }
        } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
                struct arch_timer *timer = to_arch_timer(clk);
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        val = readl_relaxed(timer->base + CNTV_CTL);
                        break;
                case ARCH_TIMER_REG_TVAL:
                        val = readl_relaxed(timer->base + CNTV_TVAL);
                        break;
                }
        } else {
                val = arch_timer_reg_read_cp15(access, reg);
        }

        return val;
}
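/*
 * Common interrupt handler: if the interrupt-status bit is set, mask
 * the timer interrupt before running the clockevent handler (the next
 * set_next_event() unmasks it again); otherwise report IRQ_NONE so a
 * spurious interrupt is not accounted to this timer. The wrappers
 * below only select the access type (cp15 vs. MMIO, phys vs. virt).
 */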
static __always_inline irqreturn_t timer_handler(const int access,
                                        struct clock_event_device *evt)
{
        unsigned long ctrl;

        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
        if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
                ctrl |= ARCH_TIMER_CTRL_IT_MASK;
                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
                evt->event_handler(evt);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
        struct clock_event_device *evt = dev_id;

        return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
        struct clock_event_device *evt = dev_id;

        return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
        struct clock_event_device *evt = dev_id;

        return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
        struct clock_event_device *evt = dev_id;

        return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline void timer_set_mode(const int access, int mode,
                                           struct clock_event_device *clk)
{
        unsigned long ctrl;
        switch (mode) {
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
                ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
                break;
        default:
                break;
        }
}

static void arch_timer_set_mode_virt(enum clock_event_mode mode,
                                     struct clock_event_device *clk)
{
        timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk);
}

static void arch_timer_set_mode_phys(enum clock_event_mode mode,
                                     struct clock_event_device *clk)
{
        timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk);
}

static void arch_timer_set_mode_virt_mem(enum clock_event_mode mode,
                                         struct clock_event_device *clk)
{
        timer_set_mode(ARCH_TIMER_MEM_VIRT_ACCESS, mode, clk);
}

static void arch_timer_set_mode_phys_mem(enum clock_event_mode mode,
                                         struct clock_event_device *clk)
{
        timer_set_mode(ARCH_TIMER_MEM_PHYS_ACCESS, mode, clk);
}

static __always_inline void set_next_event(const int access, unsigned long evt,
                                           struct clock_event_device *clk)
{
        unsigned long ctrl;
        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
        ctrl |= ARCH_TIMER_CTRL_ENABLE;
        ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
        arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
        arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
                                          struct clock_event_device *clk)
{
        set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
        return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
                                          struct clock_event_device *clk)
{
        set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
        return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
                                              struct clock_event_device *clk)
{
        set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
        return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
                                              struct clock_event_device *clk)
{
        set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
        return 0;
}
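/*
 * Populate a clock_event_device for either the per-CPU cp15 timer
 * (ARCH_CP15_TIMER) or the global memory-mapped timer (ARCH_MEM_TIMER),
 * wiring up the virtual or physical accessors selected earlier, and
 * register it with the clockevents core.
 */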
static void __arch_timer_setup(unsigned type,
                               struct clock_event_device *clk)
{
        clk->features = CLOCK_EVT_FEAT_ONESHOT;

        if (type == ARCH_CP15_TIMER) {
                clk->features |= CLOCK_EVT_FEAT_C3STOP;
                clk->name = "arch_sys_timer";
                clk->rating = 450;
                clk->cpumask = cpumask_of(smp_processor_id());
                if (arch_timer_use_virtual) {
                        clk->irq = arch_timer_ppi[VIRT_PPI];
                        clk->set_mode = arch_timer_set_mode_virt;
                        clk->set_next_event = arch_timer_set_next_event_virt;
                } else {
                        clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
                        clk->set_mode = arch_timer_set_mode_phys;
                        clk->set_next_event = arch_timer_set_next_event_phys;
                }
        } else {
                clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
                clk->name = "arch_mem_timer";
                clk->rating = 400;
                clk->cpumask = cpu_all_mask;
                if (arch_timer_mem_use_virtual) {
                        clk->set_mode = arch_timer_set_mode_virt_mem;
                        clk->set_next_event =
                                arch_timer_set_next_event_virt_mem;
                } else {
                        clk->set_mode = arch_timer_set_mode_phys_mem;
                        clk->set_next_event =
                                arch_timer_set_next_event_phys_mem;
                }
        }

        clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk);

        clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}

static void arch_timer_configure_evtstream(void)
{
        int evt_stream_div, pos;

        /* Find the closest power of two to the divisor */
        evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
        pos = fls(evt_stream_div);
        if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
                pos--;
        /* enable event stream */
        arch_timer_evtstrm_enable(min(pos, 15));
}

static int arch_timer_setup(struct clock_event_device *clk)
{
        __arch_timer_setup(ARCH_CP15_TIMER, clk);

        if (arch_timer_use_virtual)
                enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
        else {
                enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
                        enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
        }

        arch_counter_set_user_access();
        if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
                arch_timer_configure_evtstream();

        return 0;
}

static void
arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
        /* Who has more than one independent system counter? */
        if (arch_timer_rate)
                return;

        /* Try to determine the frequency from the device tree or CNTFRQ */
        if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
                if (cntbase)
                        arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
                else
                        arch_timer_rate = arch_timer_get_cntfrq();
        }

        /* Check the timer frequency. */
        if (arch_timer_rate == 0)
                pr_warn("Architected timer frequency not available\n");
}

static void arch_timer_banner(unsigned type)
{
        pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
                type & ARCH_CP15_TIMER ? "cp15" : "",
                type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
                type & ARCH_MEM_TIMER ? "mmio" : "",
                (unsigned long)arch_timer_rate / 1000000,
                (unsigned long)(arch_timer_rate / 10000) % 100,
                type & ARCH_CP15_TIMER ?
                        arch_timer_use_virtual ? "virt" : "phys" :
                        "",
                type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
                type & ARCH_MEM_TIMER ?
                        arch_timer_mem_use_virtual ? "virt" : "phys" :
                        "");
}

u32 arch_timer_get_rate(void)
{
        return arch_timer_rate;
}
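/*
 * The memory-mapped counter is 64 bits wide but can only be read 32
 * bits at a time, so read CNTVCT_HI on both sides of CNTVCT_LO and
 * retry until the two high-word reads agree; this guards against the
 * low word wrapping between the two accesses.
 */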
"virt" : "phys" : 366 ""); 367 } 368 369 u32 arch_timer_get_rate(void) 370 { 371 return arch_timer_rate; 372 } 373 374 static u64 arch_counter_get_cntvct_mem(void) 375 { 376 u32 vct_lo, vct_hi, tmp_hi; 377 378 do { 379 vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); 380 vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO); 381 tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); 382 } while (vct_hi != tmp_hi); 383 384 return ((u64) vct_hi << 32) | vct_lo; 385 } 386 387 /* 388 * Default to cp15 based access because arm64 uses this function for 389 * sched_clock() before DT is probed and the cp15 method is guaranteed 390 * to exist on arm64. arm doesn't use this before DT is probed so even 391 * if we don't have the cp15 accessors we won't have a problem. 392 */ 393 u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; 394 395 static cycle_t arch_counter_read(struct clocksource *cs) 396 { 397 return arch_timer_read_counter(); 398 } 399 400 static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) 401 { 402 return arch_timer_read_counter(); 403 } 404 405 static struct clocksource clocksource_counter = { 406 .name = "arch_sys_counter", 407 .rating = 400, 408 .read = arch_counter_read, 409 .mask = CLOCKSOURCE_MASK(56), 410 .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP, 411 }; 412 413 static struct cyclecounter cyclecounter = { 414 .read = arch_counter_read_cc, 415 .mask = CLOCKSOURCE_MASK(56), 416 }; 417 418 static struct timecounter timecounter; 419 420 struct timecounter *arch_timer_get_timecounter(void) 421 { 422 return &timecounter; 423 } 424 425 static void __init arch_counter_register(unsigned type) 426 { 427 u64 start_count; 428 429 /* Register the CP15 based counter if we have one */ 430 if (type & ARCH_CP15_TIMER) 431 arch_timer_read_counter = arch_counter_get_cntvct; 432 else 433 arch_timer_read_counter = arch_counter_get_cntvct_mem; 434 435 start_count = arch_timer_read_counter(); 436 clocksource_register_hz(&clocksource_counter, arch_timer_rate); 437 cyclecounter.mult = clocksource_counter.mult; 438 cyclecounter.shift = clocksource_counter.shift; 439 timecounter_init(&timecounter, &cyclecounter, start_count); 440 441 /* 56 bits minimum, so we assume worst case rollover */ 442 sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate); 443 } 444 445 static void arch_timer_stop(struct clock_event_device *clk) 446 { 447 pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", 448 clk->irq, smp_processor_id()); 449 450 if (arch_timer_use_virtual) 451 disable_percpu_irq(arch_timer_ppi[VIRT_PPI]); 452 else { 453 disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]); 454 if (arch_timer_ppi[PHYS_NONSECURE_PPI]) 455 disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]); 456 } 457 458 clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk); 459 } 460 461 static int arch_timer_cpu_notify(struct notifier_block *self, 462 unsigned long action, void *hcpu) 463 { 464 /* 465 * Grab cpu pointer in each case to avoid spurious 466 * preemptible warnings 467 */ 468 switch (action & ~CPU_TASKS_FROZEN) { 469 case CPU_STARTING: 470 arch_timer_setup(this_cpu_ptr(arch_timer_evt)); 471 break; 472 case CPU_DYING: 473 arch_timer_stop(this_cpu_ptr(arch_timer_evt)); 474 break; 475 } 476 477 return NOTIFY_OK; 478 } 479 480 static struct notifier_block arch_timer_cpu_nb = { 481 .notifier_call = arch_timer_cpu_notify, 482 }; 483 484 #ifdef CONFIG_CPU_PM 485 static unsigned int saved_cntkctl; 486 static int arch_timer_cpu_pm_notify(struct notifier_block *self, 487 unsigned 
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        if (action == CPU_PM_ENTER)
                saved_cntkctl = arch_timer_get_cntkctl();
        else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
                arch_timer_set_cntkctl(saved_cntkctl);
        return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
        .notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
        return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}
#else
static int __init arch_timer_cpu_pm_init(void)
{
        return 0;
}
#endif

static int __init arch_timer_register(void)
{
        int err;
        int ppi;

        arch_timer_evt = alloc_percpu(struct clock_event_device);
        if (!arch_timer_evt) {
                err = -ENOMEM;
                goto out;
        }

        if (arch_timer_use_virtual) {
                ppi = arch_timer_ppi[VIRT_PPI];
                err = request_percpu_irq(ppi, arch_timer_handler_virt,
                                         "arch_timer", arch_timer_evt);
        } else {
                ppi = arch_timer_ppi[PHYS_SECURE_PPI];
                err = request_percpu_irq(ppi, arch_timer_handler_phys,
                                         "arch_timer", arch_timer_evt);
                if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
                        ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
                        err = request_percpu_irq(ppi, arch_timer_handler_phys,
                                                 "arch_timer", arch_timer_evt);
                        if (err)
                                free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
                                                arch_timer_evt);
                }
        }

        if (err) {
                pr_err("arch_timer: can't register interrupt %d (%d)\n",
                       ppi, err);
                goto out_free;
        }

        err = register_cpu_notifier(&arch_timer_cpu_nb);
        if (err)
                goto out_free_irq;

        err = arch_timer_cpu_pm_init();
        if (err)
                goto out_unreg_notify;

        /* Immediately configure the timer on the boot CPU */
        arch_timer_setup(this_cpu_ptr(arch_timer_evt));

        return 0;

out_unreg_notify:
        unregister_cpu_notifier(&arch_timer_cpu_nb);
out_free_irq:
        if (arch_timer_use_virtual)
                free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
        else {
                free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
                                arch_timer_evt);
                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
                        free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
                                        arch_timer_evt);
        }

out_free:
        free_percpu(arch_timer_evt);
out:
        return err;
}

static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
        int ret;
        irq_handler_t func;
        struct arch_timer *t;

        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (!t)
                return -ENOMEM;

        t->base = base;
        t->evt.irq = irq;
        __arch_timer_setup(ARCH_MEM_TIMER, &t->evt);

        if (arch_timer_mem_use_virtual)
                func = arch_timer_handler_virt_mem;
        else
                func = arch_timer_handler_phys_mem;

        ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
        if (ret) {
                pr_err("arch_timer: Failed to request mem timer irq\n");
                kfree(t);
        }

        return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
        { .compatible = "arm,armv7-timer", },
        { .compatible = "arm,armv8-timer", },
        {},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
        { .compatible = "arm,armv7-timer-mem", },
        {},
};
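/*
 * Called at the end of both the cp15 and MMIO init paths. If the DT
 * describes both timer types, defer the banner, clocksource and
 * arch-specific setup until the second probe so that they reflect the
 * full set of timers.
 */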
static void __init arch_timer_common_init(void)
{
        unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;

        /* Wait until both nodes are probed if we have two timers */
        if ((arch_timers_present & mask) != mask) {
                if (of_find_matching_node(NULL, arch_timer_mem_of_match) &&
                                !(arch_timers_present & ARCH_MEM_TIMER))
                        return;
                if (of_find_matching_node(NULL, arch_timer_of_match) &&
                                !(arch_timers_present & ARCH_CP15_TIMER))
                        return;
        }

        arch_timer_banner(arch_timers_present);
        arch_counter_register(arch_timers_present);
        arch_timer_arch_init();
}

static void __init arch_timer_init(struct device_node *np)
{
        int i;

        if (arch_timers_present & ARCH_CP15_TIMER) {
                pr_warn("arch_timer: multiple nodes in dt, skipping\n");
                return;
        }

        arch_timers_present |= ARCH_CP15_TIMER;
        for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
                arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
        arch_timer_detect_rate(NULL, np);

        /*
         * If HYP mode is available, we know that the physical timer
         * has been configured to be accessible from PL1. Use it, so
         * that a guest can use the virtual timer instead.
         *
         * If no interrupt provided for virtual timer, we'll have to
         * stick to the physical timer. It'd better be accessible...
         */
        if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
                arch_timer_use_virtual = false;

                if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
                    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
                        pr_warn("arch_timer: No interrupt available, giving up\n");
                        return;
                }
        }

        arch_timer_register();
        arch_timer_common_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);

static void __init arch_timer_mem_init(struct device_node *np)
{
        struct device_node *frame, *best_frame = NULL;
        void __iomem *cntctlbase, *base;
        unsigned int irq;
        u32 cnttidr;

        arch_timers_present |= ARCH_MEM_TIMER;
        cntctlbase = of_iomap(np, 0);
        if (!cntctlbase) {
                pr_err("arch_timer: Can't find CNTCTLBase\n");
                return;
        }

        cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
        iounmap(cntctlbase);

        /*
         * Try to find a virtual capable frame. Otherwise fall back to a
         * physical capable frame.
         */
        for_each_available_child_of_node(np, frame) {
                int n;

                if (of_property_read_u32(frame, "frame-number", &n)) {
                        pr_err("arch_timer: Missing frame-number\n");
                        of_node_put(best_frame);
                        of_node_put(frame);
                        return;
                }

                if (cnttidr & CNTTIDR_VIRT(n)) {
                        of_node_put(best_frame);
                        best_frame = frame;
                        arch_timer_mem_use_virtual = true;
                        break;
                }
                of_node_put(best_frame);
                best_frame = of_node_get(frame);
        }

        base = arch_counter_base = of_iomap(best_frame, 0);
        if (!base) {
                pr_err("arch_timer: Can't map frame's registers\n");
                of_node_put(best_frame);
                return;
        }

        if (arch_timer_mem_use_virtual)
                irq = irq_of_parse_and_map(best_frame, 1);
        else
                irq = irq_of_parse_and_map(best_frame, 0);
        of_node_put(best_frame);
        if (!irq) {
                pr_err("arch_timer: Frame missing %s irq\n",
                       arch_timer_mem_use_virtual ? "virt" : "phys");
                return;
        }

        arch_timer_detect_rate(base, np);
        arch_timer_mem_register(base, irq);
        arch_timer_common_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
                       arch_timer_mem_init);