1 /* 2 * linux/drivers/clocksource/arm_arch_timer.c 3 * 4 * Copyright (C) 2011 ARM Ltd. 5 * All Rights Reserved 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 #include <linux/init.h> 12 #include <linux/kernel.h> 13 #include <linux/device.h> 14 #include <linux/smp.h> 15 #include <linux/cpu.h> 16 #include <linux/cpu_pm.h> 17 #include <linux/clockchips.h> 18 #include <linux/interrupt.h> 19 #include <linux/of_irq.h> 20 #include <linux/of_address.h> 21 #include <linux/io.h> 22 #include <linux/slab.h> 23 #include <linux/sched_clock.h> 24 25 #include <asm/arch_timer.h> 26 #include <asm/virt.h> 27 28 #include <clocksource/arm_arch_timer.h> 29 30 #define CNTTIDR 0x08 31 #define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4)) 32 33 #define CNTVCT_LO 0x08 34 #define CNTVCT_HI 0x0c 35 #define CNTFRQ 0x10 36 #define CNTP_TVAL 0x28 37 #define CNTP_CTL 0x2c 38 #define CNTV_TVAL 0x38 39 #define CNTV_CTL 0x3c 40 41 #define ARCH_CP15_TIMER BIT(0) 42 #define ARCH_MEM_TIMER BIT(1) 43 static unsigned arch_timers_present __initdata; 44 45 static void __iomem *arch_counter_base; 46 47 struct arch_timer { 48 void __iomem *base; 49 struct clock_event_device evt; 50 }; 51 52 #define to_arch_timer(e) container_of(e, struct arch_timer, evt) 53 54 static u32 arch_timer_rate; 55 56 enum ppi_nr { 57 PHYS_SECURE_PPI, 58 PHYS_NONSECURE_PPI, 59 VIRT_PPI, 60 HYP_PPI, 61 MAX_TIMER_PPI 62 }; 63 64 static int arch_timer_ppi[MAX_TIMER_PPI]; 65 66 static struct clock_event_device __percpu *arch_timer_evt; 67 68 static bool arch_timer_use_virtual = true; 69 static bool arch_timer_mem_use_virtual; 70 71 /* 72 * Architected system timer support. 73 */ 74 75 static __always_inline 76 void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val, 77 struct clock_event_device *clk) 78 { 79 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { 80 struct arch_timer *timer = to_arch_timer(clk); 81 switch (reg) { 82 case ARCH_TIMER_REG_CTRL: 83 writel_relaxed(val, timer->base + CNTP_CTL); 84 break; 85 case ARCH_TIMER_REG_TVAL: 86 writel_relaxed(val, timer->base + CNTP_TVAL); 87 break; 88 } 89 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { 90 struct arch_timer *timer = to_arch_timer(clk); 91 switch (reg) { 92 case ARCH_TIMER_REG_CTRL: 93 writel_relaxed(val, timer->base + CNTV_CTL); 94 break; 95 case ARCH_TIMER_REG_TVAL: 96 writel_relaxed(val, timer->base + CNTV_TVAL); 97 break; 98 } 99 } else { 100 arch_timer_reg_write_cp15(access, reg, val); 101 } 102 } 103 104 static __always_inline 105 u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, 106 struct clock_event_device *clk) 107 { 108 u32 val; 109 110 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { 111 struct arch_timer *timer = to_arch_timer(clk); 112 switch (reg) { 113 case ARCH_TIMER_REG_CTRL: 114 val = readl_relaxed(timer->base + CNTP_CTL); 115 break; 116 case ARCH_TIMER_REG_TVAL: 117 val = readl_relaxed(timer->base + CNTP_TVAL); 118 break; 119 } 120 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { 121 struct arch_timer *timer = to_arch_timer(clk); 122 switch (reg) { 123 case ARCH_TIMER_REG_CTRL: 124 val = readl_relaxed(timer->base + CNTV_CTL); 125 break; 126 case ARCH_TIMER_REG_TVAL: 127 val = readl_relaxed(timer->base + CNTV_TVAL); 128 break; 129 } 130 } else { 131 val = arch_timer_reg_read_cp15(access, reg); 132 } 133 134 return val; 135 } 136 137 static __always_inline irqreturn_t timer_handler(const int access, 138 struct clock_event_device *evt) 139 { 140 unsigned long ctrl; 141 142 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt); 143 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { 144 ctrl |= ARCH_TIMER_CTRL_IT_MASK; 145 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt); 146 evt->event_handler(evt); 147 return IRQ_HANDLED; 148 } 149 150 return IRQ_NONE; 151 } 152 153 static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id) 154 { 155 struct clock_event_device *evt = dev_id; 156 157 return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt); 158 } 159 160 static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id) 161 { 162 struct clock_event_device *evt = dev_id; 163 164 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); 165 } 166 167 static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id) 168 { 169 struct clock_event_device *evt = dev_id; 170 171 return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt); 172 } 173 174 static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id) 175 { 176 struct clock_event_device *evt = dev_id; 177 178 return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt); 179 } 180 181 static __always_inline void timer_set_mode(const int access, int mode, 182 struct clock_event_device *clk) 183 { 184 unsigned long ctrl; 185 switch (mode) { 186 case CLOCK_EVT_MODE_UNUSED: 187 case CLOCK_EVT_MODE_SHUTDOWN: 188 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 189 ctrl &= ~ARCH_TIMER_CTRL_ENABLE; 190 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 191 break; 192 default: 193 break; 194 } 195 } 196 197 static void arch_timer_set_mode_virt(enum clock_event_mode mode, 198 struct clock_event_device *clk) 199 { 200 timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk); 201 } 202 203 static void arch_timer_set_mode_phys(enum clock_event_mode mode, 204 struct clock_event_device *clk) 205 { 206 timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk); 207 } 208 209 static void arch_timer_set_mode_virt_mem(enum clock_event_mode mode, 210 struct clock_event_device *clk) 211 { 212 timer_set_mode(ARCH_TIMER_MEM_VIRT_ACCESS, mode, clk); 213 } 214 215 static void arch_timer_set_mode_phys_mem(enum clock_event_mode mode, 216 struct clock_event_device *clk) 217 { 218 timer_set_mode(ARCH_TIMER_MEM_PHYS_ACCESS, mode, clk); 219 } 220 221 static __always_inline void set_next_event(const int access, unsigned long evt, 222 struct clock_event_device *clk) 223 { 224 unsigned long ctrl; 225 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 226 ctrl |= ARCH_TIMER_CTRL_ENABLE; 227 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; 228 arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk); 229 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 230 } 231 232 static int arch_timer_set_next_event_virt(unsigned long evt, 233 struct clock_event_device *clk) 234 { 235 set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk); 236 return 0; 237 } 238 239 static int arch_timer_set_next_event_phys(unsigned long evt, 240 struct clock_event_device *clk) 241 { 242 set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk); 243 return 0; 244 } 245 246 static int arch_timer_set_next_event_virt_mem(unsigned long evt, 247 struct clock_event_device *clk) 248 { 249 set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk); 250 return 0; 251 } 252 253 static int arch_timer_set_next_event_phys_mem(unsigned long evt, 254 struct clock_event_device *clk) 255 { 256 set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk); 257 return 0; 258 } 259 260 static void __arch_timer_setup(unsigned type, 261 struct clock_event_device *clk) 262 { 263 clk->features = CLOCK_EVT_FEAT_ONESHOT; 264 265 if (type == ARCH_CP15_TIMER) { 266 clk->features |= CLOCK_EVT_FEAT_C3STOP; 267 clk->name = "arch_sys_timer"; 268 clk->rating = 450; 269 clk->cpumask = cpumask_of(smp_processor_id()); 270 if (arch_timer_use_virtual) { 271 clk->irq = arch_timer_ppi[VIRT_PPI]; 272 clk->set_mode = arch_timer_set_mode_virt; 273 clk->set_next_event = arch_timer_set_next_event_virt; 274 } else { 275 clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; 276 clk->set_mode = arch_timer_set_mode_phys; 277 clk->set_next_event = arch_timer_set_next_event_phys; 278 } 279 } else { 280 clk->name = "arch_mem_timer"; 281 clk->rating = 400; 282 clk->cpumask = cpu_all_mask; 283 if (arch_timer_mem_use_virtual) { 284 clk->set_mode = arch_timer_set_mode_virt_mem; 285 clk->set_next_event = 286 arch_timer_set_next_event_virt_mem; 287 } else { 288 clk->set_mode = arch_timer_set_mode_phys_mem; 289 clk->set_next_event = 290 arch_timer_set_next_event_phys_mem; 291 } 292 } 293 294 clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk); 295 296 clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff); 297 } 298 299 static void arch_timer_configure_evtstream(void) 300 { 301 int evt_stream_div, pos; 302 303 /* Find the closest power of two to the divisor */ 304 evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ; 305 pos = fls(evt_stream_div); 306 if (pos > 1 && !(evt_stream_div & (1 << (pos - 2)))) 307 pos--; 308 /* enable event stream */ 309 arch_timer_evtstrm_enable(min(pos, 15)); 310 } 311 312 static int arch_timer_setup(struct clock_event_device *clk) 313 { 314 __arch_timer_setup(ARCH_CP15_TIMER, clk); 315 316 if (arch_timer_use_virtual) 317 enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0); 318 else { 319 enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0); 320 if (arch_timer_ppi[PHYS_NONSECURE_PPI]) 321 enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0); 322 } 323 324 arch_counter_set_user_access(); 325 if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM)) 326 arch_timer_configure_evtstream(); 327 328 return 0; 329 } 330 331 static void 332 arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np) 333 { 334 /* Who has more than one independent system counter? */ 335 if (arch_timer_rate) 336 return; 337 338 /* Try to determine the frequency from the device tree or CNTFRQ */ 339 if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) { 340 if (cntbase) 341 arch_timer_rate = readl_relaxed(cntbase + CNTFRQ); 342 else 343 arch_timer_rate = arch_timer_get_cntfrq(); 344 } 345 346 /* Check the timer frequency. */ 347 if (arch_timer_rate == 0) 348 pr_warn("Architected timer frequency not available\n"); 349 } 350 351 static void arch_timer_banner(unsigned type) 352 { 353 pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n", 354 type & ARCH_CP15_TIMER ? "cp15" : "", 355 type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "", 356 type & ARCH_MEM_TIMER ? "mmio" : "", 357 (unsigned long)arch_timer_rate / 1000000, 358 (unsigned long)(arch_timer_rate / 10000) % 100, 359 type & ARCH_CP15_TIMER ? 360 arch_timer_use_virtual ? "virt" : "phys" : 361 "", 362 type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "", 363 type & ARCH_MEM_TIMER ? 364 arch_timer_mem_use_virtual ? "virt" : "phys" : 365 ""); 366 } 367 368 u32 arch_timer_get_rate(void) 369 { 370 return arch_timer_rate; 371 } 372 373 static u64 arch_counter_get_cntvct_mem(void) 374 { 375 u32 vct_lo, vct_hi, tmp_hi; 376 377 do { 378 vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); 379 vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO); 380 tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); 381 } while (vct_hi != tmp_hi); 382 383 return ((u64) vct_hi << 32) | vct_lo; 384 } 385 386 /* 387 * Default to cp15 based access because arm64 uses this function for 388 * sched_clock() before DT is probed and the cp15 method is guaranteed 389 * to exist on arm64. arm doesn't use this before DT is probed so even 390 * if we don't have the cp15 accessors we won't have a problem. 391 */ 392 u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; 393 394 static cycle_t arch_counter_read(struct clocksource *cs) 395 { 396 return arch_timer_read_counter(); 397 } 398 399 static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) 400 { 401 return arch_timer_read_counter(); 402 } 403 404 static struct clocksource clocksource_counter = { 405 .name = "arch_sys_counter", 406 .rating = 400, 407 .read = arch_counter_read, 408 .mask = CLOCKSOURCE_MASK(56), 409 .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP, 410 }; 411 412 static struct cyclecounter cyclecounter = { 413 .read = arch_counter_read_cc, 414 .mask = CLOCKSOURCE_MASK(56), 415 }; 416 417 static struct timecounter timecounter; 418 419 struct timecounter *arch_timer_get_timecounter(void) 420 { 421 return &timecounter; 422 } 423 424 static void __init arch_counter_register(unsigned type) 425 { 426 u64 start_count; 427 428 /* Register the CP15 based counter if we have one */ 429 if (type & ARCH_CP15_TIMER) 430 arch_timer_read_counter = arch_counter_get_cntvct; 431 else 432 arch_timer_read_counter = arch_counter_get_cntvct_mem; 433 434 start_count = arch_timer_read_counter(); 435 clocksource_register_hz(&clocksource_counter, arch_timer_rate); 436 cyclecounter.mult = clocksource_counter.mult; 437 cyclecounter.shift = clocksource_counter.shift; 438 timecounter_init(&timecounter, &cyclecounter, start_count); 439 440 /* 56 bits minimum, so we assume worst case rollover */ 441 sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate); 442 } 443 444 static void arch_timer_stop(struct clock_event_device *clk) 445 { 446 pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n", 447 clk->irq, smp_processor_id()); 448 449 if (arch_timer_use_virtual) 450 disable_percpu_irq(arch_timer_ppi[VIRT_PPI]); 451 else { 452 disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]); 453 if (arch_timer_ppi[PHYS_NONSECURE_PPI]) 454 disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]); 455 } 456 457 clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk); 458 } 459 460 static int arch_timer_cpu_notify(struct notifier_block *self, 461 unsigned long action, void *hcpu) 462 { 463 /* 464 * Grab cpu pointer in each case to avoid spurious 465 * preemptible warnings 466 */ 467 switch (action & ~CPU_TASKS_FROZEN) { 468 case CPU_STARTING: 469 arch_timer_setup(this_cpu_ptr(arch_timer_evt)); 470 break; 471 case CPU_DYING: 472 arch_timer_stop(this_cpu_ptr(arch_timer_evt)); 473 break; 474 } 475 476 return NOTIFY_OK; 477 } 478 479 static struct notifier_block arch_timer_cpu_nb = { 480 .notifier_call = arch_timer_cpu_notify, 481 }; 482 483 #ifdef CONFIG_CPU_PM 484 static unsigned int saved_cntkctl; 485 static int arch_timer_cpu_pm_notify(struct notifier_block *self, 486 unsigned long action, void *hcpu) 487 { 488 if (action == CPU_PM_ENTER) 489 saved_cntkctl = arch_timer_get_cntkctl(); 490 else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) 491 arch_timer_set_cntkctl(saved_cntkctl); 492 return NOTIFY_OK; 493 } 494 495 static struct notifier_block arch_timer_cpu_pm_notifier = { 496 .notifier_call = arch_timer_cpu_pm_notify, 497 }; 498 499 static int __init arch_timer_cpu_pm_init(void) 500 { 501 return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier); 502 } 503 #else 504 static int __init arch_timer_cpu_pm_init(void) 505 { 506 return 0; 507 } 508 #endif 509 510 static int __init arch_timer_register(void) 511 { 512 int err; 513 int ppi; 514 515 arch_timer_evt = alloc_percpu(struct clock_event_device); 516 if (!arch_timer_evt) { 517 err = -ENOMEM; 518 goto out; 519 } 520 521 if (arch_timer_use_virtual) { 522 ppi = arch_timer_ppi[VIRT_PPI]; 523 err = request_percpu_irq(ppi, arch_timer_handler_virt, 524 "arch_timer", arch_timer_evt); 525 } else { 526 ppi = arch_timer_ppi[PHYS_SECURE_PPI]; 527 err = request_percpu_irq(ppi, arch_timer_handler_phys, 528 "arch_timer", arch_timer_evt); 529 if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) { 530 ppi = arch_timer_ppi[PHYS_NONSECURE_PPI]; 531 err = request_percpu_irq(ppi, arch_timer_handler_phys, 532 "arch_timer", arch_timer_evt); 533 if (err) 534 free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 535 arch_timer_evt); 536 } 537 } 538 539 if (err) { 540 pr_err("arch_timer: can't register interrupt %d (%d)\n", 541 ppi, err); 542 goto out_free; 543 } 544 545 err = register_cpu_notifier(&arch_timer_cpu_nb); 546 if (err) 547 goto out_free_irq; 548 549 err = arch_timer_cpu_pm_init(); 550 if (err) 551 goto out_unreg_notify; 552 553 /* Immediately configure the timer on the boot CPU */ 554 arch_timer_setup(this_cpu_ptr(arch_timer_evt)); 555 556 return 0; 557 558 out_unreg_notify: 559 unregister_cpu_notifier(&arch_timer_cpu_nb); 560 out_free_irq: 561 if (arch_timer_use_virtual) 562 free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt); 563 else { 564 free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 565 arch_timer_evt); 566 if (arch_timer_ppi[PHYS_NONSECURE_PPI]) 567 free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 568 arch_timer_evt); 569 } 570 571 out_free: 572 free_percpu(arch_timer_evt); 573 out: 574 return err; 575 } 576 577 static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq) 578 { 579 int ret; 580 irq_handler_t func; 581 struct arch_timer *t; 582 583 t = kzalloc(sizeof(*t), GFP_KERNEL); 584 if (!t) 585 return -ENOMEM; 586 587 t->base = base; 588 t->evt.irq = irq; 589 __arch_timer_setup(ARCH_MEM_TIMER, &t->evt); 590 591 if (arch_timer_mem_use_virtual) 592 func = arch_timer_handler_virt_mem; 593 else 594 func = arch_timer_handler_phys_mem; 595 596 ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt); 597 if (ret) { 598 pr_err("arch_timer: Failed to request mem timer irq\n"); 599 kfree(t); 600 } 601 602 return ret; 603 } 604 605 static const struct of_device_id arch_timer_of_match[] __initconst = { 606 { .compatible = "arm,armv7-timer", }, 607 { .compatible = "arm,armv8-timer", }, 608 {}, 609 }; 610 611 static const struct of_device_id arch_timer_mem_of_match[] __initconst = { 612 { .compatible = "arm,armv7-timer-mem", }, 613 {}, 614 }; 615 616 static void __init arch_timer_common_init(void) 617 { 618 unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER; 619 620 /* Wait until both nodes are probed if we have two timers */ 621 if ((arch_timers_present & mask) != mask) { 622 if (of_find_matching_node(NULL, arch_timer_mem_of_match) && 623 !(arch_timers_present & ARCH_MEM_TIMER)) 624 return; 625 if (of_find_matching_node(NULL, arch_timer_of_match) && 626 !(arch_timers_present & ARCH_CP15_TIMER)) 627 return; 628 } 629 630 arch_timer_banner(arch_timers_present); 631 arch_counter_register(arch_timers_present); 632 arch_timer_arch_init(); 633 } 634 635 static void __init arch_timer_init(struct device_node *np) 636 { 637 int i; 638 639 if (arch_timers_present & ARCH_CP15_TIMER) { 640 pr_warn("arch_timer: multiple nodes in dt, skipping\n"); 641 return; 642 } 643 644 arch_timers_present |= ARCH_CP15_TIMER; 645 for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++) 646 arch_timer_ppi[i] = irq_of_parse_and_map(np, i); 647 arch_timer_detect_rate(NULL, np); 648 649 /* 650 * If HYP mode is available, we know that the physical timer 651 * has been configured to be accessible from PL1. Use it, so 652 * that a guest can use the virtual timer instead. 653 * 654 * If no interrupt provided for virtual timer, we'll have to 655 * stick to the physical timer. It'd better be accessible... 656 */ 657 if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) { 658 arch_timer_use_virtual = false; 659 660 if (!arch_timer_ppi[PHYS_SECURE_PPI] || 661 !arch_timer_ppi[PHYS_NONSECURE_PPI]) { 662 pr_warn("arch_timer: No interrupt available, giving up\n"); 663 return; 664 } 665 } 666 667 arch_timer_register(); 668 arch_timer_common_init(); 669 } 670 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init); 671 CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init); 672 673 static void __init arch_timer_mem_init(struct device_node *np) 674 { 675 struct device_node *frame, *best_frame = NULL; 676 void __iomem *cntctlbase, *base; 677 unsigned int irq; 678 u32 cnttidr; 679 680 arch_timers_present |= ARCH_MEM_TIMER; 681 cntctlbase = of_iomap(np, 0); 682 if (!cntctlbase) { 683 pr_err("arch_timer: Can't find CNTCTLBase\n"); 684 return; 685 } 686 687 cnttidr = readl_relaxed(cntctlbase + CNTTIDR); 688 iounmap(cntctlbase); 689 690 /* 691 * Try to find a virtual capable frame. Otherwise fall back to a 692 * physical capable frame. 693 */ 694 for_each_available_child_of_node(np, frame) { 695 int n; 696 697 if (of_property_read_u32(frame, "frame-number", &n)) { 698 pr_err("arch_timer: Missing frame-number\n"); 699 of_node_put(best_frame); 700 of_node_put(frame); 701 return; 702 } 703 704 if (cnttidr & CNTTIDR_VIRT(n)) { 705 of_node_put(best_frame); 706 best_frame = frame; 707 arch_timer_mem_use_virtual = true; 708 break; 709 } 710 of_node_put(best_frame); 711 best_frame = of_node_get(frame); 712 } 713 714 base = arch_counter_base = of_iomap(best_frame, 0); 715 if (!base) { 716 pr_err("arch_timer: Can't map frame's registers\n"); 717 of_node_put(best_frame); 718 return; 719 } 720 721 if (arch_timer_mem_use_virtual) 722 irq = irq_of_parse_and_map(best_frame, 1); 723 else 724 irq = irq_of_parse_and_map(best_frame, 0); 725 of_node_put(best_frame); 726 if (!irq) { 727 pr_err("arch_timer: Frame missing %s irq", 728 arch_timer_mem_use_virtual ? "virt" : "phys"); 729 return; 730 } 731 732 arch_timer_detect_rate(base, np); 733 arch_timer_mem_register(base, irq); 734 arch_timer_common_init(); 735 } 736 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", 737 arch_timer_mem_init); 738