1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/drivers/clocksource/arm_arch_timer.c 4 * 5 * Copyright (C) 2011 ARM Ltd. 6 * All Rights Reserved 7 */ 8 9 #define pr_fmt(fmt) "arch_timer: " fmt 10 11 #include <linux/init.h> 12 #include <linux/kernel.h> 13 #include <linux/device.h> 14 #include <linux/smp.h> 15 #include <linux/cpu.h> 16 #include <linux/cpu_pm.h> 17 #include <linux/clockchips.h> 18 #include <linux/clocksource.h> 19 #include <linux/clocksource_ids.h> 20 #include <linux/interrupt.h> 21 #include <linux/of_irq.h> 22 #include <linux/of_address.h> 23 #include <linux/io.h> 24 #include <linux/slab.h> 25 #include <linux/sched/clock.h> 26 #include <linux/sched_clock.h> 27 #include <linux/acpi.h> 28 #include <linux/arm-smccc.h> 29 #include <linux/ptp_kvm.h> 30 31 #include <asm/arch_timer.h> 32 #include <asm/virt.h> 33 34 #include <clocksource/arm_arch_timer.h> 35 36 #define CNTTIDR 0x08 37 #define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4)) 38 39 #define CNTACR(n) (0x40 + ((n) * 4)) 40 #define CNTACR_RPCT BIT(0) 41 #define CNTACR_RVCT BIT(1) 42 #define CNTACR_RFRQ BIT(2) 43 #define CNTACR_RVOFF BIT(3) 44 #define CNTACR_RWVT BIT(4) 45 #define CNTACR_RWPT BIT(5) 46 47 #define CNTVCT_LO 0x08 48 #define CNTVCT_HI 0x0c 49 #define CNTFRQ 0x10 50 #define CNTP_TVAL 0x28 51 #define CNTP_CTL 0x2c 52 #define CNTV_TVAL 0x38 53 #define CNTV_CTL 0x3c 54 55 static unsigned arch_timers_present __initdata; 56 57 static void __iomem *arch_counter_base __ro_after_init; 58 59 struct arch_timer { 60 void __iomem *base; 61 struct clock_event_device evt; 62 }; 63 64 #define to_arch_timer(e) container_of(e, struct arch_timer, evt) 65 66 static u32 arch_timer_rate __ro_after_init; 67 static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init; 68 69 static const char *arch_timer_ppi_names[ARCH_TIMER_MAX_TIMER_PPI] = { 70 [ARCH_TIMER_PHYS_SECURE_PPI] = "sec-phys", 71 [ARCH_TIMER_PHYS_NONSECURE_PPI] = "phys", 72 [ARCH_TIMER_VIRT_PPI] = "virt", 73 [ARCH_TIMER_HYP_PPI] = "hyp-phys", 74 [ARCH_TIMER_HYP_VIRT_PPI] = "hyp-virt", 75 }; 76 77 static struct clock_event_device __percpu *arch_timer_evt; 78 79 static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI; 80 static bool arch_timer_c3stop __ro_after_init; 81 static bool arch_timer_mem_use_virtual __ro_after_init; 82 static bool arch_counter_suspend_stop __ro_after_init; 83 #ifdef CONFIG_GENERIC_GETTIMEOFDAY 84 static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER; 85 #else 86 static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE; 87 #endif /* CONFIG_GENERIC_GETTIMEOFDAY */ 88 89 static cpumask_t evtstrm_available = CPU_MASK_NONE; 90 static bool evtstrm_enable __ro_after_init = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM); 91 92 static int __init early_evtstrm_cfg(char *buf) 93 { 94 return strtobool(buf, &evtstrm_enable); 95 } 96 early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg); 97 98 /* 99 * Architected system timer support. 100 */ 101 102 static __always_inline 103 void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val, 104 struct clock_event_device *clk) 105 { 106 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { 107 struct arch_timer *timer = to_arch_timer(clk); 108 switch (reg) { 109 case ARCH_TIMER_REG_CTRL: 110 writel_relaxed(val, timer->base + CNTP_CTL); 111 break; 112 case ARCH_TIMER_REG_TVAL: 113 writel_relaxed(val, timer->base + CNTP_TVAL); 114 break; 115 } 116 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { 117 struct arch_timer *timer = to_arch_timer(clk); 118 switch (reg) { 119 case ARCH_TIMER_REG_CTRL: 120 writel_relaxed(val, timer->base + CNTV_CTL); 121 break; 122 case ARCH_TIMER_REG_TVAL: 123 writel_relaxed(val, timer->base + CNTV_TVAL); 124 break; 125 } 126 } else { 127 arch_timer_reg_write_cp15(access, reg, val); 128 } 129 } 130 131 static __always_inline 132 u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, 133 struct clock_event_device *clk) 134 { 135 u32 val; 136 137 if (access == ARCH_TIMER_MEM_PHYS_ACCESS) { 138 struct arch_timer *timer = to_arch_timer(clk); 139 switch (reg) { 140 case ARCH_TIMER_REG_CTRL: 141 val = readl_relaxed(timer->base + CNTP_CTL); 142 break; 143 case ARCH_TIMER_REG_TVAL: 144 val = readl_relaxed(timer->base + CNTP_TVAL); 145 break; 146 } 147 } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) { 148 struct arch_timer *timer = to_arch_timer(clk); 149 switch (reg) { 150 case ARCH_TIMER_REG_CTRL: 151 val = readl_relaxed(timer->base + CNTV_CTL); 152 break; 153 case ARCH_TIMER_REG_TVAL: 154 val = readl_relaxed(timer->base + CNTV_TVAL); 155 break; 156 } 157 } else { 158 val = arch_timer_reg_read_cp15(access, reg); 159 } 160 161 return val; 162 } 163 164 static notrace u64 arch_counter_get_cntpct_stable(void) 165 { 166 return __arch_counter_get_cntpct_stable(); 167 } 168 169 static notrace u64 arch_counter_get_cntpct(void) 170 { 171 return __arch_counter_get_cntpct(); 172 } 173 174 static notrace u64 arch_counter_get_cntvct_stable(void) 175 { 176 return __arch_counter_get_cntvct_stable(); 177 } 178 179 static notrace u64 arch_counter_get_cntvct(void) 180 { 181 return __arch_counter_get_cntvct(); 182 } 183 184 /* 185 * Default to cp15 based access because arm64 uses this function for 186 * sched_clock() before DT is probed and the cp15 method is guaranteed 187 * to exist on arm64. arm doesn't use this before DT is probed so even 188 * if we don't have the cp15 accessors we won't have a problem. 189 */ 190 u64 (*arch_timer_read_counter)(void) __ro_after_init = arch_counter_get_cntvct; 191 EXPORT_SYMBOL_GPL(arch_timer_read_counter); 192 193 static u64 arch_counter_read(struct clocksource *cs) 194 { 195 return arch_timer_read_counter(); 196 } 197 198 static u64 arch_counter_read_cc(const struct cyclecounter *cc) 199 { 200 return arch_timer_read_counter(); 201 } 202 203 static struct clocksource clocksource_counter = { 204 .name = "arch_sys_counter", 205 .id = CSID_ARM_ARCH_COUNTER, 206 .rating = 400, 207 .read = arch_counter_read, 208 .mask = CLOCKSOURCE_MASK(56), 209 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 210 }; 211 212 static struct cyclecounter cyclecounter __ro_after_init = { 213 .read = arch_counter_read_cc, 214 .mask = CLOCKSOURCE_MASK(56), 215 }; 216 217 struct ate_acpi_oem_info { 218 char oem_id[ACPI_OEM_ID_SIZE + 1]; 219 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; 220 u32 oem_revision; 221 }; 222 223 #ifdef CONFIG_FSL_ERRATUM_A008585 224 /* 225 * The number of retries is an arbitrary value well beyond the highest number 226 * of iterations the loop has been observed to take. 227 */ 228 #define __fsl_a008585_read_reg(reg) ({ \ 229 u64 _old, _new; \ 230 int _retries = 200; \ 231 \ 232 do { \ 233 _old = read_sysreg(reg); \ 234 _new = read_sysreg(reg); \ 235 _retries--; \ 236 } while (unlikely(_old != _new) && _retries); \ 237 \ 238 WARN_ON_ONCE(!_retries); \ 239 _new; \ 240 }) 241 242 static u32 notrace fsl_a008585_read_cntp_tval_el0(void) 243 { 244 return __fsl_a008585_read_reg(cntp_tval_el0); 245 } 246 247 static u32 notrace fsl_a008585_read_cntv_tval_el0(void) 248 { 249 return __fsl_a008585_read_reg(cntv_tval_el0); 250 } 251 252 static u64 notrace fsl_a008585_read_cntpct_el0(void) 253 { 254 return __fsl_a008585_read_reg(cntpct_el0); 255 } 256 257 static u64 notrace fsl_a008585_read_cntvct_el0(void) 258 { 259 return __fsl_a008585_read_reg(cntvct_el0); 260 } 261 #endif 262 263 #ifdef CONFIG_HISILICON_ERRATUM_161010101 264 /* 265 * Verify whether the value of the second read is larger than the first by 266 * less than 32 is the only way to confirm the value is correct, so clear the 267 * lower 5 bits to check whether the difference is greater than 32 or not. 268 * Theoretically the erratum should not occur more than twice in succession 269 * when reading the system counter, but it is possible that some interrupts 270 * may lead to more than twice read errors, triggering the warning, so setting 271 * the number of retries far beyond the number of iterations the loop has been 272 * observed to take. 273 */ 274 #define __hisi_161010101_read_reg(reg) ({ \ 275 u64 _old, _new; \ 276 int _retries = 50; \ 277 \ 278 do { \ 279 _old = read_sysreg(reg); \ 280 _new = read_sysreg(reg); \ 281 _retries--; \ 282 } while (unlikely((_new - _old) >> 5) && _retries); \ 283 \ 284 WARN_ON_ONCE(!_retries); \ 285 _new; \ 286 }) 287 288 static u32 notrace hisi_161010101_read_cntp_tval_el0(void) 289 { 290 return __hisi_161010101_read_reg(cntp_tval_el0); 291 } 292 293 static u32 notrace hisi_161010101_read_cntv_tval_el0(void) 294 { 295 return __hisi_161010101_read_reg(cntv_tval_el0); 296 } 297 298 static u64 notrace hisi_161010101_read_cntpct_el0(void) 299 { 300 return __hisi_161010101_read_reg(cntpct_el0); 301 } 302 303 static u64 notrace hisi_161010101_read_cntvct_el0(void) 304 { 305 return __hisi_161010101_read_reg(cntvct_el0); 306 } 307 308 static struct ate_acpi_oem_info hisi_161010101_oem_info[] = { 309 /* 310 * Note that trailing spaces are required to properly match 311 * the OEM table information. 312 */ 313 { 314 .oem_id = "HISI ", 315 .oem_table_id = "HIP05 ", 316 .oem_revision = 0, 317 }, 318 { 319 .oem_id = "HISI ", 320 .oem_table_id = "HIP06 ", 321 .oem_revision = 0, 322 }, 323 { 324 .oem_id = "HISI ", 325 .oem_table_id = "HIP07 ", 326 .oem_revision = 0, 327 }, 328 { /* Sentinel indicating the end of the OEM array */ }, 329 }; 330 #endif 331 332 #ifdef CONFIG_ARM64_ERRATUM_858921 333 static u64 notrace arm64_858921_read_cntpct_el0(void) 334 { 335 u64 old, new; 336 337 old = read_sysreg(cntpct_el0); 338 new = read_sysreg(cntpct_el0); 339 return (((old ^ new) >> 32) & 1) ? old : new; 340 } 341 342 static u64 notrace arm64_858921_read_cntvct_el0(void) 343 { 344 u64 old, new; 345 346 old = read_sysreg(cntvct_el0); 347 new = read_sysreg(cntvct_el0); 348 return (((old ^ new) >> 32) & 1) ? old : new; 349 } 350 #endif 351 352 #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1 353 /* 354 * The low bits of the counter registers are indeterminate while bit 10 or 355 * greater is rolling over. Since the counter value can jump both backward 356 * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values 357 * with all ones or all zeros in the low bits. Bound the loop by the maximum 358 * number of CPU cycles in 3 consecutive 24 MHz counter periods. 359 */ 360 #define __sun50i_a64_read_reg(reg) ({ \ 361 u64 _val; \ 362 int _retries = 150; \ 363 \ 364 do { \ 365 _val = read_sysreg(reg); \ 366 _retries--; \ 367 } while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries); \ 368 \ 369 WARN_ON_ONCE(!_retries); \ 370 _val; \ 371 }) 372 373 static u64 notrace sun50i_a64_read_cntpct_el0(void) 374 { 375 return __sun50i_a64_read_reg(cntpct_el0); 376 } 377 378 static u64 notrace sun50i_a64_read_cntvct_el0(void) 379 { 380 return __sun50i_a64_read_reg(cntvct_el0); 381 } 382 383 static u32 notrace sun50i_a64_read_cntp_tval_el0(void) 384 { 385 return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0(); 386 } 387 388 static u32 notrace sun50i_a64_read_cntv_tval_el0(void) 389 { 390 return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0(); 391 } 392 #endif 393 394 #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND 395 DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround); 396 EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround); 397 398 static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0); 399 400 static void erratum_set_next_event_tval_generic(const int access, unsigned long evt, 401 struct clock_event_device *clk) 402 { 403 unsigned long ctrl; 404 u64 cval; 405 406 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 407 ctrl |= ARCH_TIMER_CTRL_ENABLE; 408 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; 409 410 if (access == ARCH_TIMER_PHYS_ACCESS) { 411 cval = evt + arch_counter_get_cntpct_stable(); 412 write_sysreg(cval, cntp_cval_el0); 413 } else { 414 cval = evt + arch_counter_get_cntvct_stable(); 415 write_sysreg(cval, cntv_cval_el0); 416 } 417 418 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 419 } 420 421 static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt, 422 struct clock_event_device *clk) 423 { 424 erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk); 425 return 0; 426 } 427 428 static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt, 429 struct clock_event_device *clk) 430 { 431 erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk); 432 return 0; 433 } 434 435 static const struct arch_timer_erratum_workaround ool_workarounds[] = { 436 #ifdef CONFIG_FSL_ERRATUM_A008585 437 { 438 .match_type = ate_match_dt, 439 .id = "fsl,erratum-a008585", 440 .desc = "Freescale erratum a005858", 441 .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0, 442 .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0, 443 .read_cntpct_el0 = fsl_a008585_read_cntpct_el0, 444 .read_cntvct_el0 = fsl_a008585_read_cntvct_el0, 445 .set_next_event_phys = erratum_set_next_event_tval_phys, 446 .set_next_event_virt = erratum_set_next_event_tval_virt, 447 }, 448 #endif 449 #ifdef CONFIG_HISILICON_ERRATUM_161010101 450 { 451 .match_type = ate_match_dt, 452 .id = "hisilicon,erratum-161010101", 453 .desc = "HiSilicon erratum 161010101", 454 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0, 455 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0, 456 .read_cntpct_el0 = hisi_161010101_read_cntpct_el0, 457 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0, 458 .set_next_event_phys = erratum_set_next_event_tval_phys, 459 .set_next_event_virt = erratum_set_next_event_tval_virt, 460 }, 461 { 462 .match_type = ate_match_acpi_oem_info, 463 .id = hisi_161010101_oem_info, 464 .desc = "HiSilicon erratum 161010101", 465 .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0, 466 .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0, 467 .read_cntpct_el0 = hisi_161010101_read_cntpct_el0, 468 .read_cntvct_el0 = hisi_161010101_read_cntvct_el0, 469 .set_next_event_phys = erratum_set_next_event_tval_phys, 470 .set_next_event_virt = erratum_set_next_event_tval_virt, 471 }, 472 #endif 473 #ifdef CONFIG_ARM64_ERRATUM_858921 474 { 475 .match_type = ate_match_local_cap_id, 476 .id = (void *)ARM64_WORKAROUND_858921, 477 .desc = "ARM erratum 858921", 478 .read_cntpct_el0 = arm64_858921_read_cntpct_el0, 479 .read_cntvct_el0 = arm64_858921_read_cntvct_el0, 480 }, 481 #endif 482 #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1 483 { 484 .match_type = ate_match_dt, 485 .id = "allwinner,erratum-unknown1", 486 .desc = "Allwinner erratum UNKNOWN1", 487 .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0, 488 .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0, 489 .read_cntpct_el0 = sun50i_a64_read_cntpct_el0, 490 .read_cntvct_el0 = sun50i_a64_read_cntvct_el0, 491 .set_next_event_phys = erratum_set_next_event_tval_phys, 492 .set_next_event_virt = erratum_set_next_event_tval_virt, 493 }, 494 #endif 495 #ifdef CONFIG_ARM64_ERRATUM_1418040 496 { 497 .match_type = ate_match_local_cap_id, 498 .id = (void *)ARM64_WORKAROUND_1418040, 499 .desc = "ARM erratum 1418040", 500 .disable_compat_vdso = true, 501 }, 502 #endif 503 }; 504 505 typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, 506 const void *); 507 508 static 509 bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa, 510 const void *arg) 511 { 512 const struct device_node *np = arg; 513 514 return of_property_read_bool(np, wa->id); 515 } 516 517 static 518 bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa, 519 const void *arg) 520 { 521 return this_cpu_has_cap((uintptr_t)wa->id); 522 } 523 524 525 static 526 bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa, 527 const void *arg) 528 { 529 static const struct ate_acpi_oem_info empty_oem_info = {}; 530 const struct ate_acpi_oem_info *info = wa->id; 531 const struct acpi_table_header *table = arg; 532 533 /* Iterate over the ACPI OEM info array, looking for a match */ 534 while (memcmp(info, &empty_oem_info, sizeof(*info))) { 535 if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) && 536 !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && 537 info->oem_revision == table->oem_revision) 538 return true; 539 540 info++; 541 } 542 543 return false; 544 } 545 546 static const struct arch_timer_erratum_workaround * 547 arch_timer_iterate_errata(enum arch_timer_erratum_match_type type, 548 ate_match_fn_t match_fn, 549 void *arg) 550 { 551 int i; 552 553 for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) { 554 if (ool_workarounds[i].match_type != type) 555 continue; 556 557 if (match_fn(&ool_workarounds[i], arg)) 558 return &ool_workarounds[i]; 559 } 560 561 return NULL; 562 } 563 564 static 565 void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa, 566 bool local) 567 { 568 int i; 569 570 if (local) { 571 __this_cpu_write(timer_unstable_counter_workaround, wa); 572 } else { 573 for_each_possible_cpu(i) 574 per_cpu(timer_unstable_counter_workaround, i) = wa; 575 } 576 577 if (wa->read_cntvct_el0 || wa->read_cntpct_el0) 578 atomic_set(&timer_unstable_counter_workaround_in_use, 1); 579 580 /* 581 * Don't use the vdso fastpath if errata require using the 582 * out-of-line counter accessor. We may change our mind pretty 583 * late in the game (with a per-CPU erratum, for example), so 584 * change both the default value and the vdso itself. 585 */ 586 if (wa->read_cntvct_el0) { 587 clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE; 588 vdso_default = VDSO_CLOCKMODE_NONE; 589 } else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) { 590 vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT; 591 clocksource_counter.vdso_clock_mode = vdso_default; 592 } 593 } 594 595 static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type, 596 void *arg) 597 { 598 const struct arch_timer_erratum_workaround *wa, *__wa; 599 ate_match_fn_t match_fn = NULL; 600 bool local = false; 601 602 switch (type) { 603 case ate_match_dt: 604 match_fn = arch_timer_check_dt_erratum; 605 break; 606 case ate_match_local_cap_id: 607 match_fn = arch_timer_check_local_cap_erratum; 608 local = true; 609 break; 610 case ate_match_acpi_oem_info: 611 match_fn = arch_timer_check_acpi_oem_erratum; 612 break; 613 default: 614 WARN_ON(1); 615 return; 616 } 617 618 wa = arch_timer_iterate_errata(type, match_fn, arg); 619 if (!wa) 620 return; 621 622 __wa = __this_cpu_read(timer_unstable_counter_workaround); 623 if (__wa && wa != __wa) 624 pr_warn("Can't enable workaround for %s (clashes with %s\n)", 625 wa->desc, __wa->desc); 626 627 if (__wa) 628 return; 629 630 arch_timer_enable_workaround(wa, local); 631 pr_info("Enabling %s workaround for %s\n", 632 local ? "local" : "global", wa->desc); 633 } 634 635 static bool arch_timer_this_cpu_has_cntvct_wa(void) 636 { 637 return has_erratum_handler(read_cntvct_el0); 638 } 639 640 static bool arch_timer_counter_has_wa(void) 641 { 642 return atomic_read(&timer_unstable_counter_workaround_in_use); 643 } 644 #else 645 #define arch_timer_check_ool_workaround(t,a) do { } while(0) 646 #define arch_timer_this_cpu_has_cntvct_wa() ({false;}) 647 #define arch_timer_counter_has_wa() ({false;}) 648 #endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */ 649 650 static __always_inline irqreturn_t timer_handler(const int access, 651 struct clock_event_device *evt) 652 { 653 unsigned long ctrl; 654 655 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt); 656 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) { 657 ctrl |= ARCH_TIMER_CTRL_IT_MASK; 658 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt); 659 evt->event_handler(evt); 660 return IRQ_HANDLED; 661 } 662 663 return IRQ_NONE; 664 } 665 666 static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id) 667 { 668 struct clock_event_device *evt = dev_id; 669 670 return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt); 671 } 672 673 static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id) 674 { 675 struct clock_event_device *evt = dev_id; 676 677 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt); 678 } 679 680 static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id) 681 { 682 struct clock_event_device *evt = dev_id; 683 684 return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt); 685 } 686 687 static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id) 688 { 689 struct clock_event_device *evt = dev_id; 690 691 return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt); 692 } 693 694 static __always_inline int timer_shutdown(const int access, 695 struct clock_event_device *clk) 696 { 697 unsigned long ctrl; 698 699 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 700 ctrl &= ~ARCH_TIMER_CTRL_ENABLE; 701 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 702 703 return 0; 704 } 705 706 static int arch_timer_shutdown_virt(struct clock_event_device *clk) 707 { 708 return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk); 709 } 710 711 static int arch_timer_shutdown_phys(struct clock_event_device *clk) 712 { 713 return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk); 714 } 715 716 static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk) 717 { 718 return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk); 719 } 720 721 static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk) 722 { 723 return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk); 724 } 725 726 static __always_inline void set_next_event(const int access, unsigned long evt, 727 struct clock_event_device *clk) 728 { 729 unsigned long ctrl; 730 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); 731 ctrl |= ARCH_TIMER_CTRL_ENABLE; 732 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; 733 arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk); 734 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); 735 } 736 737 static int arch_timer_set_next_event_virt(unsigned long evt, 738 struct clock_event_device *clk) 739 { 740 set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk); 741 return 0; 742 } 743 744 static int arch_timer_set_next_event_phys(unsigned long evt, 745 struct clock_event_device *clk) 746 { 747 set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk); 748 return 0; 749 } 750 751 static int arch_timer_set_next_event_virt_mem(unsigned long evt, 752 struct clock_event_device *clk) 753 { 754 set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk); 755 return 0; 756 } 757 758 static int arch_timer_set_next_event_phys_mem(unsigned long evt, 759 struct clock_event_device *clk) 760 { 761 set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk); 762 return 0; 763 } 764 765 static void __arch_timer_setup(unsigned type, 766 struct clock_event_device *clk) 767 { 768 clk->features = CLOCK_EVT_FEAT_ONESHOT; 769 770 if (type == ARCH_TIMER_TYPE_CP15) { 771 typeof(clk->set_next_event) sne; 772 773 arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL); 774 775 if (arch_timer_c3stop) 776 clk->features |= CLOCK_EVT_FEAT_C3STOP; 777 clk->name = "arch_sys_timer"; 778 clk->rating = 450; 779 clk->cpumask = cpumask_of(smp_processor_id()); 780 clk->irq = arch_timer_ppi[arch_timer_uses_ppi]; 781 switch (arch_timer_uses_ppi) { 782 case ARCH_TIMER_VIRT_PPI: 783 clk->set_state_shutdown = arch_timer_shutdown_virt; 784 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt; 785 sne = erratum_handler(set_next_event_virt); 786 break; 787 case ARCH_TIMER_PHYS_SECURE_PPI: 788 case ARCH_TIMER_PHYS_NONSECURE_PPI: 789 case ARCH_TIMER_HYP_PPI: 790 clk->set_state_shutdown = arch_timer_shutdown_phys; 791 clk->set_state_oneshot_stopped = arch_timer_shutdown_phys; 792 sne = erratum_handler(set_next_event_phys); 793 break; 794 default: 795 BUG(); 796 } 797 798 clk->set_next_event = sne; 799 } else { 800 clk->features |= CLOCK_EVT_FEAT_DYNIRQ; 801 clk->name = "arch_mem_timer"; 802 clk->rating = 400; 803 clk->cpumask = cpu_possible_mask; 804 if (arch_timer_mem_use_virtual) { 805 clk->set_state_shutdown = arch_timer_shutdown_virt_mem; 806 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem; 807 clk->set_next_event = 808 arch_timer_set_next_event_virt_mem; 809 } else { 810 clk->set_state_shutdown = arch_timer_shutdown_phys_mem; 811 clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem; 812 clk->set_next_event = 813 arch_timer_set_next_event_phys_mem; 814 } 815 } 816 817 clk->set_state_shutdown(clk); 818 819 clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff); 820 } 821 822 static void arch_timer_evtstrm_enable(int divider) 823 { 824 u32 cntkctl = arch_timer_get_cntkctl(); 825 826 cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK; 827 /* Set the divider and enable virtual event stream */ 828 cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT) 829 | ARCH_TIMER_VIRT_EVT_EN; 830 arch_timer_set_cntkctl(cntkctl); 831 arch_timer_set_evtstrm_feature(); 832 cpumask_set_cpu(smp_processor_id(), &evtstrm_available); 833 } 834 835 static void arch_timer_configure_evtstream(void) 836 { 837 int evt_stream_div, lsb; 838 839 /* 840 * As the event stream can at most be generated at half the frequency 841 * of the counter, use half the frequency when computing the divider. 842 */ 843 evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2; 844 845 /* 846 * Find the closest power of two to the divisor. If the adjacent bit 847 * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1). 848 */ 849 lsb = fls(evt_stream_div) - 1; 850 if (lsb > 0 && (evt_stream_div & BIT(lsb - 1))) 851 lsb++; 852 853 /* enable event stream */ 854 arch_timer_evtstrm_enable(max(0, min(lsb, 15))); 855 } 856 857 static void arch_counter_set_user_access(void) 858 { 859 u32 cntkctl = arch_timer_get_cntkctl(); 860 861 /* Disable user access to the timers and both counters */ 862 /* Also disable virtual event stream */ 863 cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN 864 | ARCH_TIMER_USR_VT_ACCESS_EN 865 | ARCH_TIMER_USR_VCT_ACCESS_EN 866 | ARCH_TIMER_VIRT_EVT_EN 867 | ARCH_TIMER_USR_PCT_ACCESS_EN); 868 869 /* 870 * Enable user access to the virtual counter if it doesn't 871 * need to be workaround. The vdso may have been already 872 * disabled though. 873 */ 874 if (arch_timer_this_cpu_has_cntvct_wa()) 875 pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id()); 876 else 877 cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN; 878 879 arch_timer_set_cntkctl(cntkctl); 880 } 881 882 static bool arch_timer_has_nonsecure_ppi(void) 883 { 884 return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI && 885 arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]); 886 } 887 888 static u32 check_ppi_trigger(int irq) 889 { 890 u32 flags = irq_get_trigger_type(irq); 891 892 if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) { 893 pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq); 894 pr_warn("WARNING: Please fix your firmware\n"); 895 flags = IRQF_TRIGGER_LOW; 896 } 897 898 return flags; 899 } 900 901 static int arch_timer_starting_cpu(unsigned int cpu) 902 { 903 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); 904 u32 flags; 905 906 __arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk); 907 908 flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]); 909 enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags); 910 911 if (arch_timer_has_nonsecure_ppi()) { 912 flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]); 913 enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI], 914 flags); 915 } 916 917 arch_counter_set_user_access(); 918 if (evtstrm_enable) 919 arch_timer_configure_evtstream(); 920 921 return 0; 922 } 923 924 static int validate_timer_rate(void) 925 { 926 if (!arch_timer_rate) 927 return -EINVAL; 928 929 /* Arch timer frequency < 1MHz can cause trouble */ 930 WARN_ON(arch_timer_rate < 1000000); 931 932 return 0; 933 } 934 935 /* 936 * For historical reasons, when probing with DT we use whichever (non-zero) 937 * rate was probed first, and don't verify that others match. If the first node 938 * probed has a clock-frequency property, this overrides the HW register. 939 */ 940 static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np) 941 { 942 /* Who has more than one independent system counter? */ 943 if (arch_timer_rate) 944 return; 945 946 if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) 947 arch_timer_rate = rate; 948 949 /* Check the timer frequency. */ 950 if (validate_timer_rate()) 951 pr_warn("frequency not available\n"); 952 } 953 954 static void __init arch_timer_banner(unsigned type) 955 { 956 pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n", 957 type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "", 958 type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? 959 " and " : "", 960 type & ARCH_TIMER_TYPE_MEM ? "mmio" : "", 961 (unsigned long)arch_timer_rate / 1000000, 962 (unsigned long)(arch_timer_rate / 10000) % 100, 963 type & ARCH_TIMER_TYPE_CP15 ? 964 (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" : 965 "", 966 type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "", 967 type & ARCH_TIMER_TYPE_MEM ? 968 arch_timer_mem_use_virtual ? "virt" : "phys" : 969 ""); 970 } 971 972 u32 arch_timer_get_rate(void) 973 { 974 return arch_timer_rate; 975 } 976 977 bool arch_timer_evtstrm_available(void) 978 { 979 /* 980 * We might get called from a preemptible context. This is fine 981 * because availability of the event stream should be always the same 982 * for a preemptible context and context where we might resume a task. 983 */ 984 return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available); 985 } 986 987 static u64 arch_counter_get_cntvct_mem(void) 988 { 989 u32 vct_lo, vct_hi, tmp_hi; 990 991 do { 992 vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); 993 vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO); 994 tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI); 995 } while (vct_hi != tmp_hi); 996 997 return ((u64) vct_hi << 32) | vct_lo; 998 } 999 1000 static struct arch_timer_kvm_info arch_timer_kvm_info; 1001 1002 struct arch_timer_kvm_info *arch_timer_get_kvm_info(void) 1003 { 1004 return &arch_timer_kvm_info; 1005 } 1006 1007 static void __init arch_counter_register(unsigned type) 1008 { 1009 u64 start_count; 1010 1011 /* Register the CP15 based counter if we have one */ 1012 if (type & ARCH_TIMER_TYPE_CP15) { 1013 u64 (*rd)(void); 1014 1015 if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) || 1016 arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) { 1017 if (arch_timer_counter_has_wa()) 1018 rd = arch_counter_get_cntvct_stable; 1019 else 1020 rd = arch_counter_get_cntvct; 1021 } else { 1022 if (arch_timer_counter_has_wa()) 1023 rd = arch_counter_get_cntpct_stable; 1024 else 1025 rd = arch_counter_get_cntpct; 1026 } 1027 1028 arch_timer_read_counter = rd; 1029 clocksource_counter.vdso_clock_mode = vdso_default; 1030 } else { 1031 arch_timer_read_counter = arch_counter_get_cntvct_mem; 1032 } 1033 1034 if (!arch_counter_suspend_stop) 1035 clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; 1036 start_count = arch_timer_read_counter(); 1037 clocksource_register_hz(&clocksource_counter, arch_timer_rate); 1038 cyclecounter.mult = clocksource_counter.mult; 1039 cyclecounter.shift = clocksource_counter.shift; 1040 timecounter_init(&arch_timer_kvm_info.timecounter, 1041 &cyclecounter, start_count); 1042 1043 /* 56 bits minimum, so we assume worst case rollover */ 1044 sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate); 1045 } 1046 1047 static void arch_timer_stop(struct clock_event_device *clk) 1048 { 1049 pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id()); 1050 1051 disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]); 1052 if (arch_timer_has_nonsecure_ppi()) 1053 disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]); 1054 1055 clk->set_state_shutdown(clk); 1056 } 1057 1058 static int arch_timer_dying_cpu(unsigned int cpu) 1059 { 1060 struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); 1061 1062 cpumask_clear_cpu(smp_processor_id(), &evtstrm_available); 1063 1064 arch_timer_stop(clk); 1065 return 0; 1066 } 1067 1068 #ifdef CONFIG_CPU_PM 1069 static DEFINE_PER_CPU(unsigned long, saved_cntkctl); 1070 static int arch_timer_cpu_pm_notify(struct notifier_block *self, 1071 unsigned long action, void *hcpu) 1072 { 1073 if (action == CPU_PM_ENTER) { 1074 __this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl()); 1075 1076 cpumask_clear_cpu(smp_processor_id(), &evtstrm_available); 1077 } else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) { 1078 arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl)); 1079 1080 if (arch_timer_have_evtstrm_feature()) 1081 cpumask_set_cpu(smp_processor_id(), &evtstrm_available); 1082 } 1083 return NOTIFY_OK; 1084 } 1085 1086 static struct notifier_block arch_timer_cpu_pm_notifier = { 1087 .notifier_call = arch_timer_cpu_pm_notify, 1088 }; 1089 1090 static int __init arch_timer_cpu_pm_init(void) 1091 { 1092 return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier); 1093 } 1094 1095 static void __init arch_timer_cpu_pm_deinit(void) 1096 { 1097 WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier)); 1098 } 1099 1100 #else 1101 static int __init arch_timer_cpu_pm_init(void) 1102 { 1103 return 0; 1104 } 1105 1106 static void __init arch_timer_cpu_pm_deinit(void) 1107 { 1108 } 1109 #endif 1110 1111 static int __init arch_timer_register(void) 1112 { 1113 int err; 1114 int ppi; 1115 1116 arch_timer_evt = alloc_percpu(struct clock_event_device); 1117 if (!arch_timer_evt) { 1118 err = -ENOMEM; 1119 goto out; 1120 } 1121 1122 ppi = arch_timer_ppi[arch_timer_uses_ppi]; 1123 switch (arch_timer_uses_ppi) { 1124 case ARCH_TIMER_VIRT_PPI: 1125 err = request_percpu_irq(ppi, arch_timer_handler_virt, 1126 "arch_timer", arch_timer_evt); 1127 break; 1128 case ARCH_TIMER_PHYS_SECURE_PPI: 1129 case ARCH_TIMER_PHYS_NONSECURE_PPI: 1130 err = request_percpu_irq(ppi, arch_timer_handler_phys, 1131 "arch_timer", arch_timer_evt); 1132 if (!err && arch_timer_has_nonsecure_ppi()) { 1133 ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]; 1134 err = request_percpu_irq(ppi, arch_timer_handler_phys, 1135 "arch_timer", arch_timer_evt); 1136 if (err) 1137 free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI], 1138 arch_timer_evt); 1139 } 1140 break; 1141 case ARCH_TIMER_HYP_PPI: 1142 err = request_percpu_irq(ppi, arch_timer_handler_phys, 1143 "arch_timer", arch_timer_evt); 1144 break; 1145 default: 1146 BUG(); 1147 } 1148 1149 if (err) { 1150 pr_err("can't register interrupt %d (%d)\n", ppi, err); 1151 goto out_free; 1152 } 1153 1154 err = arch_timer_cpu_pm_init(); 1155 if (err) 1156 goto out_unreg_notify; 1157 1158 /* Register and immediately configure the timer on the boot CPU */ 1159 err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING, 1160 "clockevents/arm/arch_timer:starting", 1161 arch_timer_starting_cpu, arch_timer_dying_cpu); 1162 if (err) 1163 goto out_unreg_cpupm; 1164 return 0; 1165 1166 out_unreg_cpupm: 1167 arch_timer_cpu_pm_deinit(); 1168 1169 out_unreg_notify: 1170 free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt); 1171 if (arch_timer_has_nonsecure_ppi()) 1172 free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI], 1173 arch_timer_evt); 1174 1175 out_free: 1176 free_percpu(arch_timer_evt); 1177 out: 1178 return err; 1179 } 1180 1181 static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq) 1182 { 1183 int ret; 1184 irq_handler_t func; 1185 struct arch_timer *t; 1186 1187 t = kzalloc(sizeof(*t), GFP_KERNEL); 1188 if (!t) 1189 return -ENOMEM; 1190 1191 t->base = base; 1192 t->evt.irq = irq; 1193 __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt); 1194 1195 if (arch_timer_mem_use_virtual) 1196 func = arch_timer_handler_virt_mem; 1197 else 1198 func = arch_timer_handler_phys_mem; 1199 1200 ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt); 1201 if (ret) { 1202 pr_err("Failed to request mem timer irq\n"); 1203 kfree(t); 1204 } 1205 1206 return ret; 1207 } 1208 1209 static const struct of_device_id arch_timer_of_match[] __initconst = { 1210 { .compatible = "arm,armv7-timer", }, 1211 { .compatible = "arm,armv8-timer", }, 1212 {}, 1213 }; 1214 1215 static const struct of_device_id arch_timer_mem_of_match[] __initconst = { 1216 { .compatible = "arm,armv7-timer-mem", }, 1217 {}, 1218 }; 1219 1220 static bool __init arch_timer_needs_of_probing(void) 1221 { 1222 struct device_node *dn; 1223 bool needs_probing = false; 1224 unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM; 1225 1226 /* We have two timers, and both device-tree nodes are probed. */ 1227 if ((arch_timers_present & mask) == mask) 1228 return false; 1229 1230 /* 1231 * Only one type of timer is probed, 1232 * check if we have another type of timer node in device-tree. 1233 */ 1234 if (arch_timers_present & ARCH_TIMER_TYPE_CP15) 1235 dn = of_find_matching_node(NULL, arch_timer_mem_of_match); 1236 else 1237 dn = of_find_matching_node(NULL, arch_timer_of_match); 1238 1239 if (dn && of_device_is_available(dn)) 1240 needs_probing = true; 1241 1242 of_node_put(dn); 1243 1244 return needs_probing; 1245 } 1246 1247 static int __init arch_timer_common_init(void) 1248 { 1249 arch_timer_banner(arch_timers_present); 1250 arch_counter_register(arch_timers_present); 1251 return arch_timer_arch_init(); 1252 } 1253 1254 /** 1255 * arch_timer_select_ppi() - Select suitable PPI for the current system. 1256 * 1257 * If HYP mode is available, we know that the physical timer 1258 * has been configured to be accessible from PL1. Use it, so 1259 * that a guest can use the virtual timer instead. 1260 * 1261 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE 1262 * accesses to CNTP_*_EL1 registers are silently redirected to 1263 * their CNTHP_*_EL2 counterparts, and use a different PPI 1264 * number. 1265 * 1266 * If no interrupt provided for virtual timer, we'll have to 1267 * stick to the physical timer. It'd better be accessible... 1268 * For arm64 we never use the secure interrupt. 1269 * 1270 * Return: a suitable PPI type for the current system. 1271 */ 1272 static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void) 1273 { 1274 if (is_kernel_in_hyp_mode()) 1275 return ARCH_TIMER_HYP_PPI; 1276 1277 if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI]) 1278 return ARCH_TIMER_VIRT_PPI; 1279 1280 if (IS_ENABLED(CONFIG_ARM64)) 1281 return ARCH_TIMER_PHYS_NONSECURE_PPI; 1282 1283 return ARCH_TIMER_PHYS_SECURE_PPI; 1284 } 1285 1286 static void __init arch_timer_populate_kvm_info(void) 1287 { 1288 arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI]; 1289 if (is_kernel_in_hyp_mode()) 1290 arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]; 1291 } 1292 1293 static int __init arch_timer_of_init(struct device_node *np) 1294 { 1295 int i, irq, ret; 1296 u32 rate; 1297 bool has_names; 1298 1299 if (arch_timers_present & ARCH_TIMER_TYPE_CP15) { 1300 pr_warn("multiple nodes in dt, skipping\n"); 1301 return 0; 1302 } 1303 1304 arch_timers_present |= ARCH_TIMER_TYPE_CP15; 1305 1306 has_names = of_property_read_bool(np, "interrupt-names"); 1307 1308 for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) { 1309 if (has_names) 1310 irq = of_irq_get_byname(np, arch_timer_ppi_names[i]); 1311 else 1312 irq = of_irq_get(np, i); 1313 if (irq > 0) 1314 arch_timer_ppi[i] = irq; 1315 } 1316 1317 arch_timer_populate_kvm_info(); 1318 1319 rate = arch_timer_get_cntfrq(); 1320 arch_timer_of_configure_rate(rate, np); 1321 1322 arch_timer_c3stop = !of_property_read_bool(np, "always-on"); 1323 1324 /* Check for globally applicable workarounds */ 1325 arch_timer_check_ool_workaround(ate_match_dt, np); 1326 1327 /* 1328 * If we cannot rely on firmware initializing the timer registers then 1329 * we should use the physical timers instead. 1330 */ 1331 if (IS_ENABLED(CONFIG_ARM) && 1332 of_property_read_bool(np, "arm,cpu-registers-not-fw-configured")) 1333 arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI; 1334 else 1335 arch_timer_uses_ppi = arch_timer_select_ppi(); 1336 1337 if (!arch_timer_ppi[arch_timer_uses_ppi]) { 1338 pr_err("No interrupt available, giving up\n"); 1339 return -EINVAL; 1340 } 1341 1342 /* On some systems, the counter stops ticking when in suspend. */ 1343 arch_counter_suspend_stop = of_property_read_bool(np, 1344 "arm,no-tick-in-suspend"); 1345 1346 ret = arch_timer_register(); 1347 if (ret) 1348 return ret; 1349 1350 if (arch_timer_needs_of_probing()) 1351 return 0; 1352 1353 return arch_timer_common_init(); 1354 } 1355 TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init); 1356 TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init); 1357 1358 static u32 __init 1359 arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame) 1360 { 1361 void __iomem *base; 1362 u32 rate; 1363 1364 base = ioremap(frame->cntbase, frame->size); 1365 if (!base) { 1366 pr_err("Unable to map frame @ %pa\n", &frame->cntbase); 1367 return 0; 1368 } 1369 1370 rate = readl_relaxed(base + CNTFRQ); 1371 1372 iounmap(base); 1373 1374 return rate; 1375 } 1376 1377 static struct arch_timer_mem_frame * __init 1378 arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem) 1379 { 1380 struct arch_timer_mem_frame *frame, *best_frame = NULL; 1381 void __iomem *cntctlbase; 1382 u32 cnttidr; 1383 int i; 1384 1385 cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size); 1386 if (!cntctlbase) { 1387 pr_err("Can't map CNTCTLBase @ %pa\n", 1388 &timer_mem->cntctlbase); 1389 return NULL; 1390 } 1391 1392 cnttidr = readl_relaxed(cntctlbase + CNTTIDR); 1393 1394 /* 1395 * Try to find a virtual capable frame. Otherwise fall back to a 1396 * physical capable frame. 1397 */ 1398 for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) { 1399 u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT | 1400 CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT; 1401 1402 frame = &timer_mem->frame[i]; 1403 if (!frame->valid) 1404 continue; 1405 1406 /* Try enabling everything, and see what sticks */ 1407 writel_relaxed(cntacr, cntctlbase + CNTACR(i)); 1408 cntacr = readl_relaxed(cntctlbase + CNTACR(i)); 1409 1410 if ((cnttidr & CNTTIDR_VIRT(i)) && 1411 !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) { 1412 best_frame = frame; 1413 arch_timer_mem_use_virtual = true; 1414 break; 1415 } 1416 1417 if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT)) 1418 continue; 1419 1420 best_frame = frame; 1421 } 1422 1423 iounmap(cntctlbase); 1424 1425 return best_frame; 1426 } 1427 1428 static int __init 1429 arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame) 1430 { 1431 void __iomem *base; 1432 int ret, irq = 0; 1433 1434 if (arch_timer_mem_use_virtual) 1435 irq = frame->virt_irq; 1436 else 1437 irq = frame->phys_irq; 1438 1439 if (!irq) { 1440 pr_err("Frame missing %s irq.\n", 1441 arch_timer_mem_use_virtual ? "virt" : "phys"); 1442 return -EINVAL; 1443 } 1444 1445 if (!request_mem_region(frame->cntbase, frame->size, 1446 "arch_mem_timer")) 1447 return -EBUSY; 1448 1449 base = ioremap(frame->cntbase, frame->size); 1450 if (!base) { 1451 pr_err("Can't map frame's registers\n"); 1452 return -ENXIO; 1453 } 1454 1455 ret = arch_timer_mem_register(base, irq); 1456 if (ret) { 1457 iounmap(base); 1458 return ret; 1459 } 1460 1461 arch_counter_base = base; 1462 arch_timers_present |= ARCH_TIMER_TYPE_MEM; 1463 1464 return 0; 1465 } 1466 1467 static int __init arch_timer_mem_of_init(struct device_node *np) 1468 { 1469 struct arch_timer_mem *timer_mem; 1470 struct arch_timer_mem_frame *frame; 1471 struct device_node *frame_node; 1472 struct resource res; 1473 int ret = -EINVAL; 1474 u32 rate; 1475 1476 timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL); 1477 if (!timer_mem) 1478 return -ENOMEM; 1479 1480 if (of_address_to_resource(np, 0, &res)) 1481 goto out; 1482 timer_mem->cntctlbase = res.start; 1483 timer_mem->size = resource_size(&res); 1484 1485 for_each_available_child_of_node(np, frame_node) { 1486 u32 n; 1487 struct arch_timer_mem_frame *frame; 1488 1489 if (of_property_read_u32(frame_node, "frame-number", &n)) { 1490 pr_err(FW_BUG "Missing frame-number.\n"); 1491 of_node_put(frame_node); 1492 goto out; 1493 } 1494 if (n >= ARCH_TIMER_MEM_MAX_FRAMES) { 1495 pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n", 1496 ARCH_TIMER_MEM_MAX_FRAMES - 1); 1497 of_node_put(frame_node); 1498 goto out; 1499 } 1500 frame = &timer_mem->frame[n]; 1501 1502 if (frame->valid) { 1503 pr_err(FW_BUG "Duplicated frame-number.\n"); 1504 of_node_put(frame_node); 1505 goto out; 1506 } 1507 1508 if (of_address_to_resource(frame_node, 0, &res)) { 1509 of_node_put(frame_node); 1510 goto out; 1511 } 1512 frame->cntbase = res.start; 1513 frame->size = resource_size(&res); 1514 1515 frame->virt_irq = irq_of_parse_and_map(frame_node, 1516 ARCH_TIMER_VIRT_SPI); 1517 frame->phys_irq = irq_of_parse_and_map(frame_node, 1518 ARCH_TIMER_PHYS_SPI); 1519 1520 frame->valid = true; 1521 } 1522 1523 frame = arch_timer_mem_find_best_frame(timer_mem); 1524 if (!frame) { 1525 pr_err("Unable to find a suitable frame in timer @ %pa\n", 1526 &timer_mem->cntctlbase); 1527 ret = -EINVAL; 1528 goto out; 1529 } 1530 1531 rate = arch_timer_mem_frame_get_cntfrq(frame); 1532 arch_timer_of_configure_rate(rate, np); 1533 1534 ret = arch_timer_mem_frame_register(frame); 1535 if (!ret && !arch_timer_needs_of_probing()) 1536 ret = arch_timer_common_init(); 1537 out: 1538 kfree(timer_mem); 1539 return ret; 1540 } 1541 TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", 1542 arch_timer_mem_of_init); 1543 1544 #ifdef CONFIG_ACPI_GTDT 1545 static int __init 1546 arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem) 1547 { 1548 struct arch_timer_mem_frame *frame; 1549 u32 rate; 1550 int i; 1551 1552 for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) { 1553 frame = &timer_mem->frame[i]; 1554 1555 if (!frame->valid) 1556 continue; 1557 1558 rate = arch_timer_mem_frame_get_cntfrq(frame); 1559 if (rate == arch_timer_rate) 1560 continue; 1561 1562 pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n", 1563 &frame->cntbase, 1564 (unsigned long)rate, (unsigned long)arch_timer_rate); 1565 1566 return -EINVAL; 1567 } 1568 1569 return 0; 1570 } 1571 1572 static int __init arch_timer_mem_acpi_init(int platform_timer_count) 1573 { 1574 struct arch_timer_mem *timers, *timer; 1575 struct arch_timer_mem_frame *frame, *best_frame = NULL; 1576 int timer_count, i, ret = 0; 1577 1578 timers = kcalloc(platform_timer_count, sizeof(*timers), 1579 GFP_KERNEL); 1580 if (!timers) 1581 return -ENOMEM; 1582 1583 ret = acpi_arch_timer_mem_init(timers, &timer_count); 1584 if (ret || !timer_count) 1585 goto out; 1586 1587 /* 1588 * While unlikely, it's theoretically possible that none of the frames 1589 * in a timer expose the combination of feature we want. 1590 */ 1591 for (i = 0; i < timer_count; i++) { 1592 timer = &timers[i]; 1593 1594 frame = arch_timer_mem_find_best_frame(timer); 1595 if (!best_frame) 1596 best_frame = frame; 1597 1598 ret = arch_timer_mem_verify_cntfrq(timer); 1599 if (ret) { 1600 pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); 1601 goto out; 1602 } 1603 1604 if (!best_frame) /* implies !frame */ 1605 /* 1606 * Only complain about missing suitable frames if we 1607 * haven't already found one in a previous iteration. 1608 */ 1609 pr_err("Unable to find a suitable frame in timer @ %pa\n", 1610 &timer->cntctlbase); 1611 } 1612 1613 if (best_frame) 1614 ret = arch_timer_mem_frame_register(best_frame); 1615 out: 1616 kfree(timers); 1617 return ret; 1618 } 1619 1620 /* Initialize per-processor generic timer and memory-mapped timer(if present) */ 1621 static int __init arch_timer_acpi_init(struct acpi_table_header *table) 1622 { 1623 int ret, platform_timer_count; 1624 1625 if (arch_timers_present & ARCH_TIMER_TYPE_CP15) { 1626 pr_warn("already initialized, skipping\n"); 1627 return -EINVAL; 1628 } 1629 1630 arch_timers_present |= ARCH_TIMER_TYPE_CP15; 1631 1632 ret = acpi_gtdt_init(table, &platform_timer_count); 1633 if (ret) 1634 return ret; 1635 1636 arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] = 1637 acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI); 1638 1639 arch_timer_ppi[ARCH_TIMER_VIRT_PPI] = 1640 acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI); 1641 1642 arch_timer_ppi[ARCH_TIMER_HYP_PPI] = 1643 acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI); 1644 1645 arch_timer_populate_kvm_info(); 1646 1647 /* 1648 * When probing via ACPI, we have no mechanism to override the sysreg 1649 * CNTFRQ value. This *must* be correct. 1650 */ 1651 arch_timer_rate = arch_timer_get_cntfrq(); 1652 ret = validate_timer_rate(); 1653 if (ret) { 1654 pr_err(FW_BUG "frequency not available.\n"); 1655 return ret; 1656 } 1657 1658 arch_timer_uses_ppi = arch_timer_select_ppi(); 1659 if (!arch_timer_ppi[arch_timer_uses_ppi]) { 1660 pr_err("No interrupt available, giving up\n"); 1661 return -EINVAL; 1662 } 1663 1664 /* Always-on capability */ 1665 arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi); 1666 1667 /* Check for globally applicable workarounds */ 1668 arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table); 1669 1670 ret = arch_timer_register(); 1671 if (ret) 1672 return ret; 1673 1674 if (platform_timer_count && 1675 arch_timer_mem_acpi_init(platform_timer_count)) 1676 pr_err("Failed to initialize memory-mapped timer.\n"); 1677 1678 return arch_timer_common_init(); 1679 } 1680 TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init); 1681 #endif 1682 1683 int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *ts, 1684 struct clocksource **cs) 1685 { 1686 struct arm_smccc_res hvc_res; 1687 u32 ptp_counter; 1688 ktime_t ktime; 1689 1690 if (!IS_ENABLED(CONFIG_HAVE_ARM_SMCCC_DISCOVERY)) 1691 return -EOPNOTSUPP; 1692 1693 if (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) 1694 ptp_counter = KVM_PTP_VIRT_COUNTER; 1695 else 1696 ptp_counter = KVM_PTP_PHYS_COUNTER; 1697 1698 arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID, 1699 ptp_counter, &hvc_res); 1700 1701 if ((int)(hvc_res.a0) < 0) 1702 return -EOPNOTSUPP; 1703 1704 ktime = (u64)hvc_res.a0 << 32 | hvc_res.a1; 1705 *ts = ktime_to_timespec64(ktime); 1706 if (cycle) 1707 *cycle = (u64)hvc_res.a2 << 32 | hvc_res.a3; 1708 if (cs) 1709 *cs = &clocksource_counter; 1710 1711 return 0; 1712 } 1713 EXPORT_SYMBOL_GPL(kvm_arch_ptp_get_crosststamp); 1714