/*
 * linux/arch/arm/kernel/arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/delay.h>
#include <asm/localtimer.h>
#include <asm/arch_timer.h>
#include <asm/system_info.h>
#include <asm/sched_clock.h>

static unsigned long arch_timer_rate;

enum ppi_nr {
        PHYS_SECURE_PPI,
        PHYS_NONSECURE_PPI,
        VIRT_PPI,
        HYP_PPI,
        MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu **arch_timer_evt;
static struct delay_timer arch_delay_timer;

static bool arch_timer_use_virtual = true;

/*
 * Architected system timer support.
 */

#define ARCH_TIMER_CTRL_ENABLE          (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK         (1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT         (1 << 2)

#define ARCH_TIMER_REG_CTRL             0
#define ARCH_TIMER_REG_FREQ             1
#define ARCH_TIMER_REG_TVAL             2

#define ARCH_TIMER_PHYS_ACCESS          0
#define ARCH_TIMER_VIRT_ACCESS          1

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3).
 */
static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
{
        if (access == ARCH_TIMER_PHYS_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        /* CNTP_CTL */
                        asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:
                        /* CNTP_TVAL */
                        asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
                        break;
                }
        }

        if (access == ARCH_TIMER_VIRT_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        /* CNTV_CTL */
                        asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:
                        /* CNTV_TVAL */
                        asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
                        break;
                }
        }

        /* Ensure the register write has taken effect before continuing. */
        isb();
}

static inline u32 arch_timer_reg_read(const int access, const int reg)
{
        u32 val = 0;

        if (access == ARCH_TIMER_PHYS_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        /* CNTP_CTL */
                        asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:
                        /* CNTP_TVAL */
                        asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
                        break;
                case ARCH_TIMER_REG_FREQ:
                        /* CNTFRQ */
                        asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
                        break;
                }
        }

        if (access == ARCH_TIMER_VIRT_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        /* CNTV_CTL */
                        asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:
                        /* CNTV_TVAL */
                        asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
                        break;
                }
        }

        return val;
}

static inline cycle_t arch_timer_counter_read(const int access)
{
        cycle_t cval = 0;

        /*
         * The counters are 64 bits wide; %Q0 and %R0 name the low and
         * high words of cval for the two destination registers of mrrc.
         */
        if (access == ARCH_TIMER_PHYS_ACCESS)
                asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval)); /* CNTPCT */

        if (access == ARCH_TIMER_VIRT_ACCESS)
                asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval)); /* CNTVCT */

        return cval;
}
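/*
 * CNTPCT is the physical counter; CNTVCT is the virtual counter, that
 * is, CNTPCT minus the hypervisor-controlled offset CNTVOFF. The
 * virtual counter is always accessible to a guest without trapping,
 * which is why this driver defaults to it (arch_timer_use_virtual
 * above); on bare metal the boot code is expected to have programmed
 * CNTVOFF so that the two views agree.
 */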
static inline cycle_t arch_counter_get_cntpct(void)
{
        return arch_timer_counter_read(ARCH_TIMER_PHYS_ACCESS);
}

static inline cycle_t arch_counter_get_cntvct(void)
{
        return arch_timer_counter_read(ARCH_TIMER_VIRT_ACCESS);
}

static inline irqreturn_t timer_handler(const int access,
                                        struct clock_event_device *evt)
{
        unsigned long ctrl;

        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
        if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
                /* Mask the interrupt before handing off to the event handler. */
                ctrl |= ARCH_TIMER_CTRL_IT_MASK;
                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
                evt->event_handler(evt);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
        struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

        return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
        struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

        return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static inline void timer_set_mode(const int access, int mode)
{
        unsigned long ctrl;

        switch (mode) {
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
                ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
                arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
                break;
        default:
                break;
        }
}

static void arch_timer_set_mode_virt(enum clock_event_mode mode,
                                     struct clock_event_device *clk)
{
        timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
}

static void arch_timer_set_mode_phys(enum clock_event_mode mode,
                                     struct clock_event_device *clk)
{
        timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
}

static inline void set_next_event(const int access, unsigned long evt)
{
        unsigned long ctrl;

        ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
        ctrl |= ARCH_TIMER_CTRL_ENABLE;
        ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
        arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
        arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
                                          struct clock_event_device *unused)
{
        set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
        return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
                                          struct clock_event_device *unused)
{
        set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
        return 0;
}
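/*
 * Per-CPU event device setup, run on each CPU as it comes online.
 * CLOCK_EVT_FEAT_C3STOP tells the clockevents core that the timer may
 * stop in deep idle states, and the 450 rating makes it preferred over
 * most SoC-level timers. TVAL is a signed 32-bit downcounter, hence the
 * 0x7fffffff maximum delta passed to clockevents_config_and_register().
 */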
static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
        clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
        clk->name = "arch_sys_timer";
        clk->rating = 450;
        if (arch_timer_use_virtual) {
                clk->irq = arch_timer_ppi[VIRT_PPI];
                clk->set_mode = arch_timer_set_mode_virt;
                clk->set_next_event = arch_timer_set_next_event_virt;
        } else {
                clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
                clk->set_mode = arch_timer_set_mode_phys;
                clk->set_next_event = arch_timer_set_next_event_phys;
        }

        clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);

        clockevents_config_and_register(clk, arch_timer_rate,
                                        0xf, 0x7fffffff);

        *__this_cpu_ptr(arch_timer_evt) = clk;

        if (arch_timer_use_virtual)
                enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
        else {
                enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
                        enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
        }

        return 0;
}

/* Is the optional system timer available? */
static int local_timer_is_architected(void)
{
        return (cpu_architecture() >= CPU_ARCH_ARMv7) &&
               ((read_cpuid_ext(CPUID_EXT_PFR1) >> 16) & 0xf) == 1;
}

static int arch_timer_available(void)
{
        unsigned long freq;

        if (!local_timer_is_architected())
                return -ENXIO;

        if (arch_timer_rate == 0) {
                /* No rate from DT; fall back to the frequency in CNTFRQ. */
                freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS,
                                           ARCH_TIMER_REG_FREQ);

                /* Check the timer frequency. */
                if (freq == 0) {
                        pr_warn("Architected timer frequency not available\n");
                        return -EINVAL;
                }

                arch_timer_rate = freq;
        }

        pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
                     arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100,
                     arch_timer_use_virtual ? "virt" : "phys");
        return 0;
}

static u32 notrace arch_counter_get_cntpct32(void)
{
        cycle_t cnt = arch_counter_get_cntpct();

        /*
         * The sched_clock infrastructure only knows about counters
         * with at most 32 bits. Forget about the upper 24 bits for the
         * time being...
         */
        return (u32)cnt;
}

static u32 notrace arch_counter_get_cntvct32(void)
{
        cycle_t cnt = arch_counter_get_cntvct();

        /*
         * The sched_clock infrastructure only knows about counters
         * with at most 32 bits. Forget about the upper 24 bits for the
         * time being...
         */
        return (u32)cnt;
}

static cycle_t arch_counter_read(struct clocksource *cs)
{
        /*
         * Always use the physical counter for the clocksource.
         * CNTHCTL.PL1PCTEN must be set to 1.
         */
        return arch_counter_get_cntpct();
}

static unsigned long arch_timer_read_current_timer(void)
{
        return arch_counter_get_cntpct();
}
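/*
 * The physical counter is exposed in two more forms below: as a
 * clocksource for timekeeping and as a cyclecounter/timecounter pair
 * that other subsystems can obtain through arch_timer_get_timecounter().
 * Both use a 56-bit mask, matching the minimum system counter width the
 * architecture guarantees.
 */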
static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
        /*
         * Always use the physical counter for the clocksource.
         * CNTHCTL.PL1PCTEN must be set to 1.
         */
        return arch_counter_get_cntpct();
}

static struct clocksource clocksource_counter = {
        .name   = "arch_sys_counter",
        .rating = 400,
        .read   = arch_counter_read,
        .mask   = CLOCKSOURCE_MASK(56),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter = {
        .read   = arch_counter_read_cc,
        .mask   = CLOCKSOURCE_MASK(56),
};

static struct timecounter timecounter;

struct timecounter *arch_timer_get_timecounter(void)
{
        return &timecounter;
}

static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
        pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
                 clk->irq, smp_processor_id());

        if (arch_timer_use_virtual)
                disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
        else {
                disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
                        disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
        }

        clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}

static struct local_timer_ops arch_timer_ops __cpuinitdata = {
        .setup  = arch_timer_setup,
        .stop   = arch_timer_stop,
};

static struct clock_event_device arch_timer_global_evt;
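/*
 * One-time registration: check that the timer is present and has a
 * usable frequency, register the counter as a clocksource and
 * timecounter, request the per-CPU PPIs, hook into the local timer
 * machinery (falling back to a single global event device where local
 * timers are unavailable), and finally switch the delay loop over to
 * the counter.
 */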
static int __init arch_timer_register(void)
{
        int err;
        int ppi;

        err = arch_timer_available();
        if (err)
                goto out;

        arch_timer_evt = alloc_percpu(struct clock_event_device *);
        if (!arch_timer_evt) {
                err = -ENOMEM;
                goto out;
        }

        clocksource_register_hz(&clocksource_counter, arch_timer_rate);
        cyclecounter.mult = clocksource_counter.mult;
        cyclecounter.shift = clocksource_counter.shift;
        timecounter_init(&timecounter, &cyclecounter,
                         arch_counter_get_cntpct());

        if (arch_timer_use_virtual) {
                ppi = arch_timer_ppi[VIRT_PPI];
                err = request_percpu_irq(ppi, arch_timer_handler_virt,
                                         "arch_timer", arch_timer_evt);
        } else {
                ppi = arch_timer_ppi[PHYS_SECURE_PPI];
                err = request_percpu_irq(ppi, arch_timer_handler_phys,
                                         "arch_timer", arch_timer_evt);
                if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
                        ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
                        err = request_percpu_irq(ppi, arch_timer_handler_phys,
                                                 "arch_timer", arch_timer_evt);
                        if (err)
                                free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
                                                arch_timer_evt);
                }
        }

        if (err) {
                pr_err("arch_timer: can't register interrupt %d (%d)\n",
                       ppi, err);
                goto out_free;
        }

        err = local_timer_register(&arch_timer_ops);
        if (err) {
                /*
                 * We couldn't register as a local timer (could be
                 * because we're on a UP platform, or because some
                 * other local timer is already present...). Try as a
                 * global timer instead.
                 */
                arch_timer_global_evt.cpumask = cpumask_of(0);
                err = arch_timer_setup(&arch_timer_global_evt);
        }
        if (err)
                goto out_free_irq;

        /* Use the architected timer for the delay loop. */
        arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
        arch_delay_timer.freq = arch_timer_rate;
        register_current_timer_delay(&arch_delay_timer);
        return 0;

out_free_irq:
        if (arch_timer_use_virtual)
                free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
        else {
                free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
                                arch_timer_evt);
                if (arch_timer_ppi[PHYS_NONSECURE_PPI])
                        free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
                                        arch_timer_evt);
        }

out_free:
        free_percpu(arch_timer_evt);
out:
        return err;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
        { .compatible   = "arm,armv7-timer", },
        {},
};

int __init arch_timer_of_register(void)
{
        struct device_node *np;
        u32 freq;
        int i;

        np = of_find_matching_node(NULL, arch_timer_of_match);
        if (!np) {
                pr_err("arch_timer: can't find DT node\n");
                return -ENODEV;
        }

        /* Try to determine the frequency from the device tree or CNTFRQ */
        if (!of_property_read_u32(np, "clock-frequency", &freq))
                arch_timer_rate = freq;

        for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
                arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

        /*
         * If no interrupt provided for virtual timer, we'll have to
         * stick to the physical timer. It'd better be accessible...
         */
        if (!arch_timer_ppi[VIRT_PPI]) {
                arch_timer_use_virtual = false;

                if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
                    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
                        pr_warn("arch_timer: No interrupt available, giving up\n");
                        return -EINVAL;
                }
        }

        return arch_timer_register();
}

int __init arch_timer_sched_clock_init(void)
{
        u32 (*cnt32)(void);
        int err;

        err = arch_timer_available();
        if (err)
                return err;

        if (arch_timer_use_virtual)
                cnt32 = arch_counter_get_cntvct32;
        else
                cnt32 = arch_counter_get_cntpct32;

        /*
         * sched_clock is fed the low 32 bits of the counter; the
         * sched_clock core extends this and handles the wrap, which
         * occurs no sooner than every 2^32 / arch_timer_rate seconds.
         */
        setup_sched_clock(cnt32, 32, arch_timer_rate);
        return 0;
}