/*
 * linux/drivers/clocksource/arm_arch_timer.c
 *
 * Copyright (C) 2011 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/io.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

static u32 arch_timer_rate;

enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static bool arch_timer_use_virtual = true;

/*
 * Architected system timer support.
 */

static inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static inline void timer_set_mode(const int access, int mode)
{
	unsigned long ctrl;
	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
		break;
	default:
		break;
	}
}

static void arch_timer_set_mode_virt(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
}

static void arch_timer_set_mode_phys(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
}

static inline void set_next_event(const int access, unsigned long evt)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
	return 0;
}

static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
	clk->name = "arch_sys_timer";
	clk->rating = 450;
	if (arch_timer_use_virtual) {
		clk->irq = arch_timer_ppi[VIRT_PPI];
		clk->set_mode = arch_timer_set_mode_virt;
		clk->set_next_event = arch_timer_set_next_event_virt;
	} else {
		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
		clk->set_mode = arch_timer_set_mode_phys;
		clk->set_next_event = arch_timer_set_next_event_phys;
	}

	clk->cpumask = cpumask_of(smp_processor_id());

	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);

	clockevents_config_and_register(clk, arch_timer_rate,
					0xf, 0x7fffffff);

	if (arch_timer_use_virtual)
		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
	else {
		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
	}

	arch_counter_set_user_access();

	return 0;
}

static int arch_timer_available(void)
{
	u32 freq;

	if (arch_timer_rate == 0) {
		freq = arch_timer_get_cntfrq();

		/* Check the timer frequency. */
		if (freq == 0) {
			pr_warn("Architected timer frequency not available\n");
			return -EINVAL;
		}

		arch_timer_rate = freq;
	}

	pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
		     (unsigned long)arch_timer_rate / 1000000,
		     (unsigned long)(arch_timer_rate / 10000) % 100,
		     arch_timer_use_virtual ? "virt" : "phys");
	return 0;
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

/*
 * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
 * call it before it has been initialised. Rather than incur a performance
 * penalty checking for initialisation, provide a default implementation that
 * won't lead to time appearing to jump backwards.
 */
static u64 arch_timer_read_zero(void)
{
	return 0;
}

u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;

static cycle_t arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

static struct timecounter timecounter;

struct timecounter *arch_timer_get_timecounter(void)
{
	return &timecounter;
}

static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	if (arch_timer_use_virtual)
		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
	else {
		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
	}

	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}

static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
					   unsigned long action, void *hcpu)
{
	/*
	 * Grab cpu pointer in each case to avoid spurious
	 * preemptible warnings
	 */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		arch_timer_setup(this_cpu_ptr(arch_timer_evt));
		break;
	case CPU_DYING:
		arch_timer_stop(this_cpu_ptr(arch_timer_evt));
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
	.notifier_call = arch_timer_cpu_notify,
};

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	err = arch_timer_available();
	if (err)
		goto out;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&timecounter, &cyclecounter,
			 arch_counter_get_cntpct());

	if (arch_timer_use_virtual) {
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&arch_timer_cpu_nb);
	if (err)
		goto out_free_irq;

	/* Immediately configure the timer on the boot CPU */
	arch_timer_setup(this_cpu_ptr(arch_timer_evt));

	return 0;

out_free_irq:
	if (arch_timer_use_virtual)
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static void __init arch_timer_init(struct device_node *np)
{
	u32 freq;
	int i;

	if (arch_timer_get_rate()) {
		pr_warn("arch_timer: multiple nodes in dt, skipping\n");
		return;
	}

	/* Try to determine the frequency from the device tree or CNTFRQ */
	if (!of_property_read_u32(np, "clock-frequency", &freq))
		arch_timer_rate = freq;

	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	of_node_put(np);

	/*
	 * If HYP mode is available, we know that the physical timer
	 * has been configured to be accessible from PL1. Use it, so
	 * that a guest can use the virtual timer instead.
	 *
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 */
	if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
		arch_timer_use_virtual = false;

		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return;
		}
	}

	if (arch_timer_use_virtual)
		arch_timer_read_counter = arch_counter_get_cntvct;
	else
		arch_timer_read_counter = arch_counter_get_cntpct;

	arch_timer_register();
	arch_timer_arch_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);
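
/*
 * For reference, a minimal sketch of a device tree node that
 * arch_timer_init() can consume. The interrupt specifiers and frequency
 * below are illustrative assumptions, not taken from any particular board.
 * The four interrupts are parsed in enum ppi_nr order (PHYS_SECURE_PPI,
 * PHYS_NONSECURE_PPI, VIRT_PPI, HYP_PPI), and "clock-frequency" is
 * optional: when present it takes precedence over the CNTFRQ register,
 * otherwise arch_timer_available() falls back to reading CNTFRQ.
 *
 *	timer {
 *		compatible = "arm,armv7-timer";
 *		interrupts = <1 13 0xf08>,
 *			     <1 14 0xf08>,
 *			     <1 11 0xf08>,
 *			     <1 10 0xf08>;
 *		clock-frequency = <24000000>;
 *	};
 */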