1 /* 2 * linux/arch/arm/kernel/smp_twd.c 3 * 4 * Copyright (C) 2002 ARM Ltd. 5 * All Rights Reserved 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 #include <linux/init.h> 12 #include <linux/kernel.h> 13 #include <linux/clk.h> 14 #include <linux/delay.h> 15 #include <linux/device.h> 16 #include <linux/err.h> 17 #include <linux/smp.h> 18 #include <linux/jiffies.h> 19 #include <linux/clockchips.h> 20 #include <linux/interrupt.h> 21 #include <linux/io.h> 22 #include <linux/of_irq.h> 23 #include <linux/of_address.h> 24 25 #include <asm/smp_plat.h> 26 #include <asm/smp_twd.h> 27 #include <asm/localtimer.h> 28 29 /* set up by the platform code */ 30 static void __iomem *twd_base; 31 32 static struct clk *twd_clk; 33 static unsigned long twd_timer_rate; 34 static DEFINE_PER_CPU(bool, percpu_setup_called); 35 36 static struct clock_event_device __percpu **twd_evt; 37 static int twd_ppi; 38 39 static void twd_set_mode(enum clock_event_mode mode, 40 struct clock_event_device *clk) 41 { 42 unsigned long ctrl; 43 44 switch (mode) { 45 case CLOCK_EVT_MODE_PERIODIC: 46 ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE 47 | TWD_TIMER_CONTROL_PERIODIC; 48 __raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ), 49 twd_base + TWD_TIMER_LOAD); 50 break; 51 case CLOCK_EVT_MODE_ONESHOT: 52 /* period set, and timer enabled in 'next_event' hook */ 53 ctrl = TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT; 54 break; 55 case CLOCK_EVT_MODE_UNUSED: 56 case CLOCK_EVT_MODE_SHUTDOWN: 57 default: 58 ctrl = 0; 59 } 60 61 __raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL); 62 } 63 64 static int twd_set_next_event(unsigned long evt, 65 struct clock_event_device *unused) 66 { 67 unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL); 68 69 ctrl |= TWD_TIMER_CONTROL_ENABLE; 70 71 __raw_writel(evt, twd_base + TWD_TIMER_COUNTER); 72 __raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL); 73 74 return 0; 75 } 76 77 /* 78 * local_timer_ack: checks for a local timer interrupt. 79 * 80 * If a local timer interrupt has occurred, acknowledge and return 1. 81 * Otherwise, return 0. 82 */ 83 static int twd_timer_ack(void) 84 { 85 if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) { 86 __raw_writel(1, twd_base + TWD_TIMER_INTSTAT); 87 return 1; 88 } 89 90 return 0; 91 } 92 93 static void twd_timer_stop(struct clock_event_device *clk) 94 { 95 twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk); 96 disable_percpu_irq(clk->irq); 97 } 98 99 #ifdef CONFIG_COMMON_CLK 100 101 /* 102 * Updates clockevent frequency when the cpu frequency changes. 103 * Called on the cpu that is changing frequency with interrupts disabled. 104 */ 105 static void twd_update_frequency(void *new_rate) 106 { 107 twd_timer_rate = *((unsigned long *) new_rate); 108 109 clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate); 110 } 111 112 static int twd_rate_change(struct notifier_block *nb, 113 unsigned long flags, void *data) 114 { 115 struct clk_notifier_data *cnd = data; 116 117 /* 118 * The twd clock events must be reprogrammed to account for the new 119 * frequency. The timer is local to a cpu, so cross-call to the 120 * changing cpu. 121 */ 122 if (flags == POST_RATE_CHANGE) 123 on_each_cpu(twd_update_frequency, 124 (void *)&cnd->new_rate, 1); 125 126 return NOTIFY_OK; 127 } 128 129 static struct notifier_block twd_clk_nb = { 130 .notifier_call = twd_rate_change, 131 }; 132 133 static int twd_clk_init(void) 134 { 135 if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk)) 136 return clk_notifier_register(twd_clk, &twd_clk_nb); 137 138 return 0; 139 } 140 core_initcall(twd_clk_init); 141 142 #elif defined (CONFIG_CPU_FREQ) 143 144 #include <linux/cpufreq.h> 145 146 /* 147 * Updates clockevent frequency when the cpu frequency changes. 148 * Called on the cpu that is changing frequency with interrupts disabled. 149 */ 150 static void twd_update_frequency(void *data) 151 { 152 twd_timer_rate = clk_get_rate(twd_clk); 153 154 clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate); 155 } 156 157 static int twd_cpufreq_transition(struct notifier_block *nb, 158 unsigned long state, void *data) 159 { 160 struct cpufreq_freqs *freqs = data; 161 162 /* 163 * The twd clock events must be reprogrammed to account for the new 164 * frequency. The timer is local to a cpu, so cross-call to the 165 * changing cpu. 166 */ 167 if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE) 168 smp_call_function_single(freqs->cpu, twd_update_frequency, 169 NULL, 1); 170 171 return NOTIFY_OK; 172 } 173 174 static struct notifier_block twd_cpufreq_nb = { 175 .notifier_call = twd_cpufreq_transition, 176 }; 177 178 static int twd_cpufreq_init(void) 179 { 180 if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk)) 181 return cpufreq_register_notifier(&twd_cpufreq_nb, 182 CPUFREQ_TRANSITION_NOTIFIER); 183 184 return 0; 185 } 186 core_initcall(twd_cpufreq_init); 187 188 #endif 189 190 static void twd_calibrate_rate(void) 191 { 192 unsigned long count; 193 u64 waitjiffies; 194 195 /* 196 * If this is the first time round, we need to work out how fast 197 * the timer ticks 198 */ 199 if (twd_timer_rate == 0) { 200 printk(KERN_INFO "Calibrating local timer... "); 201 202 /* Wait for a tick to start */ 203 waitjiffies = get_jiffies_64() + 1; 204 205 while (get_jiffies_64() < waitjiffies) 206 udelay(10); 207 208 /* OK, now the tick has started, let's get the timer going */ 209 waitjiffies += 5; 210 211 /* enable, no interrupt or reload */ 212 __raw_writel(0x1, twd_base + TWD_TIMER_CONTROL); 213 214 /* maximum value */ 215 __raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER); 216 217 while (get_jiffies_64() < waitjiffies) 218 udelay(10); 219 220 count = __raw_readl(twd_base + TWD_TIMER_COUNTER); 221 222 twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5); 223 224 printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000, 225 (twd_timer_rate / 10000) % 100); 226 } 227 } 228 229 static irqreturn_t twd_handler(int irq, void *dev_id) 230 { 231 struct clock_event_device *evt = *(struct clock_event_device **)dev_id; 232 233 if (twd_timer_ack()) { 234 evt->event_handler(evt); 235 return IRQ_HANDLED; 236 } 237 238 return IRQ_NONE; 239 } 240 241 static void twd_get_clock(struct device_node *np) 242 { 243 int err; 244 245 if (np) 246 twd_clk = of_clk_get(np, 0); 247 else 248 twd_clk = clk_get_sys("smp_twd", NULL); 249 250 if (IS_ERR(twd_clk)) { 251 pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk)); 252 return; 253 } 254 255 err = clk_prepare_enable(twd_clk); 256 if (err) { 257 pr_err("smp_twd: clock failed to prepare+enable: %d\n", err); 258 clk_put(twd_clk); 259 return; 260 } 261 262 twd_timer_rate = clk_get_rate(twd_clk); 263 } 264 265 /* 266 * Setup the local clock events for a CPU. 267 */ 268 static int twd_timer_setup(struct clock_event_device *clk) 269 { 270 struct clock_event_device **this_cpu_clk; 271 int cpu = smp_processor_id(); 272 273 /* 274 * If the basic setup for this CPU has been done before don't 275 * bother with the below. 276 */ 277 if (per_cpu(percpu_setup_called, cpu)) { 278 __raw_writel(0, twd_base + TWD_TIMER_CONTROL); 279 clockevents_register_device(*__this_cpu_ptr(twd_evt)); 280 enable_percpu_irq(clk->irq, 0); 281 return 0; 282 } 283 per_cpu(percpu_setup_called, cpu) = true; 284 285 twd_calibrate_rate(); 286 287 /* 288 * The following is done once per CPU the first time .setup() is 289 * called. 290 */ 291 __raw_writel(0, twd_base + TWD_TIMER_CONTROL); 292 293 clk->name = "local_timer"; 294 clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | 295 CLOCK_EVT_FEAT_C3STOP; 296 clk->rating = 350; 297 clk->set_mode = twd_set_mode; 298 clk->set_next_event = twd_set_next_event; 299 clk->irq = twd_ppi; 300 301 this_cpu_clk = __this_cpu_ptr(twd_evt); 302 *this_cpu_clk = clk; 303 304 clockevents_config_and_register(clk, twd_timer_rate, 305 0xf, 0xffffffff); 306 enable_percpu_irq(clk->irq, 0); 307 308 return 0; 309 } 310 311 static struct local_timer_ops twd_lt_ops = { 312 .setup = twd_timer_setup, 313 .stop = twd_timer_stop, 314 }; 315 316 static int __init twd_local_timer_common_register(struct device_node *np) 317 { 318 int err; 319 320 twd_evt = alloc_percpu(struct clock_event_device *); 321 if (!twd_evt) { 322 err = -ENOMEM; 323 goto out_free; 324 } 325 326 err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt); 327 if (err) { 328 pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err); 329 goto out_free; 330 } 331 332 err = local_timer_register(&twd_lt_ops); 333 if (err) 334 goto out_irq; 335 336 twd_get_clock(np); 337 338 return 0; 339 340 out_irq: 341 free_percpu_irq(twd_ppi, twd_evt); 342 out_free: 343 iounmap(twd_base); 344 twd_base = NULL; 345 free_percpu(twd_evt); 346 347 return err; 348 } 349 350 int __init twd_local_timer_register(struct twd_local_timer *tlt) 351 { 352 if (twd_base || twd_evt) 353 return -EBUSY; 354 355 twd_ppi = tlt->res[1].start; 356 357 twd_base = ioremap(tlt->res[0].start, resource_size(&tlt->res[0])); 358 if (!twd_base) 359 return -ENOMEM; 360 361 return twd_local_timer_common_register(NULL); 362 } 363 364 #ifdef CONFIG_OF 365 static void __init twd_local_timer_of_register(struct device_node *np) 366 { 367 int err; 368 369 if (!is_smp() || !setup_max_cpus) 370 return; 371 372 twd_ppi = irq_of_parse_and_map(np, 0); 373 if (!twd_ppi) { 374 err = -EINVAL; 375 goto out; 376 } 377 378 twd_base = of_iomap(np, 0); 379 if (!twd_base) { 380 err = -ENOMEM; 381 goto out; 382 } 383 384 err = twd_local_timer_common_register(np); 385 386 out: 387 WARN(err, "twd_local_timer_of_register failed (%d)\n", err); 388 } 389 CLOCKSOURCE_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register); 390 CLOCKSOURCE_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register); 391 CLOCKSOURCE_OF_DECLARE(arm_twd_11mp, "arm,arm11mp-twd-timer", twd_local_timer_of_register); 392 #endif 393