// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/syscore_ops.h>
#include <soc/at91/atmel_tcb.h>


/*
 * We're configured to use a specific TC block, one that's not hooked
 * up to external hardware, to provide a time solution:
 *
 *   - Two channels combine to create a free-running 32 bit counter
 *     with a base rate of 5+ MHz, packaged as a clocksource (with
 *     resolution better than 200 nsec).
 *   - Some chips support a 32 bit counter. A single channel is used for
 *     this 32 bit free-running counter; the second channel is not used.
 *
 *   - The third channel may be used to provide a 16-bit clockevent
 *     source, used in either periodic or oneshot mode. This runs
 *     at 32 KiHz, and can handle delays of up to two seconds.
 *
 * REVISIT behavior during system suspend states... we should disable
 * all clocks and save the power. Easily done for clockevent devices,
 * but clocksources won't necessarily get the needed notifications.
 * For deeper system sleep states, this will be mandatory...
 */

static void __iomem *tcaddr;
static struct
{
	u32 cmr;
	u32 imr;
	u32 rc;
	bool clken;
} tcb_cache[3];
static u32 bmr_cache;

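/*
 * Two chained 16-bit channels form the 32-bit clocksource value:
 * channel 1 holds the upper half, channel 0 the lower half. The
 * upper half is sampled again after the lower half and the read is
 * retried if it changed, so a carry from channel 0 into channel 1
 * between the two reads cannot produce a torn value.
 */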
static u64 tc_get_cycles(struct clocksource *cs)
{
	unsigned long flags;
	u32 lower, upper;

	raw_local_irq_save(flags);
	do {
		upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
		lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
	} while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));

	raw_local_irq_restore(flags);
	return (upper << 16) | lower;
}

static u64 tc_get_cycles32(struct clocksource *cs)
{
	return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
}

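/*
 * The TC block may lose state across deeper system sleep modes, so
 * CMR, IMR, RC and the clock-enable status of each channel, plus the
 * block mode register, are cached on suspend and written back on
 * resume before the channels are retriggered.
 */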
static void tc_clksrc_suspend(struct clocksource *cs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
		tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
		tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
		tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
		tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
					ATMEL_TC_CLKSTA);
	}

	bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
}

static void tc_clksrc_resume(struct clocksource *cs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
		/* Restore registers for the channel, RA and RB are not used */
		writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
		writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
		writel(0, tcaddr + ATMEL_TC_REG(i, RA));
		writel(0, tcaddr + ATMEL_TC_REG(i, RB));
		/* Disable all the interrupts */
		writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
		/* Reenable interrupts that were enabled before suspending */
		writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
		/* Start the clock if it was used */
		if (tcb_cache[i].clken)
			writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
	}

	/* Dual channel, chain channels */
	writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
	/* Finally, trigger all the channels */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static struct clocksource clksrc = {
	.rating		= 200,
	.read		= tc_get_cycles,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.suspend	= tc_clksrc_suspend,
	.resume		= tc_clksrc_resume,
};

static u64 notrace tc_sched_clock_read(void)
{
	return tc_get_cycles(&clksrc);
}

static u64 notrace tc_sched_clock_read32(void)
{
	return tc_get_cycles32(&clksrc);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS

struct tc_clkevt_device {
	struct clock_event_device clkevt;
	struct clk *clk;
	void __iomem *regs;
};

static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
{
	return container_of(clkevt, struct tc_clkevt_device, clkevt);
}

/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
 * because using one of the divided clocks would usually mean the
 * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
 *
 * A divided clock could be good for high resolution timers, since
 * 30.5 usec resolution can seem "low".
 */
static u32 timer_clock;

static int tc_shutdown(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	writel(0xff, regs + ATMEL_TC_REG(2, IDR));
	writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
	if (!clockevent_state_detached(d))
		clk_disable(tcd->clk);

	return 0;
}

static int tc_set_oneshot(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
		tc_shutdown(d);

	clk_enable(tcd->clk);

	/* slow clock, count up to RC, then irq and stop */
	writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
	       ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

	/* set_next_event() configures and starts the timer */
	return 0;
}

static int tc_set_periodic(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
		tc_shutdown(d);

	/* By not making the gentime core emulate periodic mode on top
	 * of oneshot, we get lower overhead and improved accuracy.
	 */
	clk_enable(tcd->clk);

	/* slow clock, count up to RC, then irq and restart */
	writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
	       regs + ATMEL_TC_REG(2, CMR));
	/* 32768 / HZ, rounded to nearest: one interrupt per jiffy */
	writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));

	/* Enable clock and interrupts on RC compare */
	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

	/* go go gadget! */
	writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
	       ATMEL_TC_REG(2, CCR));
	return 0;
}

static int tc_next_event(unsigned long delta, struct clock_event_device *d)
{
	writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));

	/* go go gadget! */
	writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
		       tcaddr + ATMEL_TC_REG(2, CCR));
	return 0;
}

static struct tc_clkevt_device clkevt = {
	.clkevt	= {
		.features		= CLOCK_EVT_FEAT_PERIODIC |
					  CLOCK_EVT_FEAT_ONESHOT,
		/* Should be lower than at91rm9200's system timer */
		.rating			= 125,
		.set_next_event		= tc_next_event,
		.set_state_shutdown	= tc_shutdown,
		.set_state_periodic	= tc_set_periodic,
		.set_state_oneshot	= tc_set_oneshot,
	},
};

static irqreturn_t ch2_irq(int irq, void *handle)
{
	struct tc_clkevt_device *dev = handle;
	unsigned int sr;

	sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
	if (sr & ATMEL_TC_CPCS) {
		dev->clkevt.event_handler(&dev->clkevt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
	int ret;
	struct clk *t2_clk = tc->clk[2];
	int irq = tc->irq[2];

	ret = clk_prepare_enable(tc->slow_clk);
	if (ret)
		return ret;

	/* try to enable t2 clk to avoid future errors in mode change */
	ret = clk_prepare_enable(t2_clk);
	if (ret) {
		clk_disable_unprepare(tc->slow_clk);
		return ret;
	}

	clk_disable(t2_clk);

	clkevt.regs = tc->regs;
	clkevt.clk = t2_clk;

	timer_clock = clk32k_divisor_idx;

	clkevt.clkevt.cpumask = cpumask_of(0);

	ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
	if (ret) {
		clk_unprepare(t2_clk);
		clk_disable_unprepare(tc->slow_clk);
		return ret;
	}

	clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);

	return ret;
}

#else /* !CONFIG_GENERIC_CLOCKEVENTS */

static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
{
	/* NOTHING */
	return 0;
}

#endif

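/*
 * Chaining: channel 0 drives TIOA0 high at 0 and low at 0x8000, so
 * TIOA0 carries exactly one rising edge per 16-bit wrap of channel 0.
 * Channel 1 counts those edges on XC1 and thus acts as the upper half
 * of the 32-bit counter read by tc_get_cycles().
 */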
static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0:  waveform mode, input mclk/8, clock TIOA0 on overflow */
	writel(mck_divisor_idx			/* likely divide-by-8 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP		/* free-run */
			| ATMEL_TC_ACPA_SET		/* TIOA0 rises at 0 */
			| ATMEL_TC_ACPC_CLEAR,		/* (duty cycle 50%) */
			tcaddr + ATMEL_TC_REG(0, CMR));
	writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
	writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
	writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* channel 1:  waveform mode, input TIOA0 */
	writel(ATMEL_TC_XC1			/* input: TIOA0 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,		/* free-run */
			tcaddr + ATMEL_TC_REG(1, CMR));
	writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));

	/* chain channel 0 to channel 1 */
	writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
	/* then reset all the timers */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0:  waveform mode, input mclk/8 */
	writel(mck_divisor_idx			/* likely divide-by-8 */
			| ATMEL_TC_WAVE
			| ATMEL_TC_WAVESEL_UP,		/* free-run */
			tcaddr + ATMEL_TC_REG(0, CMR));
	writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* then reset all the timers */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static const u8 atmel_tcb_divisors[5] = { 2, 8, 32, 128, 0, };

static const struct of_device_id atmel_tcb_of_match[] = {
	{ .compatible = "atmel,at91rm9200-tcb", .data = (void *)16, },
	{ .compatible = "atmel,at91sam9x5-tcb", .data = (void *)32, },
	{ /* sentinel */ }
};

static int __init tcb_clksrc_init(struct device_node *node)
{
	struct atmel_tc tc;
	struct clk *t0_clk;
	const struct of_device_id *match;
	u64 (*tc_sched_clock)(void);
	u32 rate, divided_rate = 0;
	int best_divisor_idx = -1;
	int clk32k_divisor_idx = -1;
	int bits;
	int i;
	int ret;

	/* Protect against multiple calls */
	if (tcaddr)
		return 0;

	tc.regs = of_iomap(node->parent, 0);
	if (!tc.regs)
		return -ENXIO;

	t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
	if (IS_ERR(t0_clk))
		return PTR_ERR(t0_clk);

	tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
	if (IS_ERR(tc.slow_clk))
		return PTR_ERR(tc.slow_clk);

	tc.clk[0] = t0_clk;
	tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
	if (IS_ERR(tc.clk[1]))
		tc.clk[1] = t0_clk;
	tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
	if (IS_ERR(tc.clk[2]))
		tc.clk[2] = t0_clk;

	tc.irq[2] = of_irq_get(node->parent, 2);
	if (tc.irq[2] <= 0) {
		tc.irq[2] = of_irq_get(node->parent, 0);
		if (tc.irq[2] <= 0)
			return -EINVAL;
	}

	match = of_match_node(atmel_tcb_of_match, node->parent);
	bits = (uintptr_t)match->data;

	for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
		writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));

	ret = clk_prepare_enable(t0_clk);
	if (ret) {
		pr_debug("can't enable T0 clk\n");
		return ret;
	}

	/* How fast will we be counting?  Pick something over 5 MHz.  */
	rate = (u32) clk_get_rate(t0_clk);
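	/*
	 * Keep the largest divisor whose divided rate stays above 5 MHz,
	 * trading resolution for a longer wrap period.  The zero entry
	 * in the table marks the CMR clock-select slot of the 32 KiHz
	 * slow clock, remembered for the 16-bit clockevent channel.
	 */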
	for (i = 0; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
		unsigned divisor = atmel_tcb_divisors[i];
		unsigned tmp;

		/* remember 32 KiHz clock for later */
		if (!divisor) {
			clk32k_divisor_idx = i;
			continue;
		}

		tmp = rate / divisor;
		pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
		if (best_divisor_idx > 0) {
			if (tmp < 5 * 1000 * 1000)
				continue;
		}
		divided_rate = tmp;
		best_divisor_idx = i;
	}

	clksrc.name = kbasename(node->parent->full_name);
	clkevt.clkevt.name = kbasename(node->parent->full_name);
	pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
		 ((divided_rate % 1000000) + 500) / 1000);

	tcaddr = tc.regs;

	if (bits == 32) {
		/* use appropriate function to read 32 bit counter */
		clksrc.read = tc_get_cycles32;
		/* setup only channel 0 */
		tcb_setup_single_chan(&tc, best_divisor_idx);
		tc_sched_clock = tc_sched_clock_read32;
	} else {
		/* we have three clocks no matter what the
		 * underlying platform supports.
		 */
		ret = clk_prepare_enable(tc.clk[1]);
		if (ret) {
			pr_debug("can't enable T1 clk\n");
			goto err_disable_t0;
		}
		/* setup both channel 0 & 1 */
		tcb_setup_dual_chan(&tc, best_divisor_idx);
		tc_sched_clock = tc_sched_clock_read;
	}

	/* and away we go! */
	ret = clocksource_register_hz(&clksrc, divided_rate);
	if (ret)
		goto err_disable_t1;

	/* channel 2:  periodic and oneshot timer support */
	ret = setup_clkevents(&tc, clk32k_divisor_idx);
	if (ret)
		goto err_unregister_clksrc;

	sched_clock_register(tc_sched_clock, 32, divided_rate);

	return 0;

err_unregister_clksrc:
	clocksource_unregister(&clksrc);

err_disable_t1:
	if (bits != 32)
		clk_disable_unprepare(tc.clk[1]);

err_disable_t0:
	clk_disable_unprepare(t0_clk);

	tcaddr = NULL;

	return ret;
}
TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);