// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#include <linux/syscore_ops.h>
#include <soc/at91/atmel_tcb.h>


/*
 * We're configured to use a specific TC block, one that's not hooked
 * up to external hardware, to provide a time solution:
 *
 *  - Two channels combine to create a free-running 32 bit counter
 *    with a base rate of 5+ MHz, packaged as a clocksource (with
 *    resolution better than 200 nsec).
 *  - Some chips support a 32 bit counter. A single channel is used for
 *    this 32 bit free-running counter; the second channel is not used.
 *
 *  - The third channel may be used to provide a clockevent source, used in
 *    either periodic or oneshot mode. For a 16-bit counter it runs at
 *    32 KiHz and can handle delays of up to two seconds. For 32-bit
 *    counters, it runs at the same rate as the clocksource.
 *
 * REVISIT behavior during system suspend states... we should disable
 * all clocks and save the power. Easily done for clockevent devices,
 * but clocksources won't necessarily get the needed notifications.
 * For deeper system sleep states, this will be mandatory...
 */

static void __iomem *tcaddr;
static struct
{
	u32 cmr;
	u32 imr;
	u32 rc;
	bool clken;
} tcb_cache[3];
static u32 bmr_cache;

static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128 };

static u64 tc_get_cycles(struct clocksource *cs)
{
	unsigned long flags;
	u32 lower, upper;

	raw_local_irq_save(flags);
	/*
	 * The two 16-bit halves live in separate channels; re-read the
	 * upper half until it is stable, so a carry from channel 0 into
	 * channel 1 between the reads cannot yield a torn 32-bit value.
	 */
	do {
		upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
		lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
	} while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));

	raw_local_irq_restore(flags);
	return (upper << 16) | lower;
}

static u64 tc_get_cycles32(struct clocksource *cs)
{
	return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
}

static void tc_clksrc_suspend(struct clocksource *cs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
		tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
		tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
		tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
		tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
					ATMEL_TC_CLKSTA);
	}

	bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
}

static void tc_clksrc_resume(struct clocksource *cs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
		/* Restore registers for the channel, RA and RB are not used */
		writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
		writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
		writel(0, tcaddr + ATMEL_TC_REG(i, RA));
		writel(0, tcaddr + ATMEL_TC_REG(i, RB));
		/* Disable all the interrupts */
		writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
		/* Reenable interrupts that were enabled before suspending */
		writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
		/* Start the clock if it was used */
		if (tcb_cache[i].clken)
			writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
	}

	/* Dual channel, chain channels */
	writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
	/* Finally, trigger all the channels */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}
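
/*
 * Back-of-the-envelope check on the clocksource parameters below
 * (illustrative arithmetic, not a datasheet figure): with
 * CLOCKSOURCE_MASK(32) the counter wraps every 2^32 cycles, so even at
 * the minimum ~5 MHz rate the wrap period is about
 * 2^32 / 5000000 ~= 859 seconds, comfortably long for the
 * timekeeping core.
 */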

static struct clocksource clksrc = {
	.rating = 200,
	.read = tc_get_cycles,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.suspend = tc_clksrc_suspend,
	.resume = tc_clksrc_resume,
};

static u64 notrace tc_sched_clock_read(void)
{
	return tc_get_cycles(&clksrc);
}

static u64 notrace tc_sched_clock_read32(void)
{
	return tc_get_cycles32(&clksrc);
}

static struct delay_timer tc_delay_timer;

static unsigned long tc_delay_timer_read(void)
{
	return tc_get_cycles(&clksrc);
}

static unsigned long notrace tc_delay_timer_read32(void)
{
	return tc_get_cycles32(&clksrc);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS

struct tc_clkevt_device {
	struct clock_event_device clkevt;
	struct clk *clk;
	u32 rate;
	void __iomem *regs;
};

static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
{
	return container_of(clkevt, struct tc_clkevt_device, clkevt);
}

static u32 timer_clock;

static int tc_shutdown(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	writel(0xff, regs + ATMEL_TC_REG(2, IDR));
	writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
	if (!clockevent_state_detached(d))
		clk_disable(tcd->clk);

	return 0;
}

static int tc_set_oneshot(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
		tc_shutdown(d);

	clk_enable(tcd->clk);

	/* count up to RC, then irq and stop */
	writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
	       ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

	/* set_next_event() configures and starts the timer */
	return 0;
}

static int tc_set_periodic(struct clock_event_device *d)
{
	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
	void __iomem *regs = tcd->regs;

	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
		tc_shutdown(d);

	/* By not making the gentime core emulate periodic mode on top
	 * of oneshot, we get lower overhead and improved accuracy.
	 */
	clk_enable(tcd->clk);

	/* count up to RC, then irq and restart */
	writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
	       regs + ATMEL_TC_REG(2, CMR));
	writel((tcd->rate + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));

	/* Enable clock and interrupts on RC compare */
	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));

	/* go go gadget! */
	writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
	       ATMEL_TC_REG(2, CCR));
	return 0;
}

static int tc_next_event(unsigned long delta, struct clock_event_device *d)
{
	writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));

	/* go go gadget! */
	writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
		       tcaddr + ATMEL_TC_REG(2, CCR));
	return 0;
}
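
/*
 * Worked example for the 16-bit case (simple arithmetic from the values
 * used in this file): the clockevent runs from the 32768 Hz slow clock
 * (ATMEL_TC_TIMER_CLOCK5), so the largest programmable delta of 0xffff
 * ticks amounts to 65535 / 32768 ~= 2.0 seconds. That is where the
 * "delays of up to two seconds" figure in the header comment comes from.
 */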

static struct tc_clkevt_device clkevt = {
	.clkevt = {
		.features = CLOCK_EVT_FEAT_PERIODIC |
			    CLOCK_EVT_FEAT_ONESHOT,
		/* Should be lower than at91rm9200's system timer */
		.rating = 125,
		.set_next_event = tc_next_event,
		.set_state_shutdown = tc_shutdown,
		.set_state_periodic = tc_set_periodic,
		.set_state_oneshot = tc_set_oneshot,
	},
};

static irqreturn_t ch2_irq(int irq, void *handle)
{
	struct tc_clkevt_device *dev = handle;
	unsigned int sr;

	sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
	if (sr & ATMEL_TC_CPCS) {
		dev->clkevt.event_handler(&dev->clkevt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
	int ret;
	struct clk *t2_clk = tc->clk[2];
	int irq = tc->irq[2];
	int bits = tc->tcb_config->counter_width;

	/* try to enable t2 clk to avoid future errors in mode change */
	ret = clk_prepare_enable(t2_clk);
	if (ret)
		return ret;

	clkevt.regs = tc->regs;
	clkevt.clk = t2_clk;

	if (bits == 32) {
		timer_clock = divisor_idx;
		clkevt.rate = clk_get_rate(t2_clk) / atmel_tcb_divisors[divisor_idx];
	} else {
		ret = clk_prepare_enable(tc->slow_clk);
		if (ret) {
			clk_disable_unprepare(t2_clk);
			return ret;
		}

		clkevt.rate = clk_get_rate(tc->slow_clk);
		timer_clock = ATMEL_TC_TIMER_CLOCK5;
	}

	clk_disable(t2_clk);

	clkevt.clkevt.cpumask = cpumask_of(0);

	ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
	if (ret) {
		clk_unprepare(t2_clk);
		if (bits != 32)
			clk_disable_unprepare(tc->slow_clk);
		return ret;
	}

	clockevents_config_and_register(&clkevt.clkevt, clkevt.rate, 1, BIT(bits) - 1);

	return ret;
}

#else /* !CONFIG_GENERIC_CLOCKEVENTS */

static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
{
	/* NOTHING */
	return 0;
}

#endif

static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0: waveform mode, input mclk/8, clock TIOA0 on overflow */
	writel(mck_divisor_idx			/* likely divide-by-8 */
	       | ATMEL_TC_WAVE
	       | ATMEL_TC_WAVESEL_UP		/* free-run */
	       | ATMEL_TC_ACPA_SET		/* TIOA0 rises at 0 */
	       | ATMEL_TC_ACPC_CLEAR,		/* (duty cycle 50%) */
	       tcaddr + ATMEL_TC_REG(0, CMR));
	writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
	writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
	writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* channel 1: waveform mode, input TIOA0 */
	writel(ATMEL_TC_XC1			/* input: TIOA0 */
	       | ATMEL_TC_WAVE
	       | ATMEL_TC_WAVESEL_UP,		/* free-run */
	       tcaddr + ATMEL_TC_REG(1, CMR));
	writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));

	/* chain channel 0 to channel 1 */
	writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
	/* then reset all the timers */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}
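
/*
 * How the dual-channel chaining above works: channel 0 free-runs at
 * mck / divisor and drives TIOA0 high at 0 (RA compare) and low at
 * 0x8000 (RC compare), so TIOA0 produces exactly one rising edge per
 * 0x10000 channel 0 ticks. Channel 1 counts those edges through XC1,
 * which makes it the upper 16 bits of the 32-bit value assembled in
 * tc_get_cycles().
 */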

static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
{
	/* channel 0: waveform mode, input mclk/8 */
	writel(mck_divisor_idx			/* likely divide-by-8 */
	       | ATMEL_TC_WAVE
	       | ATMEL_TC_WAVESEL_UP,		/* free-run */
	       tcaddr + ATMEL_TC_REG(0, CMR));
	writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR));	/* no irqs */
	writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));

	/* then reset all the timers */
	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}

static struct atmel_tcb_config tcb_rm9200_config = {
	.counter_width = 16,
};

static struct atmel_tcb_config tcb_sam9x5_config = {
	.counter_width = 32,
};

static struct atmel_tcb_config tcb_sama5d2_config = {
	.counter_width = 32,
	.has_gclk = 1,
};

static const struct of_device_id atmel_tcb_of_match[] = {
	{ .compatible = "atmel,at91rm9200-tcb", .data = &tcb_rm9200_config, },
	{ .compatible = "atmel,at91sam9x5-tcb", .data = &tcb_sam9x5_config, },
	{ .compatible = "atmel,sama5d2-tcb", .data = &tcb_sama5d2_config, },
	{ /* sentinel */ }
};

static int __init tcb_clksrc_init(struct device_node *node)
{
	struct atmel_tc tc;
	struct clk *t0_clk;
	const struct of_device_id *match;
	u64 (*tc_sched_clock)(void);
	u32 rate, divided_rate = 0;
	int best_divisor_idx = -1;
	int bits;
	int i;
	int ret;

	/* Protect against multiple calls */
	if (tcaddr)
		return 0;

	tc.regs = of_iomap(node->parent, 0);
	if (!tc.regs)
		return -ENXIO;

	t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
	if (IS_ERR(t0_clk))
		return PTR_ERR(t0_clk);

	tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
	if (IS_ERR(tc.slow_clk))
		return PTR_ERR(tc.slow_clk);

	tc.clk[0] = t0_clk;
	tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
	if (IS_ERR(tc.clk[1]))
		tc.clk[1] = t0_clk;
	tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
	if (IS_ERR(tc.clk[2]))
		tc.clk[2] = t0_clk;

	tc.irq[2] = of_irq_get(node->parent, 2);
	if (tc.irq[2] <= 0) {
		tc.irq[2] = of_irq_get(node->parent, 0);
		if (tc.irq[2] <= 0)
			return -EINVAL;
	}

	match = of_match_node(atmel_tcb_of_match, node->parent);
	if (!match)
		return -ENODEV;

	tc.tcb_config = match->data;
	bits = tc.tcb_config->counter_width;

	for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
		writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));

	ret = clk_prepare_enable(t0_clk);
	if (ret) {
		pr_debug("can't enable T0 clk\n");
		return ret;
	}
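
	/*
	 * Illustration of the divisor scan below, assuming (purely as an
	 * example) a 132 MHz t0_clk: 132/2 = 66 MHz and 132/8 = 16.5 MHz
	 * both stay above the 5 MHz floor, while 132/32 = 4.125 MHz falls
	 * below it and stops the loop, so the scan settles on the
	 * divide-by-8 entry at 16.5 MHz.
	 */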

	/* How fast will we be counting? Pick something over 5 MHz. */
	rate = (u32)clk_get_rate(t0_clk);
	i = 0;
	if (tc.tcb_config->has_gclk)
		i = 1;
	for (; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
		unsigned int divisor = atmel_tcb_divisors[i];
		unsigned int tmp;

		tmp = rate / divisor;
		pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
		if ((best_divisor_idx >= 0) && (tmp < 5 * 1000 * 1000))
			break;
		divided_rate = tmp;
		best_divisor_idx = i;
	}

	clksrc.name = kbasename(node->parent->full_name);
	clkevt.clkevt.name = kbasename(node->parent->full_name);
	pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
		 ((divided_rate % 1000000) + 500) / 1000);

	tcaddr = tc.regs;

	if (bits == 32) {
		/* use appropriate function to read 32 bit counter */
		clksrc.read = tc_get_cycles32;
		/* set up only channel 0 */
		tcb_setup_single_chan(&tc, best_divisor_idx);
		tc_sched_clock = tc_sched_clock_read32;
		tc_delay_timer.read_current_timer = tc_delay_timer_read32;
	} else {
		/* we have three clocks no matter what the
		 * underlying platform supports.
		 */
		ret = clk_prepare_enable(tc.clk[1]);
		if (ret) {
			pr_debug("can't enable T1 clk\n");
			goto err_disable_t0;
		}
		/* set up both channel 0 & 1 */
		tcb_setup_dual_chan(&tc, best_divisor_idx);
		tc_sched_clock = tc_sched_clock_read;
		tc_delay_timer.read_current_timer = tc_delay_timer_read;
	}

	/* and away we go! */
	ret = clocksource_register_hz(&clksrc, divided_rate);
	if (ret)
		goto err_disable_t1;

	/* channel 2: periodic and oneshot timer support */
	ret = setup_clkevents(&tc, best_divisor_idx);
	if (ret)
		goto err_unregister_clksrc;

	sched_clock_register(tc_sched_clock, 32, divided_rate);

	tc_delay_timer.freq = divided_rate;
	register_current_timer_delay(&tc_delay_timer);

	return 0;

err_unregister_clksrc:
	clocksource_unregister(&clksrc);

err_disable_t1:
	if (bits != 32)
		clk_disable_unprepare(tc.clk[1]);

err_disable_t0:
	clk_disable_unprepare(t0_clk);

	tcaddr = NULL;

	return ret;
}
TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
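
/*
 * Illustrative device tree snippet for the "atmel,tcb-timer" binding
 * matched above (node names and the address are made up for the
 * example): the timer node is a child of the TC block, and this driver
 * reads registers, clocks and interrupts from node->parent.
 *
 *	tcb0: timer@f800c000 {
 *		compatible = "atmel,at91sam9x5-tcb", "simple-mfd", "syscon";
 *		...
 *		timer@0 {
 *			compatible = "atmel,tcb-timer";
 *			reg = <0>, <1>;
 *		};
 *	};
 */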