// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2000-2001 Deep Blue Solutions
// Copyright (C) 2002 Shane Nay (shane@minirl.com)
// Copyright (C) 2006-2007 Pavel Pisa (ppisa@pikron.com)
// Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <soc/imx/timer.h>

/*
 * There are 4 versions of the timer hardware on Freescale MXC hardware.
 *  - MX1/MXL
 *  - MX21, MX27.
 *  - MX25, MX31, MX35, MX37, MX51, MX6Q(rev1.0)
 *  - MX6DL, MX6SX, MX6Q(rev1.1+)
 */

/* defines common for all i.MX */
#define MXC_TCTL		0x00
#define MXC_TCTL_TEN		(1 << 0) /* Enable module */
#define MXC_TPRER		0x04

/* MX1, MX21, MX27 */
#define MX1_2_TCTL_CLK_PCLK1	(1 << 1)
#define MX1_2_TCTL_IRQEN	(1 << 4)
#define MX1_2_TCTL_FRR		(1 << 8)
#define MX1_2_TCMP		0x08
#define MX1_2_TCN		0x10
#define MX1_2_TSTAT		0x14

/* MX21, MX27 */
#define MX2_TSTAT_CAPT		(1 << 1)
#define MX2_TSTAT_COMP		(1 << 0)

/* MX31, MX35, MX25, MX5, MX6 */
#define V2_TCTL_WAITEN		(1 << 3) /* Wait enable mode */
#define V2_TCTL_CLK_IPG		(1 << 6)
#define V2_TCTL_CLK_PER		(2 << 6)
#define V2_TCTL_CLK_OSC_DIV8	(5 << 6)
#define V2_TCTL_FRR		(1 << 9)
#define V2_TCTL_24MEN		(1 << 10)
#define V2_TPRER_PRE24M		12
#define V2_IR			0x0c
#define V2_TSTAT		0x08
#define V2_TSTAT_OF1		(1 << 0)
#define V2_TCN			0x24
#define V2_TCMP			0x10

#define V2_TIMER_RATE_OSC_DIV8	3000000

struct imx_timer {
	enum imx_gpt_type type;
	void __iomem *base;
	int irq;
	struct clk *clk_per;
	struct clk *clk_ipg;
	const struct imx_gpt_data *gpt;
	struct clock_event_device ced;
	struct irqaction act;
};

struct imx_gpt_data {
	int reg_tstat;
	int reg_tcn;
	int reg_tcmp;
	void (*gpt_setup_tctl)(struct imx_timer *imxtm);
	void (*gpt_irq_enable)(struct imx_timer *imxtm);
	void (*gpt_irq_disable)(struct imx_timer *imxtm);
	void (*gpt_irq_acknowledge)(struct imx_timer *imxtm);
	int (*set_next_event)(unsigned long evt,
			      struct clock_event_device *ced);
};

static inline struct imx_timer *to_imx_timer(struct clock_event_device *ced)
{
	return container_of(ced, struct imx_timer, ced);
}

static void imx1_gpt_irq_disable(struct imx_timer *imxtm)
{
	unsigned int tmp;

	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp & ~MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_irq_disable imx1_gpt_irq_disable

static void imx31_gpt_irq_disable(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + V2_IR);
}
#define imx6dl_gpt_irq_disable imx31_gpt_irq_disable

static void imx1_gpt_irq_enable(struct imx_timer *imxtm)
{
	unsigned int tmp;

	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp | MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_irq_enable imx1_gpt_irq_enable

static void imx31_gpt_irq_enable(struct imx_timer *imxtm)
{
	writel_relaxed(1 << 0, imxtm->base + V2_IR);
}
#define imx6dl_gpt_irq_enable imx31_gpt_irq_enable

static void imx1_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + MX1_2_TSTAT);
}

static void imx21_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(MX2_TSTAT_CAPT | MX2_TSTAT_COMP,
		       imxtm->base + MX1_2_TSTAT);
}

static void imx31_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(V2_TSTAT_OF1, imxtm->base + V2_TSTAT);
}
#define imx6dl_gpt_irq_acknowledge imx31_gpt_irq_acknowledge

static void __iomem *sched_clock_reg;

static u64 notrace mxc_read_sched_clock(void)
{
	return sched_clock_reg ? readl_relaxed(sched_clock_reg) : 0;
}

#if defined(CONFIG_ARM)
static struct delay_timer imx_delay_timer;

static unsigned long imx_read_current_timer(void)
{
	return readl_relaxed(sched_clock_reg);
}
#endif

static int __init mxc_clocksource_init(struct imx_timer *imxtm)
{
	unsigned int c = clk_get_rate(imxtm->clk_per);
	void __iomem *reg = imxtm->base + imxtm->gpt->reg_tcn;

#if defined(CONFIG_ARM)
	imx_delay_timer.read_current_timer = &imx_read_current_timer;
	imx_delay_timer.freq = c;
	register_current_timer_delay(&imx_delay_timer);
#endif

	sched_clock_reg = reg;

	sched_clock_register(mxc_read_sched_clock, 32, c);
	return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
				     clocksource_mmio_readl_up);
}

/* clock event */
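
/*
 * A note on the two set_next_event() helpers below: the counter is
 * free-running, so the requested compare value may already lie in the
 * past by the time TCMP is written.  The signed cast in
 * "(int)(tcmp - TCN) < 0" makes that check wrap-safe; returning -ETIME
 * lets the clockevents core reprogram the event with a larger delta.
 * The v2 variant additionally skips the check for deltas of 0x7fffffff
 * and above, where the signed difference is no longer meaningful.
 */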

static int mx1_2_set_next_event(unsigned long evt,
				struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long tcmp;

	tcmp = readl_relaxed(imxtm->base + MX1_2_TCN) + evt;

	writel_relaxed(tcmp, imxtm->base + MX1_2_TCMP);

	return (int)(tcmp - readl_relaxed(imxtm->base + MX1_2_TCN)) < 0 ?
				-ETIME : 0;
}

static int v2_set_next_event(unsigned long evt,
			     struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long tcmp;

	tcmp = readl_relaxed(imxtm->base + V2_TCN) + evt;

	writel_relaxed(tcmp, imxtm->base + V2_TCMP);

	return evt < 0x7fffffff &&
		(int)(tcmp - readl_relaxed(imxtm->base + V2_TCN)) < 0 ?
				-ETIME : 0;
}

static int mxc_shutdown(struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	u32 tcn;

	/* Disable interrupt in GPT module */
	imxtm->gpt->gpt_irq_disable(imxtm);

	tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
	/* Set event time into the far, far future */
	writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

	/* Clear pending interrupt */
	imxtm->gpt->gpt_irq_acknowledge(imxtm);

#ifdef DEBUG
	printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

	return 0;
}

static int mxc_set_oneshot(struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);

	/* Disable interrupt in GPT module */
	imxtm->gpt->gpt_irq_disable(imxtm);

	if (!clockevent_state_oneshot(ced)) {
		u32 tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
		/* Set event time into the far, far future */
		writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

		/* Clear pending interrupt */
		imxtm->gpt->gpt_irq_acknowledge(imxtm);
	}

#ifdef DEBUG
	printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

	/*
	 * Do not put the overhead of interrupt enable/disable into
	 * mxc_set_next_event(); after the mode switch the core has
	 * about 4 minutes to call mxc_set_next_event() or shut the
	 * clock down.
	 */
	imxtm->gpt->gpt_irq_enable(imxtm);

	return 0;
}

/*
 * IRQ handler for the timer
 */
static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *ced = dev_id;
	struct imx_timer *imxtm = to_imx_timer(ced);
	uint32_t tstat;

	tstat = readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);

	imxtm->gpt->gpt_irq_acknowledge(imxtm);

	ced->event_handler(ced);

	return IRQ_HANDLED;
}

static int __init mxc_clockevent_init(struct imx_timer *imxtm)
{
	struct clock_event_device *ced = &imxtm->ced;
	struct irqaction *act = &imxtm->act;

	ced->name = "mxc_timer1";
	ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
	ced->set_state_shutdown = mxc_shutdown;
	ced->set_state_oneshot = mxc_set_oneshot;
	ced->tick_resume = mxc_shutdown;
	ced->set_next_event = imxtm->gpt->set_next_event;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	ced->irq = imxtm->irq;
	clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
					0xff, 0xfffffffe);

	act->name = "i.MX Timer Tick";
	act->flags = IRQF_TIMER | IRQF_IRQPOLL;
	act->handler = mxc_timer_interrupt;
	act->dev_id = ced;

	return setup_irq(imxtm->irq, act);
}

static void imx1_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_setup_tctl imx1_gpt_setup_tctl

static void imx31_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
	if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8)
		tctl_val |= V2_TCTL_CLK_OSC_DIV8;
	else
		tctl_val |= V2_TCTL_CLK_PER;

	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static void imx6dl_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
	if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8) {
		tctl_val |= V2_TCTL_CLK_OSC_DIV8;
		/* 24 / 8 = 3 MHz */
		writel_relaxed(7 << V2_TPRER_PRE24M, imxtm->base + MXC_TPRER);
		tctl_val |= V2_TCTL_24MEN;
	} else {
		tctl_val |= V2_TCTL_CLK_PER;
	}

	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static const struct imx_gpt_data imx1_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
	.gpt_irq_enable = imx1_gpt_irq_enable,
	.gpt_irq_disable = imx1_gpt_irq_disable,
	.gpt_irq_acknowledge = imx1_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx1_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx21_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
	.gpt_irq_enable = imx21_gpt_irq_enable,
	.gpt_irq_disable = imx21_gpt_irq_disable,
	.gpt_irq_acknowledge = imx21_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx21_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx31_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
	.gpt_irq_enable = imx31_gpt_irq_enable,
	.gpt_irq_disable = imx31_gpt_irq_disable,
	.gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx31_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};

static const struct imx_gpt_data imx6dl_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
	.gpt_irq_enable = imx6dl_gpt_irq_enable,
	.gpt_irq_disable = imx6dl_gpt_irq_disable,
	.gpt_irq_acknowledge = imx6dl_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx6dl_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};

static int __init _mxc_timer_init(struct imx_timer *imxtm)
{
	int ret;

	switch (imxtm->type) {
	case GPT_TYPE_IMX1:
		imxtm->gpt = &imx1_gpt_data;
		break;
	case GPT_TYPE_IMX21:
		imxtm->gpt = &imx21_gpt_data;
		break;
	case GPT_TYPE_IMX31:
		imxtm->gpt = &imx31_gpt_data;
		break;
	case GPT_TYPE_IMX6DL:
		imxtm->gpt = &imx6dl_gpt_data;
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(imxtm->clk_per)) {
		pr_err("i.MX timer: unable to get clk\n");
		return PTR_ERR(imxtm->clk_per);
	}

	if (!IS_ERR(imxtm->clk_ipg))
		clk_prepare_enable(imxtm->clk_ipg);

	clk_prepare_enable(imxtm->clk_per);

	/*
	 * Initialise to a known state (all timers off, and timing reset)
	 */

	writel_relaxed(0, imxtm->base + MXC_TCTL);
	writel_relaxed(0, imxtm->base + MXC_TPRER); /* see datasheet note */

	imxtm->gpt->gpt_setup_tctl(imxtm);

	/* init and register the timer to the framework */
	ret = mxc_clocksource_init(imxtm);
	if (ret)
		return ret;

	return mxc_clockevent_init(imxtm);
}

void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
{
	struct imx_timer *imxtm;

	imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
	BUG_ON(!imxtm);

	imxtm->clk_per = clk_get_sys("imx-gpt.0", "per");
	imxtm->clk_ipg = clk_get_sys("imx-gpt.0", "ipg");

	imxtm->base = ioremap(pbase, SZ_4K);
	BUG_ON(!imxtm->base);

	imxtm->type = type;
	imxtm->irq = irq;

	_mxc_timer_init(imxtm);
}
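
/*
 * mxc_timer_init() above is the legacy, non-DT entry point.  As a rough
 * sketch of how a board file would use it (the MX21_* constants are only
 * illustrative here, standing in for the base address and IRQ constants
 * from the old mach-imx SoC headers):
 *
 *	mxc_timer_init(MX21_GPT1_BASE_ADDR, MX21_INT_GPT1, GPT_TYPE_IMX21);
 *
 * The address and IRQ come from the SoC headers; the GPT type selects the
 * matching imx_gpt_data ops table in _mxc_timer_init().
 */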

static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
	struct imx_timer *imxtm;
	static int initialized;
	int ret;

	/* Support one instance only */
	if (initialized)
		return 0;

	imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
	if (!imxtm)
		return -ENOMEM;

	imxtm->base = of_iomap(np, 0);
	if (!imxtm->base)
		return -ENXIO;

	imxtm->irq = irq_of_parse_and_map(np, 0);
	if (imxtm->irq <= 0)
		return -EINVAL;

	imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");

	/* Try osc_per first, and fall back to per otherwise */
	imxtm->clk_per = of_clk_get_by_name(np, "osc_per");
	if (IS_ERR(imxtm->clk_per))
		imxtm->clk_per = of_clk_get_by_name(np, "per");

	imxtm->type = type;

	ret = _mxc_timer_init(imxtm);
	if (ret)
		return ret;

	initialized = 1;

	return 0;
}

static int __init imx1_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
}

static int __init imx21_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
}

static int __init imx31_timer_init_dt(struct device_node *np)
{
	enum imx_gpt_type type = GPT_TYPE_IMX31;

	/*
	 * We were using the same compatible string for the i.MX6Q/D and
	 * i.MX6DL/S GPT devices, while they actually have different
	 * programming models.  This is a workaround to keep the existing
	 * i.MX6DL/S DTBs working with the new kernel.
	 */
	if (of_machine_is_compatible("fsl,imx6dl"))
		type = GPT_TYPE_IMX6DL;

	return mxc_timer_init_dt(np, type);
}

static int __init imx6dl_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
}

TIMER_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
TIMER_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx51_timer, "fsl,imx51-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx53_timer, "fsl,imx53-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6q_timer, "fsl,imx6q-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6dl_timer, "fsl,imx6dl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sl_timer, "fsl,imx6sl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sx_timer, "fsl,imx6sx-gpt", imx6dl_timer_init_dt);
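
/*
 * The TIMER_OF_DECLARE() entries above bind this driver to GPT nodes in
 * the device tree.  A sketch of such a node (values are illustrative,
 * loosely following the i.MX6Q layout; the authoritative ones live in
 * the SoC .dtsi files):
 *
 *	gpt: timer@2098000 {
 *		compatible = "fsl,imx6q-gpt";
 *		reg = <0x02098000 0x4000>;
 *		interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&clks IMX6QDL_CLK_GPT_IPG>,
 *			 <&clks IMX6QDL_CLK_GPT_IPG_PER>;
 *		clock-names = "ipg", "per";
 *	};
 *
 * mxc_timer_init_dt() maps "reg", parses the first interrupt, and looks
 * up the "ipg" and "osc_per"/"per" clocks by name.
 */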