// SPDX-License-Identifier: GPL-2.0+
//
//  Copyright (C) 2000-2001 Deep Blue Solutions
//  Copyright (C) 2002 Shane Nay (shane@minirl.com)
//  Copyright (C) 2006-2007 Pavel Pisa (ppisa@pikron.com)
//  Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <soc/imx/timer.h>

/*
 * There are 4 versions of the timer hardware on Freescale MXC SoCs:
 *  - MX1/MXL
 *  - MX21, MX27
 *  - MX25, MX31, MX35, MX37, MX51, MX6Q(rev1.0)
 *  - MX6DL, MX6SX, MX6Q(rev1.1+)
 */

/* defines common for all i.MX */
#define MXC_TCTL		0x00
#define MXC_TCTL_TEN		(1 << 0) /* Enable module */
#define MXC_TPRER		0x04

/* MX1, MX21, MX27 */
#define MX1_2_TCTL_CLK_PCLK1	(1 << 1)
#define MX1_2_TCTL_IRQEN	(1 << 4)
#define MX1_2_TCTL_FRR		(1 << 8)
#define MX1_2_TCMP		0x08
#define MX1_2_TCN		0x10
#define MX1_2_TSTAT		0x14

/* MX21, MX27 */
#define MX2_TSTAT_CAPT		(1 << 1)
#define MX2_TSTAT_COMP		(1 << 0)

/* MX31, MX35, MX25, MX5, MX6 */
#define V2_TCTL_WAITEN		(1 << 3) /* Wait enable mode */
#define V2_TCTL_CLK_IPG		(1 << 6)
#define V2_TCTL_CLK_PER		(2 << 6)
#define V2_TCTL_CLK_OSC_DIV8	(5 << 6)
#define V2_TCTL_FRR		(1 << 9)
#define V2_TCTL_24MEN		(1 << 10)
#define V2_TPRER_PRE24M		12
#define V2_IR			0x0c
#define V2_TSTAT		0x08
#define V2_TSTAT_OF1		(1 << 0)
#define V2_TCN			0x24
#define V2_TCMP			0x10

#define V2_TIMER_RATE_OSC_DIV8	3000000

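/*
 * Per-instance driver state: MMIO base, IRQ, clocks, and the per-SoC
 * ops/register-layout table (struct imx_gpt_data) that _mxc_timer_init()
 * selects from the GPT hardware type.
 */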
struct imx_timer {
	enum imx_gpt_type type;
	void __iomem *base;
	int irq;
	struct clk *clk_per;
	struct clk *clk_ipg;
	const struct imx_gpt_data *gpt;
	struct clock_event_device ced;
	struct irqaction act;
};

struct imx_gpt_data {
	int reg_tstat;
	int reg_tcn;
	int reg_tcmp;
	void (*gpt_setup_tctl)(struct imx_timer *imxtm);
	void (*gpt_irq_enable)(struct imx_timer *imxtm);
	void (*gpt_irq_disable)(struct imx_timer *imxtm);
	void (*gpt_irq_acknowledge)(struct imx_timer *imxtm);
	int (*set_next_event)(unsigned long evt,
			      struct clock_event_device *ced);
};

static inline struct imx_timer *to_imx_timer(struct clock_event_device *ced)
{
	return container_of(ced, struct imx_timer, ced);
}

static void imx1_gpt_irq_disable(struct imx_timer *imxtm)
{
	unsigned int tmp;

	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp & ~MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_irq_disable imx1_gpt_irq_disable

static void imx31_gpt_irq_disable(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + V2_IR);
}
#define imx6dl_gpt_irq_disable imx31_gpt_irq_disable

static void imx1_gpt_irq_enable(struct imx_timer *imxtm)
{
	unsigned int tmp;

	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp | MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_irq_enable imx1_gpt_irq_enable

static void imx31_gpt_irq_enable(struct imx_timer *imxtm)
{
	writel_relaxed(1 << 0, imxtm->base + V2_IR);
}
#define imx6dl_gpt_irq_enable imx31_gpt_irq_enable

static void imx1_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + MX1_2_TSTAT);
}

static void imx21_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(MX2_TSTAT_CAPT | MX2_TSTAT_COMP,
				imxtm->base + MX1_2_TSTAT);
}

static void imx31_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(V2_TSTAT_OF1, imxtm->base + V2_TSTAT);
}
#define imx6dl_gpt_irq_acknowledge imx31_gpt_irq_acknowledge

static void __iomem *sched_clock_reg;

static u64 notrace mxc_read_sched_clock(void)
{
	return sched_clock_reg ? readl_relaxed(sched_clock_reg) : 0;
}

static struct delay_timer imx_delay_timer;

static unsigned long imx_read_current_timer(void)
{
	return readl_relaxed(sched_clock_reg);
}

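/*
 * Register the free-running GPT counter as a clocksource, as the
 * sched_clock source, and as the reference used by the timer-based
 * delay (udelay) implementation.
 */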
static int __init mxc_clocksource_init(struct imx_timer *imxtm)
{
	unsigned int c = clk_get_rate(imxtm->clk_per);
	void __iomem *reg = imxtm->base + imxtm->gpt->reg_tcn;

	imx_delay_timer.read_current_timer = &imx_read_current_timer;
	imx_delay_timer.freq = c;
	register_current_timer_delay(&imx_delay_timer);

	sched_clock_reg = reg;

	sched_clock_register(mxc_read_sched_clock, 32, c);
	return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
			clocksource_mmio_readl_up);
}

/* clock event */

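/*
 * Program the next event by adding the requested delta to the current
 * counter value and writing the result to the compare register.  If the
 * counter has already passed the new compare value when it is re-read,
 * the event was missed and -ETIME is returned so the clockevent core
 * can retry with a larger delta.
 */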
static int mx1_2_set_next_event(unsigned long evt,
			      struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long tcmp;

	tcmp = readl_relaxed(imxtm->base + MX1_2_TCN) + evt;

	writel_relaxed(tcmp, imxtm->base + MX1_2_TCMP);

	return (int)(tcmp - readl_relaxed(imxtm->base + MX1_2_TCN)) < 0 ?
				-ETIME : 0;
}

static int v2_set_next_event(unsigned long evt,
			      struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long tcmp;

	tcmp = readl_relaxed(imxtm->base + V2_TCN) + evt;

	writel_relaxed(tcmp, imxtm->base + V2_TCMP);

	return evt < 0x7fffffff &&
		(int)(tcmp - readl_relaxed(imxtm->base + V2_TCN)) < 0 ?
				-ETIME : 0;
}

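/*
 * "Disarming" the compare channel: the counter keeps free-running, so the
 * closest we can get to "no event" is to move the compare value just
 * behind the current count (tcn - 3).  The next match is then almost a
 * full 32-bit counter wrap away.
 */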
static int mxc_shutdown(struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long flags;
	u32 tcn;

	/*
	 * Interrupt generation is disabled for at least as long as it
	 * takes to call mxc_set_next_event().
	 */
	local_irq_save(flags);

	/* Disable interrupt in GPT module */
	imxtm->gpt->gpt_irq_disable(imxtm);

	tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
	/* Set event time into far-far future */
	writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

	/* Clear pending interrupt */
	imxtm->gpt->gpt_irq_acknowledge(imxtm);

#ifdef DEBUG
	printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

	local_irq_restore(flags);

	return 0;
}

static int mxc_set_oneshot(struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long flags;

	/*
	 * Interrupt generation is disabled for at least as long as it
	 * takes to call mxc_set_next_event().
	 */
	local_irq_save(flags);

	/* Disable interrupt in GPT module */
	imxtm->gpt->gpt_irq_disable(imxtm);

	if (!clockevent_state_oneshot(ced)) {
		u32 tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
		/* Set event time into far-far future */
		writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

		/* Clear pending interrupt */
		imxtm->gpt->gpt_irq_acknowledge(imxtm);
	}

#ifdef DEBUG
	printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

	/*
	 * Do not put the overhead of interrupt enable/disable into
	 * mxc_set_next_event(); after the mode switch the core has about
	 * one full 32-bit counter wrap (roughly 4 minutes at these clock
	 * rates) to call mxc_set_next_event() or shut the clock event
	 * device down.
	 */
	imxtm->gpt->gpt_irq_enable(imxtm);
	local_irq_restore(flags);

	return 0;
}

/*
 * IRQ handler for the timer
 */
static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *ced = dev_id;
	struct imx_timer *imxtm = to_imx_timer(ced);
	uint32_t tstat;

	tstat = readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);

	imxtm->gpt->gpt_irq_acknowledge(imxtm);

	ced->event_handler(ced);

	return IRQ_HANDLED;
}

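/*
 * Register the GPT output-compare channel as a one-shot clock event
 * device (min delta 0xff, max delta 0xfffffffe counter ticks) and hook
 * up its interrupt handler.
 */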
static int __init mxc_clockevent_init(struct imx_timer *imxtm)
{
	struct clock_event_device *ced = &imxtm->ced;
	struct irqaction *act = &imxtm->act;

	ced->name = "mxc_timer1";
	ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
	ced->set_state_shutdown = mxc_shutdown;
	ced->set_state_oneshot = mxc_set_oneshot;
	ced->tick_resume = mxc_shutdown;
	ced->set_next_event = imxtm->gpt->set_next_event;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	ced->irq = imxtm->irq;
	clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
					0xff, 0xfffffffe);

	act->name = "i.MX Timer Tick";
	act->flags = IRQF_TIMER | IRQF_IRQPOLL;
	act->handler = mxc_timer_interrupt;
	act->dev_id = ced;

	return setup_irq(imxtm->irq, act);
}

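/*
 * TCTL setup: select a clock source and put the counter into
 * free-run (FRR) mode for each GPT hardware generation.
 */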
static void imx1_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_setup_tctl imx1_gpt_setup_tctl

static void imx31_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
	if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8)
		tctl_val |= V2_TCTL_CLK_OSC_DIV8;
	else
		tctl_val |= V2_TCTL_CLK_PER;

	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static void imx6dl_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
	if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8) {
		tctl_val |= V2_TCTL_CLK_OSC_DIV8;
		/* 24 / 8 = 3 MHz */
		writel_relaxed(7 << V2_TPRER_PRE24M, imxtm->base + MXC_TPRER);
		tctl_val |= V2_TCTL_24MEN;
	} else {
		tctl_val |= V2_TCTL_CLK_PER;
	}

	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static const struct imx_gpt_data imx1_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
	.gpt_irq_enable = imx1_gpt_irq_enable,
	.gpt_irq_disable = imx1_gpt_irq_disable,
	.gpt_irq_acknowledge = imx1_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx1_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx21_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
	.gpt_irq_enable = imx21_gpt_irq_enable,
	.gpt_irq_disable = imx21_gpt_irq_disable,
	.gpt_irq_acknowledge = imx21_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx21_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx31_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
	.gpt_irq_enable = imx31_gpt_irq_enable,
	.gpt_irq_disable = imx31_gpt_irq_disable,
	.gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx31_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};

static const struct imx_gpt_data imx6dl_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
	.gpt_irq_enable = imx6dl_gpt_irq_enable,
	.gpt_irq_disable = imx6dl_gpt_irq_disable,
	.gpt_irq_acknowledge = imx6dl_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx6dl_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};

static int __init _mxc_timer_init(struct imx_timer *imxtm)
{
	int ret;

	switch (imxtm->type) {
	case GPT_TYPE_IMX1:
		imxtm->gpt = &imx1_gpt_data;
		break;
	case GPT_TYPE_IMX21:
		imxtm->gpt = &imx21_gpt_data;
		break;
	case GPT_TYPE_IMX31:
		imxtm->gpt = &imx31_gpt_data;
		break;
	case GPT_TYPE_IMX6DL:
		imxtm->gpt = &imx6dl_gpt_data;
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(imxtm->clk_per)) {
		pr_err("i.MX timer: unable to get clk\n");
		return PTR_ERR(imxtm->clk_per);
	}

	if (!IS_ERR(imxtm->clk_ipg))
		clk_prepare_enable(imxtm->clk_ipg);

	clk_prepare_enable(imxtm->clk_per);

	/*
	 * Initialise to a known state (all timers off, and timing reset)
	 */

	writel_relaxed(0, imxtm->base + MXC_TCTL);
	writel_relaxed(0, imxtm->base + MXC_TPRER); /* see datasheet note */

	imxtm->gpt->gpt_setup_tctl(imxtm);

	/* init and register the timer to the framework */
	ret = mxc_clocksource_init(imxtm);
	if (ret)
		return ret;

	return mxc_clockevent_init(imxtm);
}

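/*
 * Legacy (non-DT) entry point.  Board code calls this from its
 * init_time hook with the GPT's physical base address, interrupt
 * number and hardware type, along the lines of (the macro names below
 * are illustrative, taken from a hypothetical i.MX21 board file):
 *
 *	mxc_timer_init(MX21_GPT1_BASE_ADDR, MX21_INT_GPT1, GPT_TYPE_IMX21);
 */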
void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
{
	struct imx_timer *imxtm;

	imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
	BUG_ON(!imxtm);

	imxtm->clk_per = clk_get_sys("imx-gpt.0", "per");
	imxtm->clk_ipg = clk_get_sys("imx-gpt.0", "ipg");

	imxtm->base = ioremap(pbase, SZ_4K);
	BUG_ON(!imxtm->base);

	imxtm->type = type;
	imxtm->irq = irq;

	_mxc_timer_init(imxtm);
}

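/*
 * Device tree probe path.  A matching GPT node supplies the register
 * window, interrupt and clocks, roughly like this sketch (addresses and
 * clock phandles below are illustrative, not taken from a real dtsi):
 *
 *	gpt: timer@10003000 {
 *		compatible = "fsl,imx31-gpt";
 *		reg = <0x10003000 0x1000>;
 *		interrupts = <29>;
 *		clocks = <&clks 1>, <&clks 2>;
 *		clock-names = "ipg", "per";
 *	};
 */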
static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
	struct imx_timer *imxtm;
	static int initialized;
	int ret;

	/* Support one instance only */
	if (initialized)
		return 0;

	imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
	if (!imxtm)
		return -ENOMEM;

	imxtm->base = of_iomap(np, 0);
	if (!imxtm->base)
		return -ENXIO;

	imxtm->irq = irq_of_parse_and_map(np, 0);
	if (imxtm->irq <= 0)
		return -EINVAL;

	imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");

	/* Try osc_per first, and fall back to per otherwise */
	imxtm->clk_per = of_clk_get_by_name(np, "osc_per");
	if (IS_ERR(imxtm->clk_per))
		imxtm->clk_per = of_clk_get_by_name(np, "per");

	imxtm->type = type;

	ret = _mxc_timer_init(imxtm);
	if (ret)
		return ret;

	initialized = 1;

	return 0;
}

static int __init imx1_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
}

static int __init imx21_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
}

static int __init imx31_timer_init_dt(struct device_node *np)
{
	enum imx_gpt_type type = GPT_TYPE_IMX31;

	/*
	 * We used to use the same compatible string for the i.MX6Q/D and
	 * i.MX6DL/S GPT devices, even though they actually have different
	 * programming models.  This is a workaround to keep existing
	 * i.MX6DL/S DTBs working with new kernels.
	 */
	if (of_machine_is_compatible("fsl,imx6dl"))
		type = GPT_TYPE_IMX6DL;

	return mxc_timer_init_dt(np, type);
}

static int __init imx6dl_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
}

TIMER_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
TIMER_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
TIMER_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx51_timer, "fsl,imx51-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx53_timer, "fsl,imx53-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6q_timer, "fsl,imx6q-gpt", imx31_timer_init_dt);
TIMER_OF_DECLARE(imx6dl_timer, "fsl,imx6dl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sl_timer, "fsl,imx6sl-gpt", imx6dl_timer_init_dt);
TIMER_OF_DECLARE(imx6sx_timer, "fsl,imx6sx-gpt", imx6dl_timer_init_dt);