// SPDX-License-Identifier: GPL-2.0+
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>

#include <linux/clk/clk-conf.h>

#include <clocksource/timer-ti-dm.h>
#include <dt-bindings/bus/ti-sysc.h>

/* For type1, set SYSC_OMAP2_CLOCKACTIVITY for fck off on idle, l4 clock on */
#define DMTIMER_TYPE1_ENABLE	((1 << 9) | (SYSC_IDLE_SMART << 3) | \
				 SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_AUTOIDLE)
#define DMTIMER_TYPE1_DISABLE	(SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE)
#define DMTIMER_TYPE2_ENABLE	(SYSC_IDLE_SMART_WKUP << 2)
#define DMTIMER_RESET_WAIT	100000

#define DMTIMER_INST_DONT_CARE	~0U

static int counter_32k;
static u32 clocksource;
static u32 clockevent;

/*
 * Subset of the timer registers we use. Note that the register offsets
 * depend on the timer revision detected.
 */
struct dmtimer_systimer {
	void __iomem *base;
	u8 sysc;
	u8 irq_stat;
	u8 irq_ena;
	u8 pend;
	u8 load;
	u8 counter;
	u8 ctrl;
	u8 wakeup;
	u8 ifctrl;
	struct clk *fck;
	struct clk *ick;
	unsigned long rate;
};

struct dmtimer_clockevent {
	struct clock_event_device dev;
	struct dmtimer_systimer t;
	u32 period;
};

struct dmtimer_clocksource {
	struct clocksource dev;
	struct dmtimer_systimer t;
	unsigned int loadval;
};

/* Assumes v1 ip if bits [31:16] are zero */
static bool dmtimer_systimer_revision1(struct dmtimer_systimer *t)
{
	u32 tidr = readl_relaxed(t->base);

	return !(tidr >> 16);
}

static int __init dmtimer_systimer_type1_reset(struct dmtimer_systimer *t)
{
	void __iomem *syss = t->base + OMAP_TIMER_V1_SYS_STAT_OFFSET;
	int ret;
	u32 l;

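	/* Trigger a reset, then poll the status register for completion */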
	writel_relaxed(BIT(1) | BIT(2), t->base + t->ifctrl);
	ret = readl_poll_timeout_atomic(syss, l, l & BIT(0), 100,
					DMTIMER_RESET_WAIT);

	return ret;
}

/* Note we must use io_base instead of func_base for type2 OCP regs */
static int __init dmtimer_systimer_type2_reset(struct dmtimer_systimer *t)
{
	void __iomem *sysc = t->base + t->sysc;
	u32 l;

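	/* Set the SOFTRESET bit and wait for the module to clear it */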
	l = readl_relaxed(sysc);
	l |= BIT(0);
	writel_relaxed(l, sysc);

	return readl_poll_timeout_atomic(sysc, l, !(l & BIT(0)), 100,
					 DMTIMER_RESET_WAIT);
}

static int __init dmtimer_systimer_reset(struct dmtimer_systimer *t)
{
	int ret;

	if (dmtimer_systimer_revision1(t))
		ret = dmtimer_systimer_type1_reset(t);
	else
		ret = dmtimer_systimer_type2_reset(t);
	if (ret < 0) {
		pr_err("%s failed with %i\n", __func__, ret);

		return ret;
	}

	return 0;
}

static const struct of_device_id counter_match_table[] = {
	{ .compatible = "ti,omap-counter32k" },
	{ /* Sentinel */ },
};

/*
 * Check if the SoC also has a usable 32 KiHz counter. The 32 KiHz
 * counter is handled by timer-ti-32k, but we need to detect it as it
 * affects the preferred dmtimer system timer configuration. There is
 * typically no use for a dmtimer clocksource if the 32 KiHz counter is
 * present, except on am437x as described below.
 */
static void __init dmtimer_systimer_check_counter32k(void)
{
	struct device_node *np;

	if (counter_32k)
		return;

	np = of_find_matching_node(NULL, counter_match_table);
	if (!np) {
		counter_32k = -ENODEV;

		return;
	}

	if (of_device_is_available(np))
		counter_32k = 1;
	else
		counter_32k = -ENODEV;

	of_node_put(np);
}

static const struct of_device_id dmtimer_match_table[] = {
	{ .compatible = "ti,omap2420-timer", },
	{ .compatible = "ti,omap3430-timer", },
	{ .compatible = "ti,omap4430-timer", },
	{ .compatible = "ti,omap5430-timer", },
	{ .compatible = "ti,am335x-timer", },
	{ .compatible = "ti,am335x-timer-1ms", },
	{ .compatible = "ti,dm814-timer", },
	{ .compatible = "ti,dm816-timer", },
	{ /* Sentinel */ },
};

/*
 * Checks that the system timers are configured to not reset or idle during
 * the generic timer-ti-dm device driver probe, and that the system timer
 * source clocks are properly configured. Also, let's not hog any DSP- and
 * PWM-capable timers unnecessarily as system timers.
 */
static bool __init dmtimer_is_preferred(struct device_node *np)
{
	if (!of_device_is_available(np))
		return false;

	if (!of_property_read_bool(np->parent,
				   "ti,no-reset-on-init"))
		return false;

	if (!of_property_read_bool(np->parent, "ti,no-idle"))
		return false;

	/* Secure gptimer12 is always clocked with a fixed source */
	if (!of_property_read_bool(np, "ti,timer-secure")) {
		if (!of_property_read_bool(np, "assigned-clocks"))
			return false;

		if (!of_property_read_bool(np, "assigned-clock-parents"))
			return false;
	}

	if (of_property_read_bool(np, "ti,timer-dsp"))
		return false;

	if (of_property_read_bool(np, "ti,timer-pwm"))
		return false;

	return true;
}

/*
 * Finds the first available usable always-on timer, and assigns it to either
 * clockevent or clocksource depending on whether the counter_32k is available
 * on the SoC or not.
 *
 * Some omap3 boards with an unreliable oscillator must not use the counter_32k
 * or dmtimer1 with a 32 KiHz source. Additionally, boards with an unreliable
 * oscillator should really set counter_32k as disabled and delete the dmtimer1
 * ti,always-on property, but let's not count on it. For these quirky cases,
 * we prefer using the always-on secure dmtimer12 with the internal 32 KiHz
 * clock as the clocksource, and any available dmtimer as the clockevent.
 *
 * For am437x, we are using the am335x style dmtimer clocksource. It is unclear
 * if this quirk handling is really needed, but let's change it separately
 * based on testing as it might cause side effects.
 */
static void __init dmtimer_systimer_assign_alwon(void)
{
	struct device_node *np;
	u32 pa = 0;
	bool quirk_unreliable_oscillator = false;

	/* Quirk unreliable 32 KiHz oscillator with incomplete dts */
	if (of_machine_is_compatible("ti,omap3-beagle") ||
	    of_machine_is_compatible("timll,omap3-devkit8000")) {
		quirk_unreliable_oscillator = true;
		counter_32k = -ENODEV;
	}

	/* Quirk am437x using am335x style dmtimer clocksource */
	if (of_machine_is_compatible("ti,am43"))
		counter_32k = -ENODEV;

	for_each_matching_node(np, dmtimer_match_table) {
		if (!dmtimer_is_preferred(np))
			continue;

		if (of_property_read_bool(np, "ti,timer-alwon")) {
			const __be32 *addr;

			addr = of_get_address(np, 0, NULL, NULL);
			pa = of_translate_address(np, addr);
			if (pa) {
				/* Quirky omap3 boards must use dmtimer12 */
				if (quirk_unreliable_oscillator &&
				    pa == 0x48318000)
					continue;

				of_node_put(np);
				break;
			}
		}
	}

	/* Usually no need for a dmtimer clocksource if we have the counter_32k */
	if (counter_32k >= 0) {
		clockevent = pa;
		clocksource = 0;
	} else {
		clocksource = pa;
		clockevent = DMTIMER_INST_DONT_CARE;
	}
}

/* Finds the first usable dmtimer, used for the don't care case */
static u32 __init dmtimer_systimer_find_first_available(void)
{
	struct device_node *np;
	const __be32 *addr;
	u32 pa = 0;

	for_each_matching_node(np, dmtimer_match_table) {
		if (!dmtimer_is_preferred(np))
			continue;

		addr = of_get_address(np, 0, NULL, NULL);
		pa = of_translate_address(np, addr);
		if (pa) {
			if (pa == clocksource || pa == clockevent) {
				pa = 0;
				continue;
			}

			of_node_put(np);
			break;
		}
	}

	return pa;
}

/* Selects the best clocksource and clockevent to use */
static void __init dmtimer_systimer_select_best(void)
{
	dmtimer_systimer_check_counter32k();
	dmtimer_systimer_assign_alwon();

	if (clockevent == DMTIMER_INST_DONT_CARE)
		clockevent = dmtimer_systimer_find_first_available();

	pr_debug("%s: counter_32k: %i clocksource: %08x clockevent: %08x\n",
		 __func__, counter_32k, clocksource, clockevent);
}

/* Interface clocks are only available on some SoC variants */
static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t,
					      struct device_node *np,
					      const char *name,
					      unsigned long *rate)
{
	struct clk *clock;
	unsigned long r;
	bool is_ick = false;
	int error;

	is_ick = !strncmp(name, "ick", 3);

	clock = of_clk_get_by_name(np, name);
	if ((PTR_ERR(clock) == -EINVAL) && is_ick)
		return 0;
	else if (IS_ERR(clock))
		return PTR_ERR(clock);

	error = clk_prepare_enable(clock);
	if (error)
		return error;

	r = clk_get_rate(clock);
	if (!r)
		return -ENODEV;

	if (is_ick)
		t->ick = clock;
	else
		t->fck = clock;

	*rate = r;

	return 0;
}

static void dmtimer_systimer_enable(struct dmtimer_systimer *t)
{
	u32 val;

	if (dmtimer_systimer_revision1(t))
		val = DMTIMER_TYPE1_ENABLE;
	else
		val = DMTIMER_TYPE2_ENABLE;

	writel_relaxed(val, t->base + t->sysc);
}

static void dmtimer_systimer_disable(struct dmtimer_systimer *t)
{
	if (!dmtimer_systimer_revision1(t))
		return;

	writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc);
}

static int __init dmtimer_systimer_setup(struct device_node *np,
					 struct dmtimer_systimer *t)
{
	unsigned long rate;
	u8 regbase;
	int error;

	if (!of_device_is_compatible(np->parent, "ti,sysc"))
		return -EINVAL;

	t->base = of_iomap(np, 0);
	if (!t->base)
		return -ENXIO;

	/*
	 * Enable optional assigned-clock-parents configured at the timer
	 * node level. For regular device drivers, this is done automatically
	 * by bus related code such as platform_drv_probe().
	 */
	error = of_clk_set_defaults(np, false);
	if (error < 0)
		pr_err("%s: clock source init failed: %i\n", __func__, error);

	/* For ti-sysc, we have timer clocks at the parent module level */
	error = dmtimer_systimer_init_clock(t, np->parent, "fck", &rate);
	if (error)
		goto err_unmap;

	t->rate = rate;

	error = dmtimer_systimer_init_clock(t, np->parent, "ick", &rate);
	if (error)
		goto err_unmap;

	if (dmtimer_systimer_revision1(t)) {
		t->irq_stat = OMAP_TIMER_V1_STAT_OFFSET;
		t->irq_ena = OMAP_TIMER_V1_INT_EN_OFFSET;
		t->pend = _OMAP_TIMER_WRITE_PEND_OFFSET;
		regbase = 0;
	} else {
		t->irq_stat = OMAP_TIMER_V2_IRQSTATUS;
		t->irq_ena = OMAP_TIMER_V2_IRQENABLE_SET;
		regbase = OMAP_TIMER_V2_FUNC_OFFSET;
		t->pend = regbase + _OMAP_TIMER_WRITE_PEND_OFFSET;
	}

	t->sysc = OMAP_TIMER_OCP_CFG_OFFSET;
	t->load = regbase + _OMAP_TIMER_LOAD_OFFSET;
	t->counter = regbase + _OMAP_TIMER_COUNTER_OFFSET;
	t->ctrl = regbase + _OMAP_TIMER_CTRL_OFFSET;
	t->wakeup = regbase + _OMAP_TIMER_WAKEUP_EN_OFFSET;
	t->ifctrl = regbase + _OMAP_TIMER_IF_CTRL_OFFSET;

	dmtimer_systimer_enable(t);
	dmtimer_systimer_reset(t);
	pr_debug("dmtimer rev %08x sysc %08x\n", readl_relaxed(t->base),
		 readl_relaxed(t->base + t->sysc));

	return 0;

err_unmap:
	iounmap(t->base);

	return error;
}

/* Clockevent */
static struct dmtimer_clockevent *
to_dmtimer_clockevent(struct clock_event_device *clockevent)
{
	return container_of(clockevent, struct dmtimer_clockevent, dev);
}

static irqreturn_t dmtimer_clockevent_interrupt(int irq, void *data)
{
	struct dmtimer_clockevent *clkevt = data;
	struct dmtimer_systimer *t = &clkevt->t;

	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat);
	clkevt->dev.event_handler(&clkevt->dev);

	return IRQ_HANDLED;
}

static int dmtimer_set_next_event(unsigned long cycles,
				  struct clock_event_device *evt)
{
	struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
	struct dmtimer_systimer *t = &clkevt->t;
	void __iomem *pend = t->base + t->pend;

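	/*
	 * The timer counts up and interrupts on overflow, so program it to
	 * expire after the requested number of cycles. Posted writes must
	 * complete before the next register write, hence the polling.
	 */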
	writel_relaxed(0xffffffff - cycles, t->base + t->counter);
	while (readl_relaxed(pend) & WP_TCRR)
		cpu_relax();

	writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
	while (readl_relaxed(pend) & WP_TCLR)
		cpu_relax();

	return 0;
}

static int dmtimer_clockevent_shutdown(struct clock_event_device *evt)
{
	struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
	struct dmtimer_systimer *t = &clkevt->t;
	void __iomem *ctrl = t->base + t->ctrl;
	u32 l;

	l = readl_relaxed(ctrl);
	if (l & OMAP_TIMER_CTRL_ST) {
		l &= ~BIT(0);
		writel_relaxed(l, ctrl);
		/* Flush posted write */
		l = readl_relaxed(ctrl);
		/* Wait for functional clock period x 3.5 */
		udelay(3500000 / t->rate + 1);
	}
	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat);

	return 0;
}

static int dmtimer_set_periodic(struct clock_event_device *evt)
{
	struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
	struct dmtimer_systimer *t = &clkevt->t;
	void __iomem *pend = t->base + t->pend;

	dmtimer_clockevent_shutdown(evt);

	/* Looks like we need to first set the load value separately */
	writel_relaxed(clkevt->period, t->base + t->load);
	while (readl_relaxed(pend) & WP_TLDR)
		cpu_relax();

	writel_relaxed(clkevt->period, t->base + t->counter);
	while (readl_relaxed(pend) & WP_TCRR)
		cpu_relax();

	writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
		       t->base + t->ctrl);
	while (readl_relaxed(pend) & WP_TCLR)
		cpu_relax();

	return 0;
}

static void omap_clockevent_idle(struct clock_event_device *evt)
{
	struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
	struct dmtimer_systimer *t = &clkevt->t;

	dmtimer_systimer_disable(t);
	clk_disable(t->fck);
}

static void omap_clockevent_unidle(struct clock_event_device *evt)
{
	struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
	struct dmtimer_systimer *t = &clkevt->t;
	int error;

	error = clk_enable(t->fck);
	if (error)
		pr_err("could not enable timer fck on resume: %i\n", error);

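	/* Re-enable the timer module, its overflow interrupt and wakeup event */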
	dmtimer_systimer_enable(t);
	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
}

static int __init dmtimer_clockevent_init(struct device_node *np)
{
	struct dmtimer_clockevent *clkevt;
	struct clock_event_device *dev;
	struct dmtimer_systimer *t;
	int error;

	clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
	if (!clkevt)
		return -ENOMEM;

	t = &clkevt->t;
	dev = &clkevt->dev;

	/*
	 * We mostly use cpuidle_coupled with ARM local timers for runtime,
	 * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
	 */
	dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	dev->rating = 300;
	dev->set_next_event = dmtimer_set_next_event;
	dev->set_state_shutdown = dmtimer_clockevent_shutdown;
	dev->set_state_periodic = dmtimer_set_periodic;
	dev->set_state_oneshot = dmtimer_clockevent_shutdown;
	dev->tick_resume = dmtimer_clockevent_shutdown;
	dev->cpumask = cpu_possible_mask;

	dev->irq = irq_of_parse_and_map(np, 0);
	if (!dev->irq) {
		error = -ENXIO;
		goto err_out_free;
	}

	error = dmtimer_systimer_setup(np, &clkevt->t);
	if (error)
		goto err_out_free;

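	/* The timer overflows rate / HZ cycles after loading the period value */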
	clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);

	/*
	 * For clock-event timers we never read the timer counter and
	 * so we are not impacted by errata i103 and i767. Therefore,
	 * we can safely ignore these errata for clock-event timers.
	 */
	writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);

	error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
			    IRQF_TIMER, "clockevent", clkevt);
	if (error)
		goto err_out_unmap;

	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);

	pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
		of_find_property(np, "ti,timer-alwon", NULL) ?
		"always-on " : "", t->rate, np->parent);

	clockevents_config_and_register(dev, t->rate,
					3, /* Timer internal resynch latency */
					0xffffffff);

	if (of_machine_is_compatible("ti,am33xx") ||
	    of_machine_is_compatible("ti,am43")) {
		dev->suspend = omap_clockevent_idle;
		dev->resume = omap_clockevent_unidle;
	}

	return 0;

err_out_unmap:
	iounmap(t->base);

err_out_free:
	kfree(clkevt);

	return error;
}

/* Clocksource */
static struct dmtimer_clocksource *
to_dmtimer_clocksource(struct clocksource *cs)
{
	return container_of(cs, struct dmtimer_clocksource, dev);
}

static u64 dmtimer_clocksource_read_cycles(struct clocksource *cs)
{
	struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
	struct dmtimer_systimer *t = &clksrc->t;

	return (u64)readl_relaxed(t->base + t->counter);
}

static void __iomem *dmtimer_sched_clock_counter;

static u64 notrace dmtimer_read_sched_clock(void)
{
	return readl_relaxed(dmtimer_sched_clock_counter);
}

static void dmtimer_clocksource_suspend(struct clocksource *cs)
{
	struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
	struct dmtimer_systimer *t = &clksrc->t;

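	/* Save the counter so it can continue from the same value on resume */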
	clksrc->loadval = readl_relaxed(t->base + t->counter);
	dmtimer_systimer_disable(t);
	clk_disable(t->fck);
}

static void dmtimer_clocksource_resume(struct clocksource *cs)
{
	struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
	struct dmtimer_systimer *t = &clksrc->t;
	int error;

	error = clk_enable(t->fck);
	if (error)
		pr_err("could not enable timer fck on resume: %i\n", error);

	dmtimer_systimer_enable(t);
	writel_relaxed(clksrc->loadval, t->base + t->counter);
	writel_relaxed(OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR,
		       t->base + t->ctrl);
}

static int __init dmtimer_clocksource_init(struct device_node *np)
{
	struct dmtimer_clocksource *clksrc;
	struct dmtimer_systimer *t;
	struct clocksource *dev;
	int error;

	clksrc = kzalloc(sizeof(*clksrc), GFP_KERNEL);
	if (!clksrc)
		return -ENOMEM;

	dev = &clksrc->dev;
	t = &clksrc->t;

	error = dmtimer_systimer_setup(np, t);
	if (error)
		goto err_out_free;

	dev->name = "dmtimer";
	dev->rating = 300;
	dev->read = dmtimer_clocksource_read_cycles;
	dev->mask = CLOCKSOURCE_MASK(32);
	dev->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	/* Unlike for clockevent, legacy code sets suspend only for am4 */
	if (of_machine_is_compatible("ti,am43")) {
		dev->suspend = dmtimer_clocksource_suspend;
		dev->resume = dmtimer_clocksource_resume;
	}

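	/* Start the free-running counter from zero with autoreload enabled */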
	writel_relaxed(0, t->base + t->counter);
	writel_relaxed(OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR,
		       t->base + t->ctrl);

	pr_info("TI gptimer clocksource: %s%pOF\n",
		of_find_property(np, "ti,timer-alwon", NULL) ?
		"always-on " : "", np->parent);

	if (!dmtimer_sched_clock_counter) {
		dmtimer_sched_clock_counter = t->base + t->counter;
		sched_clock_register(dmtimer_read_sched_clock, 32, t->rate);
	}

	if (clocksource_register_hz(dev, t->rate))
		pr_err("Could not register clocksource %pOF\n", np);

	return 0;

err_out_free:
	kfree(clksrc);

	return -ENODEV;
}

/*
 * To distinguish between a clocksource and a clockevent, we assume the device
 * tree has no interrupts configured for a clocksource timer.
 */
static int __init dmtimer_systimer_init(struct device_node *np)
{
	const __be32 *addr;
	u32 pa;

	/* One time init for the preferred timer configuration */
	if (!clocksource && !clockevent)
		dmtimer_systimer_select_best();

	if (!clocksource && !clockevent) {
		pr_err("%s: unable to detect system timers, update dtb?\n",
		       __func__);

		return -EINVAL;
	}

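	/* Match this timer against the selected instances by physical address */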
	addr = of_get_address(np, 0, NULL, NULL);
	pa = of_translate_address(np, addr);
	if (!pa)
		return -EINVAL;

	if (counter_32k <= 0 && clocksource == pa)
		return dmtimer_clocksource_init(np);

	if (clockevent == pa)
		return dmtimer_clockevent_init(np);

	return 0;
}

TIMER_OF_DECLARE(systimer_omap2, "ti,omap2420-timer", dmtimer_systimer_init);
TIMER_OF_DECLARE(systimer_omap3, "ti,omap3430-timer", dmtimer_systimer_init);
TIMER_OF_DECLARE(systimer_omap4, "ti,omap4430-timer", dmtimer_systimer_init);
TIMER_OF_DECLARE(systimer_omap5, "ti,omap5430-timer", dmtimer_systimer_init);
TIMER_OF_DECLARE(systimer_am33x, "ti,am335x-timer", dmtimer_systimer_init);
TIMER_OF_DECLARE(systimer_am3ms, "ti,am335x-timer-1ms", dmtimer_systimer_init);
TIMER_OF_DECLARE(systimer_dm814, "ti,dm814-timer", dmtimer_systimer_init);
TIMER_OF_DECLARE(systimer_dm816, "ti,dm816-timer", dmtimer_systimer_init);