// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH Timer Support - MTU2
 *
 *  Copyright (C) 2009 Magnus Damm
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct sh_mtu2_device;

struct sh_mtu2_channel {
	struct sh_mtu2_device *mtu;
	unsigned int index;

	void __iomem *base;

	struct clock_event_device ced;
};

struct sh_mtu2_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;

	raw_spinlock_t lock; /* Protect the shared registers */

	struct sh_mtu2_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
};

#define TSTR -1 /* shared register */
#define TCR  0 /* channel register */
#define TMDR 1 /* channel register */
#define TIOR 2 /* channel register */
#define TIER 3 /* channel register */
#define TSR  4 /* channel register */
#define TCNT 5 /* channel register */
#define TGR  6 /* channel register */

#define TCR_CCLR_NONE		(0 << 5)
#define TCR_CCLR_TGRA		(1 << 5)
#define TCR_CCLR_TGRB		(2 << 5)
#define TCR_CCLR_SYNC		(3 << 5)
#define TCR_CCLR_TGRC		(5 << 5)
#define TCR_CCLR_TGRD		(6 << 5)
#define TCR_CCLR_MASK		(7 << 5)
#define TCR_CKEG_RISING		(0 << 3)
#define TCR_CKEG_FALLING	(1 << 3)
#define TCR_CKEG_BOTH		(2 << 3)
#define TCR_CKEG_MASK		(3 << 3)
/* Values 4 to 7 are channel-dependent */
#define TCR_TPSC_P1		(0 << 0)
#define TCR_TPSC_P4		(1 << 0)
#define TCR_TPSC_P16		(2 << 0)
#define TCR_TPSC_P64		(3 << 0)
#define TCR_TPSC_CH0_TCLKA	(4 << 0)
#define TCR_TPSC_CH0_TCLKB	(5 << 0)
#define TCR_TPSC_CH0_TCLKC	(6 << 0)
#define TCR_TPSC_CH0_TCLKD	(7 << 0)
#define TCR_TPSC_CH1_TCLKA	(4 << 0)
#define TCR_TPSC_CH1_TCLKB	(5 << 0)
#define TCR_TPSC_CH1_P256	(6 << 0)
#define TCR_TPSC_CH1_TCNT2	(7 << 0)
#define TCR_TPSC_CH2_TCLKA	(4 << 0)
#define TCR_TPSC_CH2_TCLKB	(5 << 0)
#define TCR_TPSC_CH2_TCLKC	(6 << 0)
#define TCR_TPSC_CH2_P1024	(7 << 0)
#define TCR_TPSC_CH34_P256	(4 << 0)
#define TCR_TPSC_CH34_P1024	(5 << 0)
#define TCR_TPSC_CH34_TCLKA	(6 << 0)
#define TCR_TPSC_CH34_TCLKB	(7 << 0)
#define TCR_TPSC_MASK		(7 << 0)

#define TMDR_BFE		(1 << 6)
#define TMDR_BFB		(1 << 5)
#define TMDR_BFA		(1 << 4)
#define TMDR_MD_NORMAL		(0 << 0)
#define TMDR_MD_PWM_1		(2 << 0)
#define TMDR_MD_PWM_2		(3 << 0)
#define TMDR_MD_PHASE_1		(4 << 0)
#define TMDR_MD_PHASE_2		(5 << 0)
#define TMDR_MD_PHASE_3		(6 << 0)
#define TMDR_MD_PHASE_4		(7 << 0)
#define TMDR_MD_PWM_SYNC	(8 << 0)
#define TMDR_MD_PWM_COMP_CREST	(13 << 0)
#define TMDR_MD_PWM_COMP_TROUGH	(14 << 0)
#define TMDR_MD_PWM_COMP_BOTH	(15 << 0)
#define TMDR_MD_MASK		(15 << 0)

#define TIOC_IOCH(n)		((n) << 4)
#define TIOC_IOCL(n)		((n) << 0)
#define TIOR_OC_RETAIN		(0 << 0)
#define TIOR_OC_0_CLEAR		(1 << 0)
#define TIOR_OC_0_SET		(2 << 0)
#define TIOR_OC_0_TOGGLE	(3 << 0)
#define TIOR_OC_1_CLEAR		(5 << 0)
#define TIOR_OC_1_SET		(6 << 0)
#define TIOR_OC_1_TOGGLE	(7 << 0)
#define TIOR_IC_RISING		(8 << 0)
#define TIOR_IC_FALLING		(9 << 0)
#define TIOR_IC_BOTH		(10 << 0)
#define TIOR_IC_TCNT		(12 << 0)
#define TIOR_MASK		(15 << 0)

#define TIER_TTGE		(1 << 7)
#define TIER_TTGE2		(1 << 6)
#define TIER_TCIEU		(1 << 5)
#define TIER_TCIEV		(1 << 4)
#define TIER_TGIED		(1 << 3)
#define TIER_TGIEC		(1 << 2)
#define TIER_TGIEB		(1 << 1)
#define TIER_TGIEA		(1 << 0)

#define TSR_TCFD		(1 << 7)
#define TSR_TCFU		(1 << 5)
#define TSR_TCFV		(1 << 4)
#define TSR_TGFD		(1 << 3)
#define TSR_TGFC		(1 << 2)
#define TSR_TGFB		(1 << 1)
#define TSR_TGFA		(1 << 0)

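/*
 * Byte offsets of the per-channel registers, indexed by the TCR..TGR
 * enumeration above. Note the gaps: TIER and TSR sit at offsets 4 and 5,
 * and the 16-bit TCNT and TGR registers start at offsets 6 and 8.
 */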
static unsigned long mtu2_reg_offs[] = {
	[TCR] = 0,
	[TMDR] = 1,
	[TIOR] = 2,
	[TIER] = 4,
	[TSR] = 5,
	[TCNT] = 6,
	[TGR] = 8,
};

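/*
 * Register accessors. The shared start/stop register (TSTR) lives at a
 * fixed offset (0x280) from the device's base mapping; all other registers
 * are addressed relative to the channel's own base. TCNT and TGR are
 * 16-bit, the remaining channel registers are 8-bit.
 */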
static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR)
		return ioread8(ch->mtu->mapbase + 0x280);

	offs = mtu2_reg_offs[reg_nr];

	if ((reg_nr == TCNT) || (reg_nr == TGR))
		return ioread16(ch->base + offs);
	else
		return ioread8(ch->base + offs);
}

static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR)
		return iowrite8(value, ch->mtu->mapbase + 0x280);

	offs = mtu2_reg_offs[reg_nr];

	if ((reg_nr == TCNT) || (reg_nr == TGR))
		iowrite16(value, ch->base + offs);
	else
		iowrite8(value, ch->base + offs);
}

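/*
 * Start or stop a channel by flipping its bit in TSTR. TSTR is shared by
 * all channels, so the read-modify-write must be done under mtu->lock.
 */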
static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->mtu->lock, flags);
	value = sh_mtu2_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_mtu2_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->mtu->lock, flags);
}

static int sh_mtu2_enable(struct sh_mtu2_channel *ch)
{
	unsigned long periodic;
	unsigned long rate;
	int ret;

	pm_runtime_get_sync(&ch->mtu->pdev->dev);
	dev_pm_syscore_device(&ch->mtu->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->mtu->clk);
	if (ret) {
		dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_mtu2_start_stop_ch(ch, 0);

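	/*
	 * Program one jiffy's worth of counts into TGR: the counter runs at
	 * the module clock divided by 64, so rounding rate/HZ to the nearest
	 * integer gives the compare-match period for HZ ticks per second.
	 */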
	rate = clk_get_rate(ch->mtu->clk) / 64;
	periodic = (rate + HZ/2) / HZ;

	/*
	 * "Periodic Counter Operation"
	 * Clear on TGRA compare match, divide clock by 64.
	 */
	sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64);
	sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) |
		      TIOC_IOCL(TIOR_OC_0_CLEAR));
	sh_mtu2_write(ch, TGR, periodic);
	sh_mtu2_write(ch, TCNT, 0);
	sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL);
	sh_mtu2_write(ch, TIER, TIER_TGIEA);

	/* enable channel */
	sh_mtu2_start_stop_ch(ch, 1);

	return 0;
}

static void sh_mtu2_disable(struct sh_mtu2_channel *ch)
{
	/* disable channel */
	sh_mtu2_start_stop_ch(ch, 0);

	/* stop clock */
	clk_disable(ch->mtu->clk);

	dev_pm_syscore_device(&ch->mtu->pdev->dev, false);
	pm_runtime_put(&ch->mtu->pdev->dev);
}

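/*
 * TGIA interrupt: the TGFA status flag is cleared by reading TSR and then
 * writing the flag bit back as zero (hence the ~TSR_TGFA write below),
 * which leaves the other status bits untouched.
 */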
static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
{
	struct sh_mtu2_channel *ch = dev_id;

	/* acknowledge interrupt */
	sh_mtu2_read(ch, TSR);
	sh_mtu2_write(ch, TSR, ~TSR_TGFA);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}

static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_mtu2_channel, ced);
}

static int sh_mtu2_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);

	if (clockevent_state_periodic(ced))
		sh_mtu2_disable(ch);

	return 0;
}

static int sh_mtu2_clock_event_set_periodic(struct clock_event_device *ced)
{
	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);

	if (clockevent_state_periodic(ced))
		sh_mtu2_disable(ch);

	dev_info(&ch->mtu->pdev->dev, "ch%u: used for periodic clock events\n",
		 ch->index);
	sh_mtu2_enable(ch);
	return 0;
}

static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}

static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->mtu->pdev->dev);
}

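/*
 * The MTU2 clockevent is periodic-only (no one-shot support), hence the
 * bare CLOCK_EVT_FEAT_PERIODIC feature flag and the modest rating of 200.
 */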
static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch,
					const char *name)
{
	struct clock_event_device *ced = &ch->ced;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_state_shutdown = sh_mtu2_clock_event_shutdown;
	ced->set_state_periodic = sh_mtu2_clock_event_set_periodic;
	ced->suspend = sh_mtu2_clock_event_suspend;
	ced->resume = sh_mtu2_clock_event_resume;

	dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);
}

static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name)
{
	ch->mtu->has_clockevent = true;
	sh_mtu2_register_clockevent(ch, name);

	return 0;
}

static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index,
				 struct sh_mtu2_device *mtu)
{
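	/*
	 * Offsets of the three channel register blocks relative to the
	 * mapped resource; channel 2's block precedes channels 0 and 1 in
	 * the address map.
	 */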
	static const unsigned int channel_offsets[] = {
		0x300, 0x380, 0x000,
	};
	char name[6];
	int irq;
	int ret;

	ch->mtu = mtu;

	sprintf(name, "tgi%ua", index);
	irq = platform_get_irq_byname(mtu->pdev, name);
	if (irq < 0) {
		/* Skip channels with no declared interrupt. */
		return 0;
	}

	ret = request_irq(irq, sh_mtu2_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->mtu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n",
			index, irq);
		return ret;
	}

	ch->base = mtu->mapbase + channel_offsets[index];
	ch->index = index;

	return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev));
}

static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu)
{
	struct resource *res;

	res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&mtu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	mtu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (mtu->mapbase == NULL)
		return -ENXIO;

	return 0;
}

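/*
 * One-time device setup: take and prepare the functional clock, map the
 * register window, then set up the three possible channels. Channels whose
 * TGIxA interrupt isn't declared in the platform resources are silently
 * skipped by sh_mtu2_setup_channel().
 */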
static int sh_mtu2_setup(struct sh_mtu2_device *mtu,
			 struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	mtu->pdev = pdev;

	raw_spin_lock_init(&mtu->lock);

	/* Get hold of clock. */
	mtu->clk = clk_get(&mtu->pdev->dev, "fck");
	if (IS_ERR(mtu->clk)) {
		dev_err(&mtu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(mtu->clk);
	}

	ret = clk_prepare(mtu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_mtu2_map_memory(mtu);
	if (ret < 0) {
		dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	mtu->num_channels = 3;

	mtu->channels = kcalloc(mtu->num_channels, sizeof(*mtu->channels),
				GFP_KERNEL);
	if (mtu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	for (i = 0; i < mtu->num_channels; ++i) {
		ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, mtu);

	return 0;

err_unmap:
	kfree(mtu->channels);
	iounmap(mtu->mapbase);
err_clk_unprepare:
	clk_unprepare(mtu->clk);
err_clk_put:
	clk_put(mtu->clk);
	return ret;
}

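/*
 * The device may first be probed very early ("earlytimer"), before the
 * driver core is up, and then again as a regular platform device. On the
 * second pass the drvdata set by sh_mtu2_setup() is already present, so
 * only the runtime PM state needs to be brought up to date.
 */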
static int sh_mtu2_probe(struct platform_device *pdev)
{
	struct sh_mtu2_device *mtu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (mtu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	mtu = kzalloc(sizeof(*mtu), GFP_KERNEL);
	if (mtu == NULL)
		return -ENOMEM;

	ret = sh_mtu2_setup(mtu, pdev);
	if (ret) {
		kfree(mtu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (mtu->has_clockevent)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_mtu2_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent */
}

static const struct platform_device_id sh_mtu2_id_table[] = {
	{ "sh-mtu2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table);

static const struct of_device_id sh_mtu2_of_table[] __maybe_unused = {
	{ .compatible = "renesas,mtu2" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_mtu2_of_table);
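
/*
 * Illustrative device tree node for DT-based use of this driver. This is a
 * hypothetical sketch: the SoC-specific compatible, unit address, reg size,
 * interrupt specifiers and clock phandle below are assumptions, not copied
 * from a real DTS. The interrupt-names match the "tgi%ua" names requested
 * in sh_mtu2_setup_channel() and clock-names matches the "fck" clock looked
 * up in sh_mtu2_setup():
 *
 *	mtu2: timer@fcff0000 {
 *		compatible = "renesas,mtu2-r7s72100", "renesas,mtu2";
 *		reg = <0xfcff0000 0x400>;
 *		interrupts = <...>, <...>, <...>;
 *		interrupt-names = "tgi0a", "tgi1a", "tgi2a";
 *		clocks = <&mstp3_clks 3>;
 *		clock-names = "fck";
 *	};
 */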

static struct platform_driver sh_mtu2_device_driver = {
	.probe		= sh_mtu2_probe,
	.remove		= sh_mtu2_remove,
	.driver		= {
		.name	= "sh_mtu2",
		.of_match_table = of_match_ptr(sh_mtu2_of_table),
	},
	.id_table	= sh_mtu2_id_table,
};

static int __init sh_mtu2_init(void)
{
	return platform_driver_register(&sh_mtu2_device_driver);
}

static void __exit sh_mtu2_exit(void)
{
	platform_driver_unregister(&sh_mtu2_device_driver);
}

early_platform_init("earlytimer", &sh_mtu2_device_driver);
subsys_initcall(sh_mtu2_init);
module_exit(sh_mtu2_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
MODULE_LICENSE("GPL v2");