xref: /openbmc/linux/arch/powerpc/sysdev/mpic_timer.c (revision d3964221)
/*
 * MPIC timer driver
 *
 * Copyright 2013 Freescale Semiconductor, Inc.
 * Author: Dongsheng Wang <Dongsheng.Wang@freescale.com>
 *	   Li Yang <leoli@freescale.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include <sysdev/fsl_soc.h>
#include <asm/io.h>

#include <asm/mpic_timer.h>

#define FSL_GLOBAL_TIMER		0x1

/* Clock Ratio
 * Divide by 64 0x00000300
 * Divide by 32 0x00000200
 * Divide by 16 0x00000100
 * Divide by  8 0x00000000 (Hardware default div)
 */
#define MPIC_TIMER_TCR_CLKDIV		0x00000300

#define MPIC_TIMER_TCR_ROVR_OFFSET	24

#define TIMER_STOP			0x80000000
#define GTCCR_TOG			0x80000000
#define TIMERS_PER_GROUP		4
#define MAX_TICKS			(~0U >> 1)
#define MAX_TICKS_CASCADE		(~0U)
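/* timer "num" occupies idle-map bit (TIMERS_PER_GROUP - 1 - num) */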
#define TIMER_OFFSET(num)		(1 << (TIMERS_PER_GROUP - 1 - num))

/* tv_usec should be less than ONE_SECOND, otherwise use tv_sec */
#define ONE_SECOND			1000000

struct timer_regs {
	u32	gtccr;
	u32	res0[3];
	u32	gtbcr;
	u32	res1[3];
	u32	gtvpr;
	u32	res2[3];
	u32	gtdr;
	u32	res3[3];
};

struct cascade_priv {
	u32 tcr_value;			/* TCR register: CASC & ROVR value */
	unsigned int cascade_map;	/* cascade map */
	unsigned int timer_num;		/* cascade control timer */
};

struct timer_group_priv {
	struct timer_regs __iomem	*regs;
	struct mpic_timer		timer[TIMERS_PER_GROUP];
	struct list_head		node;
	unsigned int			timerfreq;
	unsigned int			idle;
	unsigned int			flags;
	spinlock_t			lock;
	void __iomem			*group_tcr;
};

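/*
 * Each entry describes one usable cascade pair: the CASC/ROVR value
 * written to the group TCR, the idle-map bits (as produced by
 * TIMER_OFFSET()) that the pair occupies, and the number of the timer
 * whose registers control the cascade.
 */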
static struct cascade_priv cascade_timer[] = {
	/* cascade timer 0 and 1 */
	{0x1, 0xc, 0x1},
	/* cascade timer 1 and 2 */
	{0x2, 0x6, 0x2},
	/* cascade timer 2 and 3 */
	{0x4, 0x3, 0x3}
};

static LIST_HEAD(timer_group_list);

static void convert_ticks_to_time(struct timer_group_priv *priv,
		const u64 ticks, struct timeval *time)
{
	u64 tmp_sec;

	time->tv_sec = (__kernel_time_t)div_u64(ticks, priv->timerfreq);
	tmp_sec = (u64)time->tv_sec * (u64)priv->timerfreq;

	time->tv_usec = 0;

	if (tmp_sec <= ticks)
		time->tv_usec = (__kernel_suseconds_t)
			div_u64((ticks - tmp_sec) * 1000000, priv->timerfreq);

	return;
}

/* Convert the time requested by the user into timer ticks */
static int convert_time_to_ticks(struct timer_group_priv *priv,
		const struct timeval *time, u64 *ticks)
{
	u64 max_value;		/* prevent u64 overflow */
	u64 tmp = 0;

	u64 tmp_sec;
	u64 tmp_ms;
	u64 tmp_us;

	max_value = div_u64(ULLONG_MAX, priv->timerfreq);

	if (time->tv_sec > max_value ||
			(time->tv_sec == max_value && time->tv_usec > 0))
		return -EINVAL;

	tmp_sec = (u64)time->tv_sec * (u64)priv->timerfreq;
	tmp += tmp_sec;

	tmp_ms = time->tv_usec / 1000;
	tmp_ms = div_u64((u64)tmp_ms * (u64)priv->timerfreq, 1000);
	tmp += tmp_ms;

	tmp_us = time->tv_usec % 1000;
	tmp_us = div_u64((u64)tmp_us * (u64)priv->timerfreq, 1000000);
	tmp += tmp_us;

	*ticks = tmp;

	return 0;
}
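
/*
 * Worked example (the 12.5 MHz timer clock is purely illustrative):
 * with priv->timerfreq == 12500000 and time == { .tv_sec = 2, .tv_usec = 500000 }
 *   seconds part:      2 * 12500000                        = 25000000 ticks
 *   millisecond part:  (500000 / 1000) * 12500000 / 1000    = 6250000 ticks
 *   microsecond part:  (500000 % 1000) * 12500000 / 1000000 =       0 ticks
 * giving 31250000 ticks in total, i.e. 2.5 s at 12.5 MHz.
 */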

/* detect whether there is a cascade timer available */
static struct mpic_timer *detect_idle_cascade_timer(
					struct timer_group_priv *priv)
{
	struct cascade_priv *casc_priv;
	unsigned int map;
	unsigned int array_size = ARRAY_SIZE(cascade_timer);
	unsigned int num;
	unsigned int i;
	unsigned long flags;

	casc_priv = cascade_timer;
	for (i = 0; i < array_size; i++) {
		spin_lock_irqsave(&priv->lock, flags);
		map = casc_priv->cascade_map & priv->idle;
		if (map == casc_priv->cascade_map) {
			num = casc_priv->timer_num;
			priv->timer[num].cascade_handle = casc_priv;

			/* set timer busy */
			priv->idle &= ~casc_priv->cascade_map;
			spin_unlock_irqrestore(&priv->lock, flags);
			return &priv->timer[num];
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		casc_priv++;
	}

	return NULL;
}

static int set_cascade_timer(struct timer_group_priv *priv, u64 ticks,
		unsigned int num)
{
	struct cascade_priv *casc_priv;
	u32 tcr;
	u32 tmp_ticks;
	u32 rem_ticks;

	/* set group tcr reg for cascade */
	casc_priv = priv->timer[num].cascade_handle;
	if (!casc_priv)
		return -EINVAL;

	tcr = casc_priv->tcr_value |
		(casc_priv->tcr_value << MPIC_TIMER_TCR_ROVR_OFFSET);
	setbits32(priv->group_tcr, tcr);

	tmp_ticks = div_u64_rem(ticks, MAX_TICKS_CASCADE, &rem_ticks);

	out_be32(&priv->regs[num].gtccr, 0);
	out_be32(&priv->regs[num].gtbcr, tmp_ticks | TIMER_STOP);

	out_be32(&priv->regs[num - 1].gtccr, 0);
	out_be32(&priv->regs[num - 1].gtbcr, rem_ticks);

	return 0;
}

static struct mpic_timer *get_cascade_timer(struct timer_group_priv *priv,
					u64 ticks)
{
	struct mpic_timer *allocated_timer;

	/* Two cascade timers: Support the maximum time */
	const u64 max_ticks = (u64)MAX_TICKS * (u64)MAX_TICKS_CASCADE;
	int ret;

	if (ticks > max_ticks)
		return NULL;

	/* detect idle timer */
	allocated_timer = detect_idle_cascade_timer(priv);
	if (!allocated_timer)
		return NULL;

	/* set ticks to timer */
	ret = set_cascade_timer(priv, ticks, allocated_timer->num);
	if (ret < 0)
		return NULL;

	return allocated_timer;
}

static struct mpic_timer *get_timer(const struct timeval *time)
{
	struct timer_group_priv *priv;
	struct mpic_timer *timer;

	u64 ticks;
	unsigned int num;
	unsigned int i;
	unsigned long flags;
	int ret;

	list_for_each_entry(priv, &timer_group_list, node) {
		ret = convert_time_to_ticks(priv, time, &ticks);
		if (ret < 0)
			return NULL;

		if (ticks > MAX_TICKS) {
			if (!(priv->flags & FSL_GLOBAL_TIMER))
				return NULL;

			timer = get_cascade_timer(priv, ticks);
			if (!timer)
				continue;

			return timer;
		}

		for (i = 0; i < TIMERS_PER_GROUP; i++) {
			/* single timer: allocate from the highest-numbered timer down */
			num = TIMERS_PER_GROUP - 1 - i;
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->idle & (1 << i)) {
				/* set timer busy */
				priv->idle &= ~(1 << i);
				/* set ticks & stop timer */
				out_be32(&priv->regs[num].gtbcr,
					ticks | TIMER_STOP);
				out_be32(&priv->regs[num].gtccr, 0);
				priv->timer[num].cascade_handle = NULL;
				spin_unlock_irqrestore(&priv->lock, flags);
				return &priv->timer[num];
			}
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	return NULL;
}

/**
 * mpic_start_timer - start hardware timer
 * @handle: the timer to be started.
 *
 * Once started, the timer raises its interrupt whenever the requested
 * interval expires, and the handler passed to mpic_request_timer() is
 * called with its dev argument.
 */
void mpic_start_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);

	clrbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
}
EXPORT_SYMBOL(mpic_start_timer);

/**
 * mpic_stop_timer - stop hardware timer
 * @handle: the timer to be stopped
 *
 * The timer generates interrupts periodically until it is stopped.
 */
void mpic_stop_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);
	struct cascade_priv *casc_priv;

	setbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);

	casc_priv = priv->timer[handle->num].cascade_handle;
	if (casc_priv) {
		out_be32(&priv->regs[handle->num].gtccr, 0);
		out_be32(&priv->regs[handle->num - 1].gtccr, 0);
	} else {
		out_be32(&priv->regs[handle->num].gtccr, 0);
	}
}
EXPORT_SYMBOL(mpic_stop_timer);

/**
 * mpic_get_remain_time - get remaining timer time
 * @handle: the timer to be queried.
 * @time: returns the time remaining before the timer expires
 *
 * Query how much time is left before the timer raises its interrupt.
 */
void mpic_get_remain_time(struct mpic_timer *handle, struct timeval *time)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);
	struct cascade_priv *casc_priv;

	u64 ticks;
	u32 tmp_ticks;

	casc_priv = priv->timer[handle->num].cascade_handle;
	if (casc_priv) {
		tmp_ticks = in_be32(&priv->regs[handle->num].gtccr);
		tmp_ticks &= ~GTCCR_TOG;
		ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE;
		tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr);
		ticks += tmp_ticks;
	} else {
		ticks = in_be32(&priv->regs[handle->num].gtccr);
		ticks &= ~GTCCR_TOG;
	}

	convert_ticks_to_time(priv, ticks, time);
}
EXPORT_SYMBOL(mpic_get_remain_time);

/**
 * mpic_free_timer - free hardware timer
 * @handle: the timer to be freed.
 *
 * Free the timer and release its interrupt.
 *
 * Note: cannot be used in interrupt context.
 */
void mpic_free_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);

	struct cascade_priv *casc_priv;
	unsigned long flags;

	mpic_stop_timer(handle);

	casc_priv = priv->timer[handle->num].cascade_handle;

	free_irq(priv->timer[handle->num].irq, priv->timer[handle->num].dev);

	spin_lock_irqsave(&priv->lock, flags);
	if (casc_priv) {
		u32 tcr;
		tcr = casc_priv->tcr_value | (casc_priv->tcr_value <<
					MPIC_TIMER_TCR_ROVR_OFFSET);
		clrbits32(priv->group_tcr, tcr);
		priv->idle |= casc_priv->cascade_map;
		priv->timer[handle->num].cascade_handle = NULL;
	} else {
		priv->idle |= TIMER_OFFSET(handle->num);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(mpic_free_timer);

/**
 * mpic_request_timer - get a hardware timer
 * @fn: interrupt handler function
 * @dev: driver data passed to the handler
 * @time: the requested timer interval
 *
 * Allocates an idle hardware timer, programs the requested interval and
 * requests its interrupt. Returns a timer handle on success, or NULL on
 * failure.
 */
struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
					const struct timeval *time)
{
	struct mpic_timer *allocated_timer;
	int ret;

	if (list_empty(&timer_group_list))
		return NULL;

	if (!(time->tv_sec + time->tv_usec) ||
			time->tv_sec < 0 || time->tv_usec < 0)
		return NULL;

	if (time->tv_usec > ONE_SECOND)
		return NULL;

	allocated_timer = get_timer(time);
	if (!allocated_timer)
		return NULL;

	ret = request_irq(allocated_timer->irq, fn,
			IRQF_TRIGGER_LOW, "global-timer", dev);
	if (ret) {
		mpic_free_timer(allocated_timer);
		return NULL;
	}

	allocated_timer->dev = dev;

	return allocated_timer;
}
EXPORT_SYMBOL(mpic_request_timer);
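
/*
 * Illustrative sketch only (not part of this driver): how a client might
 * use the API exported above. The handler and function names below are
 * hypothetical.
 */
#if 0
static irqreturn_t example_timeout_handler(int irq, void *dev_id)
{
	/* dev_id is the "dev" pointer given to mpic_request_timer() */
	return IRQ_HANDLED;
}

static int example_use_mpic_timer(void *dev)
{
	struct timeval interval = { .tv_sec = 1, .tv_usec = 0 };
	struct timeval left;
	struct mpic_timer *t;

	t = mpic_request_timer(example_timeout_handler, dev, &interval);
	if (!t)
		return -ENODEV;

	mpic_start_timer(t);		/* interrupts start firing */
	mpic_get_remain_time(t, &left);	/* optional: query remaining time */
	mpic_stop_timer(t);
	mpic_free_timer(t);		/* must not be called in IRQ context */

	return 0;
}
#endif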

static int timer_group_get_freq(struct device_node *np,
			struct timer_group_priv *priv)
{
	u32 div;

	if (priv->flags & FSL_GLOBAL_TIMER) {
		struct device_node *dn;

		dn = of_find_compatible_node(NULL, NULL, "fsl,mpic");
		if (dn) {
			of_property_read_u32(dn, "clock-frequency",
					&priv->timerfreq);
			of_node_put(dn);
		}
	}

	if (priv->timerfreq <= 0)
		return -EINVAL;

	if (priv->flags & FSL_GLOBAL_TIMER) {
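		/* MPIC_TIMER_TCR_CLKDIV (0x300) selects divide-by-64: (1 << 3) * 8 */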
		div = (1 << (MPIC_TIMER_TCR_CLKDIV >> 8)) * 8;
		priv->timerfreq /= div;
	}

	return 0;
}

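/*
 * Parse the optional "fsl,available-ranges" property, a list of
 * <offset count> u32 pairs describing which timers in the group may be
 * used; when the property is absent all TIMERS_PER_GROUP timers are
 * assumed available. One interrupt is mapped per usable timer.
 */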
static int timer_group_get_irq(struct device_node *np,
		struct timer_group_priv *priv)
{
	const u32 all_timer[] = { 0, TIMERS_PER_GROUP };
	const u32 *p;
	u32 offset;
	u32 count;

	unsigned int i;
	unsigned int j;
	unsigned int irq_index = 0;
	unsigned int irq;
	int len;

	p = of_get_property(np, "fsl,available-ranges", &len);
	if (p && len % (2 * sizeof(u32)) != 0) {
		pr_err("%pOF: malformed available-ranges property.\n", np);
		return -EINVAL;
	}

	if (!p) {
		p = all_timer;
		len = sizeof(all_timer);
	}

	len /= 2 * sizeof(u32);

	for (i = 0; i < len; i++) {
		offset = p[i * 2];
		count = p[i * 2 + 1];
		for (j = 0; j < count; j++) {
			irq = irq_of_parse_and_map(np, irq_index);
			if (!irq) {
				pr_err("%pOF: irq parse and map failed.\n", np);
				return -EINVAL;
			}

			/* Set timer idle */
			priv->idle |= TIMER_OFFSET((offset + j));
			priv->timer[offset + j].irq = irq;
			priv->timer[offset + j].num = offset + j;
			irq_index++;
		}
	}

	return 0;
}

static void timer_group_init(struct device_node *np)
{
	struct timer_group_priv *priv;
	unsigned int i = 0;
	int ret;

	priv = kzalloc(sizeof(struct timer_group_priv), GFP_KERNEL);
	if (!priv) {
		pr_err("%pOF: cannot allocate memory for group.\n", np);
		return;
	}

	if (of_device_is_compatible(np, "fsl,mpic-global-timer"))
		priv->flags |= FSL_GLOBAL_TIMER;

	priv->regs = of_iomap(np, i++);
	if (!priv->regs) {
		pr_err("%pOF: cannot ioremap timer register address.\n", np);
		goto out;
	}

	if (priv->flags & FSL_GLOBAL_TIMER) {
		priv->group_tcr = of_iomap(np, i++);
		if (!priv->group_tcr) {
			pr_err("%pOF: cannot ioremap tcr address.\n", np);
			goto out;
		}
	}

	ret = timer_group_get_freq(np, priv);
	if (ret < 0) {
		pr_err("%pOF: cannot get timer frequency.\n", np);
		goto out;
	}

	ret = timer_group_get_irq(np, priv);
	if (ret < 0) {
		pr_err("%pOF: cannot get timer irqs.\n", np);
		goto out;
	}

	spin_lock_init(&priv->lock);

	/* Init FSL timer hardware */
	if (priv->flags & FSL_GLOBAL_TIMER)
		setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);

	list_add_tail(&priv->node, &timer_group_list);

	return;

out:
	if (priv->regs)
		iounmap(priv->regs);

	if (priv->group_tcr)
		iounmap(priv->group_tcr);

	kfree(priv);
}

static void mpic_timer_resume(void)
{
	struct timer_group_priv *priv;

	list_for_each_entry(priv, &timer_group_list, node) {
		/* Init FSL timer hardware */
		if (priv->flags & FSL_GLOBAL_TIMER)
			setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);
	}
}

static const struct of_device_id mpic_timer_ids[] = {
	{ .compatible = "fsl,mpic-global-timer", },
	{},
};

static struct syscore_ops mpic_timer_syscore_ops = {
	.resume = mpic_timer_resume,
};

static int __init mpic_timer_init(void)
{
	struct device_node *np = NULL;

	for_each_matching_node(np, mpic_timer_ids)
		timer_group_init(np);

	register_syscore_ops(&mpic_timer_syscore_ops);

	if (list_empty(&timer_group_list))
		return -ENODEV;

	return 0;
}
subsys_initcall(mpic_timer_init);