xref: /openbmc/linux/arch/powerpc/sysdev/mpic_timer.c (revision 12eb4683)
/*
 * MPIC timer driver
 *
 * Copyright 2013 Freescale Semiconductor, Inc.
 * Author: Dongsheng Wang <Dongsheng.Wang@freescale.com>
 *	   Li Yang <leoli@freescale.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include <sysdev/fsl_soc.h>
#include <asm/io.h>

#include <asm/mpic_timer.h>

#define FSL_GLOBAL_TIMER		0x1

/* Clock Ratio
 * Divide by 64 0x00000300
 * Divide by 32 0x00000200
 * Divide by 16 0x00000100
 * Divide by  8 0x00000000 (Hardware default div)
 */
#define MPIC_TIMER_TCR_CLKDIV		0x00000300

#define MPIC_TIMER_TCR_ROVR_OFFSET	24

#define TIMER_STOP			0x80000000
#define TIMERS_PER_GROUP		4
#define MAX_TICKS			(~0U >> 1)
#define MAX_TICKS_CASCADE		(~0U)
#define TIMER_OFFSET(num)		(1 << (TIMERS_PER_GROUP - 1 - (num)))
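
/*
 * Worked example (added for clarity): the idle bitmap is reversed with
 * respect to the timer number, so with TIMERS_PER_GROUP = 4:
 *	TIMER_OFFSET(0) = 0x8, TIMER_OFFSET(1) = 0x4,
 *	TIMER_OFFSET(2) = 0x2, TIMER_OFFSET(3) = 0x1
 * i.e. bit 3 of ->idle tracks timer 0 and bit 0 tracks timer 3.
 */
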
/* tv_usec should be less than ONE_SECOND, otherwise use tv_sec */
#define ONE_SECOND			1000000

struct timer_regs {
	u32	gtccr;
	u32	res0[3];
	u32	gtbcr;
	u32	res1[3];
	u32	gtvpr;
	u32	res2[3];
	u32	gtdr;
	u32	res3[3];
};

struct cascade_priv {
	u32 tcr_value;			/* TCR register: CASC & ROVR value */
	unsigned int cascade_map;	/* cascade map */
	unsigned int timer_num;		/* cascade control timer */
};

struct timer_group_priv {
	struct timer_regs __iomem	*regs;
	struct mpic_timer		timer[TIMERS_PER_GROUP];
	struct list_head		node;
	unsigned int			timerfreq;
	unsigned int			idle;
	unsigned int			flags;
	spinlock_t			lock;
	void __iomem			*group_tcr;
};

static struct cascade_priv cascade_timer[] = {
	/* cascade timer 0 and 1 */
	{0x1, 0xc, 0x1},
	/* cascade timer 1 and 2 */
	{0x2, 0x6, 0x2},
	/* cascade timer 2 and 3 */
	{0x4, 0x3, 0x3}
};
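
/*
 * Worked example (added for clarity): in the "cascade timer 0 and 1"
 * entry above, cascade_map = 0xc = TIMER_OFFSET(0) | TIMER_OFFSET(1),
 * so both timers are claimed in ->idle, and timer_num = 1 names the
 * cascade control timer whose GTBCR receives the TIMER_STOP bit in
 * set_cascade_timer().
 */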

static LIST_HEAD(timer_group_list);

static void convert_ticks_to_time(struct timer_group_priv *priv,
		const u64 ticks, struct timeval *time)
{
	u64 tmp_sec;

	time->tv_sec = (__kernel_time_t)div_u64(ticks, priv->timerfreq);
	tmp_sec = (u64)time->tv_sec * (u64)priv->timerfreq;

	time->tv_usec = (__kernel_suseconds_t)
		div_u64((ticks - tmp_sec) * 1000000, priv->timerfreq);
}
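
/*
 * Worked example (added for clarity, assuming timerfreq = 6250000 Hz):
 * ticks = 15625000 gives tv_sec = 15625000 / 6250000 = 2 and
 * tv_usec = (15625000 - 2 * 6250000) * 1000000 / 6250000 = 500000,
 * i.e. 2.5 seconds.
 */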

/* the time set by the user is converted to "ticks" */
static int convert_time_to_ticks(struct timer_group_priv *priv,
		const struct timeval *time, u64 *ticks)
{
	u64 max_value;		/* prevent u64 overflow */
	u64 tmp = 0;

	u64 tmp_sec;
	u64 tmp_ms;
	u64 tmp_us;

	max_value = div_u64(ULLONG_MAX, priv->timerfreq);

	if (time->tv_sec > max_value ||
			(time->tv_sec == max_value && time->tv_usec > 0))
		return -EINVAL;

	tmp_sec = (u64)time->tv_sec * (u64)priv->timerfreq;
	tmp += tmp_sec;

	tmp_ms = time->tv_usec / 1000;
	tmp_ms = div_u64((u64)tmp_ms * (u64)priv->timerfreq, 1000);
	tmp += tmp_ms;

	tmp_us = time->tv_usec % 1000;
	tmp_us = div_u64((u64)tmp_us * (u64)priv->timerfreq, 1000000);
	tmp += tmp_us;

	*ticks = tmp;

	return 0;
}
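
/*
 * Worked example (added for clarity, same assumed 6250000 Hz timerfreq):
 * time = { .tv_sec = 2, .tv_usec = 500000 } yields
 *	tmp_sec = 2 * 6250000          = 12500000
 *	tmp_ms  = 500 * 6250000 / 1000 =  3125000
 *	tmp_us  = 0
 * so *ticks = 15625000, the inverse of the conversion above.
 */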

/* detect whether there is a cascade timer available */
static struct mpic_timer *detect_idle_cascade_timer(
					struct timer_group_priv *priv)
{
	struct cascade_priv *casc_priv;
	unsigned int map;
	unsigned int array_size = ARRAY_SIZE(cascade_timer);
	unsigned int num;
	unsigned int i;
	unsigned long flags;

	casc_priv = cascade_timer;
	for (i = 0; i < array_size; i++) {
		spin_lock_irqsave(&priv->lock, flags);
		map = casc_priv->cascade_map & priv->idle;
		if (map == casc_priv->cascade_map) {
			num = casc_priv->timer_num;
			priv->timer[num].cascade_handle = casc_priv;

			/* set timer busy */
			priv->idle &= ~casc_priv->cascade_map;
			spin_unlock_irqrestore(&priv->lock, flags);
			return &priv->timer[num];
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		casc_priv++;
	}

	return NULL;
}

static int set_cascade_timer(struct timer_group_priv *priv, u64 ticks,
		unsigned int num)
{
	struct cascade_priv *casc_priv;
	u32 tcr;
	u32 tmp_ticks;
	u32 rem_ticks;

	/* set group tcr reg for cascade */
	casc_priv = priv->timer[num].cascade_handle;
	if (!casc_priv)
		return -EINVAL;

	tcr = casc_priv->tcr_value |
		(casc_priv->tcr_value << MPIC_TIMER_TCR_ROVR_OFFSET);
	setbits32(priv->group_tcr, tcr);

	tmp_ticks = div_u64_rem(ticks, MAX_TICKS_CASCADE, &rem_ticks);

	out_be32(&priv->regs[num].gtccr, 0);
	out_be32(&priv->regs[num].gtbcr, tmp_ticks | TIMER_STOP);

	out_be32(&priv->regs[num - 1].gtccr, 0);
	out_be32(&priv->regs[num - 1].gtbcr, rem_ticks);

	return 0;
}
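
/*
 * Example of the split above (added for clarity): with MAX_TICKS_CASCADE
 * = 0xffffffff (4294967295), a request of ticks = 8589934597 programs the
 * cascade control timer (num) with tmp_ticks = 2 plus TIMER_STOP, and the
 * lower timer (num - 1) with rem_ticks = 7.
 */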

static struct mpic_timer *get_cascade_timer(struct timer_group_priv *priv,
					u64 ticks)
{
	struct mpic_timer *allocated_timer;

	/* Two cascade timers: Support the maximum time */
	const u64 max_ticks = (u64)MAX_TICKS * (u64)MAX_TICKS_CASCADE;
	int ret;

	if (ticks > max_ticks)
		return NULL;

	/* detect idle timer */
	allocated_timer = detect_idle_cascade_timer(priv);
	if (!allocated_timer)
		return NULL;

	/* set ticks to timer */
	ret = set_cascade_timer(priv, ticks, allocated_timer->num);
	if (ret < 0)
		return NULL;

	return allocated_timer;
}

static struct mpic_timer *get_timer(const struct timeval *time)
{
	struct timer_group_priv *priv;
	struct mpic_timer *timer;

	u64 ticks;
	unsigned int num;
	unsigned int i;
	unsigned long flags;
	int ret;

	list_for_each_entry(priv, &timer_group_list, node) {
		ret = convert_time_to_ticks(priv, time, &ticks);
		if (ret < 0)
			return NULL;

		if (ticks > MAX_TICKS) {
			if (!(priv->flags & FSL_GLOBAL_TIMER))
				return NULL;

			timer = get_cascade_timer(priv, ticks);
			if (!timer)
				continue;

			return timer;
		}

		for (i = 0; i < TIMERS_PER_GROUP; i++) {
			/* one timer: Reverse allocation */
			num = TIMERS_PER_GROUP - 1 - i;
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->idle & (1 << i)) {
				/* set timer busy */
				priv->idle &= ~(1 << i);
				/* set ticks & stop timer */
				out_be32(&priv->regs[num].gtbcr,
					ticks | TIMER_STOP);
				out_be32(&priv->regs[num].gtccr, 0);
				priv->timer[num].cascade_handle = NULL;
				spin_unlock_irqrestore(&priv->lock, flags);
				return &priv->timer[num];
			}
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	return NULL;
}

/**
 * mpic_start_timer - start hardware timer
 * @handle: the timer to be started.
 *
 * The ->fn(->dev) callback will be invoked from the hardware interrupt
 * once the time passed to mpic_request_timer() has elapsed.
 */
void mpic_start_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);

	clrbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
}
EXPORT_SYMBOL(mpic_start_timer);

/**
 * mpic_stop_timer - stop hardware timer
 * @handle: the timer to be stopped
 *
 * The timer periodically generates an interrupt unless the user stops it.
 */
void mpic_stop_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);
	struct cascade_priv *casc_priv;

	setbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);

	casc_priv = priv->timer[handle->num].cascade_handle;
	if (casc_priv) {
		out_be32(&priv->regs[handle->num].gtccr, 0);
		out_be32(&priv->regs[handle->num - 1].gtccr, 0);
	} else {
		out_be32(&priv->regs[handle->num].gtccr, 0);
	}
}
EXPORT_SYMBOL(mpic_stop_timer);

/**
 * mpic_get_remain_time - get the remaining time of a timer
 * @handle: the timer to be selected.
 * @time: the remaining time, returned to the caller
 *
 * Query how much time is left before the timer expires.
 */
void mpic_get_remain_time(struct mpic_timer *handle, struct timeval *time)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);
	struct cascade_priv *casc_priv;

	u64 ticks;
	u32 tmp_ticks;

	casc_priv = priv->timer[handle->num].cascade_handle;
	if (casc_priv) {
		tmp_ticks = in_be32(&priv->regs[handle->num].gtccr);
		ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE;
		tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr);
		ticks += tmp_ticks;
	} else {
		ticks = in_be32(&priv->regs[handle->num].gtccr);
	}

	convert_ticks_to_time(priv, ticks, time);
}
EXPORT_SYMBOL(mpic_get_remain_time);

/**
 * mpic_free_timer - free hardware timer
 * @handle: the timer to be removed.
 *
 * Free the timer.
 *
 * Note: cannot be used in interrupt context.
 */
void mpic_free_timer(struct mpic_timer *handle)
{
	struct timer_group_priv *priv = container_of(handle,
			struct timer_group_priv, timer[handle->num]);

	struct cascade_priv *casc_priv;
	unsigned long flags;

	mpic_stop_timer(handle);

	casc_priv = priv->timer[handle->num].cascade_handle;

	free_irq(priv->timer[handle->num].irq, priv->timer[handle->num].dev);

	spin_lock_irqsave(&priv->lock, flags);
	if (casc_priv) {
		u32 tcr;
		tcr = casc_priv->tcr_value | (casc_priv->tcr_value <<
					MPIC_TIMER_TCR_ROVR_OFFSET);
		clrbits32(priv->group_tcr, tcr);
		priv->idle |= casc_priv->cascade_map;
		priv->timer[handle->num].cascade_handle = NULL;
	} else {
		priv->idle |= TIMER_OFFSET(handle->num);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(mpic_free_timer);

/**
 * mpic_request_timer - get a hardware timer
 * @fn: interrupt handler function
 * @dev: the data passed to the interrupt handler
 * @time: the requested timer period
 *
 * This allocates a timer and calls request_irq(); it returns a handle
 * on success, or NULL on failure.
 */
struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
					const struct timeval *time)
{
	struct mpic_timer *allocated_timer;
	int ret;

	if (list_empty(&timer_group_list))
		return NULL;

	if (!(time->tv_sec + time->tv_usec) ||
			time->tv_sec < 0 || time->tv_usec < 0)
		return NULL;

	if (time->tv_usec > ONE_SECOND)
		return NULL;

	allocated_timer = get_timer(time);
	if (!allocated_timer)
		return NULL;

	ret = request_irq(allocated_timer->irq, fn,
			IRQF_TRIGGER_LOW, "global-timer", dev);
	if (ret) {
		mpic_free_timer(allocated_timer);
		return NULL;
	}

	allocated_timer->dev = dev;

	return allocated_timer;
}
EXPORT_SYMBOL(mpic_request_timer);
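
/*
 * Minimal usage sketch (added for illustration; my_timeout_handler and
 * my_dev are hypothetical names, not part of this driver):
 *
 *	static irqreturn_t my_timeout_handler(int irq, void *dev)
 *	{
 *		// dev is the pointer passed as @dev to mpic_request_timer()
 *		return IRQ_HANDLED;
 *	}
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *	struct mpic_timer *t;
 *
 *	t = mpic_request_timer(my_timeout_handler, my_dev, &tv);
 *	if (t)
 *		mpic_start_timer(t);
 *
 *	// later, when no longer needed (not in interrupt context);
 *	// mpic_free_timer() also stops the timer and releases its irq:
 *	if (t)
 *		mpic_free_timer(t);
 */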

static int timer_group_get_freq(struct device_node *np,
			struct timer_group_priv *priv)
{
	u32 div;

	if (priv->flags & FSL_GLOBAL_TIMER) {
		struct device_node *dn;

		dn = of_find_compatible_node(NULL, NULL, "fsl,mpic");
		if (dn) {
			of_property_read_u32(dn, "clock-frequency",
					&priv->timerfreq);
			of_node_put(dn);
		}
	}

	if (priv->timerfreq <= 0)
		return -EINVAL;

	if (priv->flags & FSL_GLOBAL_TIMER) {
		div = (1 << (MPIC_TIMER_TCR_CLKDIV >> 8)) * 8;
		priv->timerfreq /= div;
	}

	return 0;
}
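
/*
 * Worked example (added for clarity, with an assumed MPIC clock-frequency
 * of 400000000 Hz): MPIC_TIMER_TCR_CLKDIV = 0x300 selects divide-by-64,
 * since div = (1 << (0x300 >> 8)) * 8 = 64, so timerfreq becomes
 * 400000000 / 64 = 6250000 Hz, the value assumed in the conversion
 * examples above.
 */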

static int timer_group_get_irq(struct device_node *np,
		struct timer_group_priv *priv)
{
	const u32 all_timer[] = { 0, TIMERS_PER_GROUP };
	const u32 *p;
	u32 offset;
	u32 count;

	unsigned int i;
	unsigned int j;
	unsigned int irq_index = 0;
	unsigned int irq;
	int len;

	p = of_get_property(np, "fsl,available-ranges", &len);
	if (p && len % (2 * sizeof(u32)) != 0) {
		pr_err("%s: malformed available-ranges property.\n",
				np->full_name);
		return -EINVAL;
	}

	if (!p) {
		p = all_timer;
		len = sizeof(all_timer);
	}

	len /= 2 * sizeof(u32);

	for (i = 0; i < len; i++) {
		offset = p[i * 2];
		count = p[i * 2 + 1];
		for (j = 0; j < count; j++) {
			irq = irq_of_parse_and_map(np, irq_index);
			if (!irq) {
				pr_err("%s: irq parse and map failed.\n",
						np->full_name);
				return -EINVAL;
			}

			/* Set timer idle */
			priv->idle |= TIMER_OFFSET((offset + j));
			priv->timer[offset + j].irq = irq;
			priv->timer[offset + j].num = offset + j;
			irq_index++;
		}
	}

	return 0;
}
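
/*
 * Example property layout (added for illustration; the values are
 * hypothetical): a node carrying
 *	fsl,available-ranges = <1 3>;
 * describes one <offset count> pair, marking timers 1, 2 and 3 as usable,
 * and one interrupt is then mapped for each timer in that range.  Without
 * the property, all TIMERS_PER_GROUP timers are treated as available.
 */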

static void timer_group_init(struct device_node *np)
{
	struct timer_group_priv *priv;
	unsigned int i = 0;
	int ret;

	priv = kzalloc(sizeof(struct timer_group_priv), GFP_KERNEL);
	if (!priv) {
		pr_err("%s: cannot allocate memory for group.\n",
				np->full_name);
		return;
	}

	if (of_device_is_compatible(np, "fsl,mpic-global-timer"))
		priv->flags |= FSL_GLOBAL_TIMER;

	priv->regs = of_iomap(np, i++);
	if (!priv->regs) {
		pr_err("%s: cannot ioremap timer register address.\n",
				np->full_name);
		goto out;
	}

	if (priv->flags & FSL_GLOBAL_TIMER) {
		priv->group_tcr = of_iomap(np, i++);
		if (!priv->group_tcr) {
			pr_err("%s: cannot ioremap tcr address.\n",
					np->full_name);
			goto out;
		}
	}

	ret = timer_group_get_freq(np, priv);
	if (ret < 0) {
		pr_err("%s: cannot get timer frequency.\n", np->full_name);
		goto out;
	}

	ret = timer_group_get_irq(np, priv);
	if (ret < 0) {
		pr_err("%s: cannot get timer irqs.\n", np->full_name);
		goto out;
	}

	spin_lock_init(&priv->lock);

	/* Init FSL timer hardware */
	if (priv->flags & FSL_GLOBAL_TIMER)
		setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);

	list_add_tail(&priv->node, &timer_group_list);

	return;

out:
	if (priv->regs)
		iounmap(priv->regs);

	if (priv->group_tcr)
		iounmap(priv->group_tcr);

	kfree(priv);
}

static void mpic_timer_resume(void)
{
	struct timer_group_priv *priv;

	list_for_each_entry(priv, &timer_group_list, node) {
		/* Init FSL timer hardware */
		if (priv->flags & FSL_GLOBAL_TIMER)
			setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);
	}
}

static const struct of_device_id mpic_timer_ids[] = {
	{ .compatible = "fsl,mpic-global-timer", },
	{},
};

static struct syscore_ops mpic_timer_syscore_ops = {
	.resume = mpic_timer_resume,
};

static int __init mpic_timer_init(void)
{
	struct device_node *np = NULL;

	for_each_matching_node(np, mpic_timer_ids)
		timer_group_init(np);

	register_syscore_ops(&mpic_timer_syscore_ops);

	if (list_empty(&timer_group_list))
		return -ENODEV;

	return 0;
}
subsys_initcall(mpic_timer_init);