// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/sched_clock.h>
#include <linux/cpu.h>
#include <linux/of_irq.h>
#include <asm/reg_ops.h>

#include "timer-of.h"

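/*
 * Per-cpu timer control registers, accessed with mfcr/mtcr rather than
 * MMIO (see the note in csky_mptimer_init()). Roles as used below:
 * CCVR is read as the free-running counter value, CTLR starts/stops the
 * timer, LVR takes the next-event delta and TSR is cleared to
 * acknowledge the interrupt.
 */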
#define PTIM_CCVR	"cr<3, 14>"
#define PTIM_CTLR	"cr<0, 14>"
#define PTIM_LVR	"cr<6, 14>"
#define PTIM_TSR	"cr<1, 14>"

static int csky_mptimer_irq;

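/* Arm the next oneshot event: the cycle delta is written to PTIM_LVR. */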
static int csky_mptimer_set_next_event(unsigned long delta,
				       struct clock_event_device *ce)
{
	mtcr(PTIM_LVR, delta);

	return 0;
}

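/*
 * clockevent state callbacks: writing 1 to PTIM_CTLR starts the timer,
 * writing 0 stops it.
 */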
static int csky_mptimer_shutdown(struct clock_event_device *ce)
{
	mtcr(PTIM_CTLR, 0);

	return 0;
}

static int csky_mptimer_oneshot(struct clock_event_device *ce)
{
	mtcr(PTIM_CTLR, 1);

	return 0;
}

static int csky_mptimer_oneshot_stopped(struct clock_event_device *ce)
{
	mtcr(PTIM_CTLR, 0);

	return 0;
}

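/* One timer_of instance per cpu; only oneshot mode is implemented. */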
static DEFINE_PER_CPU(struct timer_of, csky_to) = {
	.flags					= TIMER_OF_CLOCK,
	.clkevt = {
		.rating				= 300,
		.features			= CLOCK_EVT_FEAT_PERCPU |
						  CLOCK_EVT_FEAT_ONESHOT,
		.set_state_shutdown		= csky_mptimer_shutdown,
		.set_state_oneshot		= csky_mptimer_oneshot,
		.set_state_oneshot_stopped	= csky_mptimer_oneshot_stopped,
		.set_next_event			= csky_mptimer_set_next_event,
	},
};

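/*
 * Per-cpu timer interrupt: acknowledge the timer by clearing PTIM_TSR,
 * then run this cpu's event handler.
 */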
static irqreturn_t csky_timer_interrupt(int irq, void *dev)
{
	struct timer_of *to = this_cpu_ptr(&csky_to);

	mtcr(PTIM_TSR, 0);

	to->clkevt.event_handler(&to->clkevt);

	return IRQ_HANDLED;
}

/*
 * clock event for percpu
 */
static int csky_mptimer_starting_cpu(unsigned int cpu)
{
	struct timer_of *to = per_cpu_ptr(&csky_to, cpu);

	to->clkevt.cpumask = cpumask_of(cpu);

	enable_percpu_irq(csky_mptimer_irq, 0);

	clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
					2, ULONG_MAX);

	return 0;
}

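/* Hotplug "dying" callback, run on the outgoing cpu: mask its per-cpu irq. */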
static int csky_mptimer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(csky_mptimer_irq);

	return 0;
}

/*
 * clock source
 */
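/* Both the clocksource and sched_clock read the free-running counter, PTIM_CCVR. */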
static u64 notrace sched_clock_read(void)
{
	return (u64)mfcr(PTIM_CCVR);
}

static u64 clksrc_read(struct clocksource *c)
{
	return (u64)mfcr(PTIM_CCVR);
}

static struct clocksource csky_clocksource = {
	.name	= "csky",
	.rating	= 400,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	.read	= clksrc_read,
};

static int __init csky_mptimer_init(struct device_node *np)
{
	int ret, cpu, cpu_rollback;
	struct timer_of *to = NULL;

	/*
	 * csky_mptimer is designed for C-SKY SMP multi-processors and
	 * every core has its own private irq and regs for clkevt and
	 * clksrc.
	 *
	 * The regs are accessed by the cpu instructions mfcr/mtcr instead
	 * of being mmio-mapped, so no mmio address is needed in the dts,
	 * but the clk and irq number still have to be given.
	 *
	 * We use a private irq for the mptimer and the irq number is the
	 * same for every core, so we call request_percpu_irq() ourselves
	 * rather than relying on the irq setup in timer_of_init().
	 */
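	/*
	 * Purely illustrative device tree shape implied by the note above;
	 * the values below are placeholders, not taken from a real dts:
	 *
	 *	timer {
	 *		compatible = "csky,mptimer";
	 *		clocks = <&dummy_apb>;
	 *		interrupts = <16>;
	 *	};
	 */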
	csky_mptimer_irq = irq_of_parse_and_map(np, 0);
	if (csky_mptimer_irq <= 0)
		return -EINVAL;

	ret = request_percpu_irq(csky_mptimer_irq, csky_timer_interrupt,
				 "csky_mp_timer", &csky_to);
	if (ret)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		to = per_cpu_ptr(&csky_to, cpu);
		ret = timer_of_init(np, to);
		if (ret)
			goto rollback;
	}

	clocksource_register_hz(&csky_clocksource, timer_of_rate(to));
	sched_clock_register(sched_clock_read, 32, timer_of_rate(to));

	ret = cpuhp_setup_state(CPUHP_AP_CSKY_TIMER_STARTING,
				"clockevents/csky/timer:starting",
				csky_mptimer_starting_cpu,
				csky_mptimer_dying_cpu);
	if (ret)
		return -EINVAL;

	return 0;

rollback:
	for_each_possible_cpu(cpu_rollback) {
		if (cpu_rollback == cpu)
			break;

		to = per_cpu_ptr(&csky_to, cpu_rollback);
		timer_of_cleanup(to);
	}
	return -EINVAL;
}
TIMER_OF_DECLARE(csky_mptimer, "csky,mptimer", csky_mptimer_init);