xref: /openbmc/linux/arch/mips/kernel/cevt-r4k.c (revision f15cbe6f1a4b4d9df59142fc8e4abb973302cf44)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/smtc_ipi.h>
#include <asm/time.h>

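/*
 * Program the next oneshot event: set Compare to Count + delta, then
 * re-read Count.  If Count has already passed the new Compare value the
 * event was missed, so report -ETIME.  Under SMTC the whole sequence
 * runs with interrupts off and multi-VPE execution disabled (dvpe/evpe)
 * so the CP0 read-modify-write is not disturbed.
 */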
static int mips_next_event(unsigned long delta,
                           struct clock_event_device *evt)
{
	unsigned int cnt;
	int res;

#ifdef CONFIG_MIPS_MT_SMTC
	{
	unsigned long flags, vpflags;
	local_irq_save(flags);
	vpflags = dvpe();
#endif
	cnt = read_c0_count();
	cnt += delta;
	write_c0_compare(cnt);
	res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
#ifdef CONFIG_MIPS_MT_SMTC
	evpe(vpflags);
	local_irq_restore(flags);
	}
#endif
	return res;
}

static void mips_set_mode(enum clock_event_mode mode,
                          struct clock_event_device *evt)
{
	/* Nothing to do ...  */
}

static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
static int cp0_timer_irq_installed;

/*
 * Timer ack for an R4k-compatible timer of a known frequency.  Writing
 * the Compare register (here with its current value, so the programmed
 * deadline is unchanged) clears the pending timer interrupt.
 */
static void c0_timer_ack(void)
{
	write_c0_compare(read_c0_compare());
}

/*
 * Possibly handle a performance counter interrupt.
 * Return true if the timer interrupt should not be checked.
 */
static inline int handle_perf_irq(int r2)
{
	/*
	 * The performance counter overflow interrupt may be shared with the
	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
	 * and we can't reliably determine if a counter interrupt has also
	 * happened (!r2) then don't check for a timer interrupt.
	 */
	return (cp0_perfcount_irq < 0) &&
		perf_irq() == IRQ_HANDLED &&
		!r2;
}

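/*
 * CP0 compare interrupt handler: give the (possibly shared) performance
 * counter handler first crack, then ack the timer and call the per-CPU
 * clockevent handler.  Under SMTC only VPE 0's device receives events;
 * other VPEs just ack and return.
 */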
static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		goto out;

	/*
	 * The same applies to performance counter interrupts.  But with the
	 * above we now know that the reason we got here must be a timer
	 * interrupt.  Being the paranoiacs we are we check anyway.
	 */
	if (!r2 || (read_c0_cause() & (1 << 30))) {	/* Cause.TI */
		c0_timer_ack();
#ifdef CONFIG_MIPS_MT_SMTC
		if (cpu_data[cpu].vpe_id)
			goto out;
		cpu = 0;
#endif
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
	}

out:
	return IRQ_HANDLED;
}

static struct irqaction c0_compare_irqaction = {
	.handler = c0_compare_interrupt,
#ifdef CONFIG_MIPS_MT_SMTC
	.flags = IRQF_DISABLED,
#else
	.flags = IRQF_DISABLED | IRQF_PERCPU,
#endif
	.name = "timer",
};

#ifdef CONFIG_MIPS_MT_SMTC
DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);

static void smtc_set_mode(enum clock_event_mode mode,
                          struct clock_event_device *evt)
{
}

static void mips_broadcast(cpumask_t mask)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask)
		smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
}

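/*
 * Register a dummy, broadcast-only clockevent device for this CPU.
 * Only VPE 0's real compare timer is registered in
 * mips_clockevent_init(); the other CPUs receive their ticks as
 * SMTC_CLOCK_TICK IPIs sent from mips_broadcast().
 */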
static void setup_smtc_dummy_clockevent_device(void)
{
	//uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;

	cd = &per_cpu(smtc_dummy_clockevent_device, cpu);

	cd->name		= "SMTC";
	cd->features		= CLOCK_EVT_FEAT_DUMMY;

	/* Calculate the min / max delta */
	cd->mult	= 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift		= 0; //32;
	cd->max_delta_ns	= 0; //clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= 0; //clockevent_delta2ns(0x30, cd);

	cd->rating		= 200;
	cd->irq			= 17; //-1;
//	if (cpu)
//		cd->cpumask	= CPU_MASK_ALL; // cpumask_of_cpu(cpu);
//	else
		cd->cpumask	= cpumask_of_cpu(cpu);

	cd->set_mode		= smtc_set_mode;

	cd->broadcast		= mips_broadcast;

	clockevents_register_device(cd);
}
#endif

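/*
 * Placeholder event handler, installed before the device is registered;
 * the clockevent core replaces it with the real tick handler, so a
 * compare interrupt that fires early hits a harmless no-op instead of
 * a NULL pointer.
 */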
static void mips_event_handler(struct clock_event_device *dev)
{
}

/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
static int c0_compare_int_pending(void)
{
	/* Cause.IP starts at bit 8; the shift leaves our bit at 0x100 */
	return (read_c0_cause() >> cp0_compare_irq) & 0x100;
}

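/*
 * Probe whether the count/compare interrupt actually works: clear any
 * pending interrupt, program Compare a growing distance into the
 * future until the write lands before Count catches up, busy-wait for
 * the deadline to pass, then check that the interrupt both asserts and
 * can be acked away again.
 */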
static int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;

	/*
	 * IP7 already pending?  Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		write_c0_compare(read_c0_count());
		irq_disable_hazard();
		if (c0_compare_int_pending())
			return 0;
	}

	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		irq_disable_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
			break;
		/* increase delta if the timer was already expired */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry  */

	if (!c0_compare_int_pending())
		return 0;

	write_c0_compare(read_c0_count());
	irq_disable_hazard();
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}

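/*
 * Probe and register the CP0 count/compare clockevent device for this
 * CPU.  The compare interrupt handler itself is installed only once,
 * by the first CPU to get here.
 */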
int __cpuinit mips_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

#ifdef CONFIG_MIPS_MT_SMTC
	setup_smtc_dummy_clockevent_device();

	/*
	 * On SMTC we only register VPE0's compare interrupt as clockevent
	 * device.
	 */
	if (cpu)
		return 0;
#endif

	if (!c0_compare_int_usable())
		return -ENXIO;

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the min / max delta */
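	/*
	 * mult/shift encode the ns -> Count-cycle conversion used by the
	 * clockevent core: cycles ~= (ns * mult) >> shift, with
	 * mult = (mips_freq << 32) / NSEC_PER_SEC.  clockevent_delta2ns()
	 * applies the inverse, so max_delta_ns corresponds to 0x7fffffff
	 * cycles (the largest delta the signed 32-bit comparison in
	 * mips_next_event() can handle) and min_delta_ns to 0x300 cycles.
	 */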
	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift		= 32;
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);

	cd->rating		= 300;
	cd->irq			= irq;
#ifdef CONFIG_MIPS_MT_SMTC
	cd->cpumask		= CPU_MASK_ALL;
#else
	cd->cpumask		= cpumask_of_cpu(cpu);
#endif
	cd->set_next_event	= mips_next_event;
	cd->set_mode		= mips_set_mode;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

#ifdef CONFIG_MIPS_MT_SMTC
#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
	setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
#else
	setup_irq(irq, &c0_compare_irqaction);
#endif

	return 0;
}