xref: /openbmc/linux/arch/mips/kernel/cevt-r4k.c (revision 9ac8d3fb)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>

/*
 * The SMTC Kernel for the 34K, 1004K, et al. replaces several
 * of these routines with SMTC-specific variants.
 */

#ifndef CONFIG_MIPS_MT_SMTC

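/*
 * Program the CP0 Compare register "delta" ticks into the future.
 * If Count has already passed the new Compare value by the time we
 * re-read it, the deadline was missed and -ETIME is returned.
 */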
static int mips_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	unsigned int cnt;
	int res;

	cnt = read_c0_count();
	cnt += delta;
	write_c0_compare(cnt);
	res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
	return res;
}

#endif /* CONFIG_MIPS_MT_SMTC */

void mips_set_clock_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	/* Nothing to do ...  */
}

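/*
 * Per-CPU clock event device, plus a flag recording whether the shared
 * CP0 compare IRQ handler has already been installed.
 */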
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;

#ifndef CONFIG_MIPS_MT_SMTC

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		goto out;

	/*
	 * With the performance counter case handled above, the reason we
	 * got here should be a timer interrupt.  Being the paranoiacs we
	 * are, we check anyway.
	 */
	/* On R2 and later cores, Cause.TI (bit 30) confirms a pending timer interrupt. */
	if (!r2 || (read_c0_cause() & (1 << 30))) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
	}

out:
	return IRQ_HANDLED;
}

#endif /* Not CONFIG_MIPS_MT_SMTC */

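/* irqaction for the CP0 compare (timer) interrupt, serviced on every CPU. */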
struct irqaction c0_compare_irqaction = {
	.handler = c0_compare_interrupt,
	.flags = IRQF_DISABLED | IRQF_PERCPU,
	.name = "timer",
};

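/*
 * Dummy handler used at registration time; the clockevents core replaces
 * it with the real tick handler once the device is taken into use.
 */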
void mips_event_handler(struct clock_event_device *dev)
{
}

/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
static int c0_compare_int_pending(void)
{
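	/*
	 * The Cause.IP bits live at bits 8..15, so the compare interrupt's
	 * pending bit is bit (8 + cp0_compare_irq).  Shifting right by
	 * cp0_compare_irq leaves that bit at position 8, hence the 0x100 mask.
	 */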
	return (read_c0_cause() >> cp0_compare_irq) & 0x100;
}

/*
 * Compare interrupt can be routed and latched outside the core,
 * so a single execution hazard barrier may not be enough to give
 * it time to clear as seen in the Cause register.  4 times the
 * pipeline depth seems reasonably conservative, and empirically
 * works better in configurations with high CPU/bus clock ratios.
 */

#define compare_change_hazard() \
	do { \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
	} while (0)

int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;

	/*
	 * IP7 already pending?  Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		write_c0_compare(read_c0_count());
		compare_change_hazard();
		if (c0_compare_int_pending())
			return 0;
	}

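	/*
	 * Arm Compare a short distance ahead of Count, doubling the
	 * distance until the write actually lands in the future.
	 */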
	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		compare_change_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
			break;
		/* increase delta if the timer was already expired */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry */

	compare_change_hazard();
	if (!c0_compare_int_pending())
		return 0;

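	/* Ack by rewriting Compare; a usable timer must drop the pending bit. */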
	write_c0_compare(read_c0_count());
	compare_change_hazard();
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}

#ifndef CONFIG_MIPS_MT_SMTC

int __cpuinit mips_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	if (!c0_compare_int_usable())
		return -ENXIO;

	/*
	 * With vectored interrupts things get platform specific.
	 * get_c0_compare_int is a hook that allows a platform to return
	 * the interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;

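	/*
	 * mult/shift convert a nanosecond delta into Count ticks:
	 * ticks = (ns * mult) >> shift, with shift fixed at 32.
	 */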
	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift		= 32;

	/* Calculate the min / max delta */
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);

	cd->rating		= 300;
	cd->irq			= irq;
	cd->cpumask		= cpumask_of_cpu(cpu);
	cd->set_next_event	= mips_next_event;
	cd->set_mode		= mips_set_clock_mode;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

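	/* The compare IRQ uses one shared handler; install it only once. */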
	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

	setup_irq(irq, &c0_compare_irqaction);

	return 0;
}

#endif /* Not CONFIG_MIPS_MT_SMTC */