xref: /openbmc/linux/arch/mips/kernel/genex.S (revision c21b37f6)
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2001 MIPS Technologies, Inc.
9 * Copyright (C) 2002 Maciej W. Rozycki
10 */
11#include <linux/init.h>
12
13#include <asm/asm.h>
14#include <asm/asmmacro.h>
15#include <asm/cacheops.h>
16#include <asm/irqflags.h>
17#include <asm/regdef.h>
18#include <asm/fpregdef.h>
19#include <asm/mipsregs.h>
20#include <asm/stackframe.h>
21#include <asm/war.h>
22#include <asm/page.h>
23
/*
 * PANIC_PIC(msg) - position-independent panic for early exception
 * stubs: load the message address into a0, then jump to panic()
 * through AT so no relocation of this code is required.  The
 * "9: b 9b" spins forever in case panic() ever returned.
 * NOTE(review): label 8 is presumably emitted by TEXT() — confirm
 * against <asm/asm.h>.
 */
24#define PANIC_PIC(msg)					\
25		.set push;				\
26		.set	reorder;			\
27		PTR_LA	a0,8f;				\
28		.set	noat;				\
29		PTR_LA	AT, panic;			\
30		jr	AT;				\
319:		b	9b;				\
32		.set	pop;				\
33		TEXT(msg)
34
35	__INIT
36
/* Stub for exception vector 0: reaching it is fatal. */
37NESTED(except_vec0_generic, 0, sp)
38	PANIC_PIC("Exception vector 0 called")
39	END(except_vec0_generic)
40
/* Stub for exception vector 1: reaching it is fatal. */
41NESTED(except_vec1_generic, 0, sp)
42	PANIC_PIC("Exception vector 1 called")
43	END(except_vec1_generic)
44
45/*
46 * General exception vector for all other CPUs.
47 *
48 * Be careful when changing this, it has to be at most 128 bytes
49 * to fit into space reserved for the exception handler.
50 */
51NESTED(except_vec3_generic, 0, sp)
52	.set	push
53	.set	noat
54#if R5432_CP0_INTERRUPT_WAR
55	mfc0	k0, CP0_INDEX		# NOTE(review): looks like a dummy CP0 read for an R5432 erratum — confirm
56#endif
57	mfc0	k1, CP0_CAUSE
58	andi	k1, k1, 0x7c		# k1 = Cause.ExcCode << 2 (byte offset into handler table)
59#ifdef CONFIG_64BIT
60	dsll	k1, k1, 1		# offset doubled: 8-byte handler pointers on 64-bit
61#endif
62	PTR_L	k0, exception_handlers(k1)
63	jr	k0			# dispatch to the registered handler
64	.set	pop
65	END(except_vec3_generic)
66
67/*
68 * General exception handler for CPUs with virtual coherency exception.
69 *
70 * Be careful when changing this, it has to be at most 256 (as a special
71 * exception) bytes to fit into space reserved for the exception handler.
72 */
73NESTED(except_vec3_r4000, 0, sp)
74	.set	push
75	.set	mips3
76	.set	noat
77	mfc0	k1, CP0_CAUSE
78	li	k0, 31<<2		# ExcCode 31: VCED (see branch below)
79	andi	k1, k1, 0x7c		# k1 = Cause.ExcCode << 2
80	.set	push
81	.set	noreorder
82	.set	nomacro
83	beq	k1, k0, handle_vced
84	 li	k0, 14<<2		# delay slot: ExcCode 14 (VCEI)
85	beq	k1, k0, handle_vcei
86#ifdef CONFIG_64BIT
87	 dsll	k1, k1, 1		# delay slot: 8-byte pointers on 64-bit
88#endif
89	.set	pop
	/* Not a VCE: dispatch through the handler table. */
90	PTR_L	k0, exception_handlers(k1)
91	jr	k0
92
93	/*
94	 * Big shit, we now may have two dirty primary cache lines for the same
95	 * physical address.  We can safely invalidate the line pointed to by
96	 * c0_badvaddr because after return from this exception handler the
97	 * load / store will be re-executed.
98	 */
99handle_vced:
100	MFC0	k0, CP0_BADVADDR
101	li	k1, -4					# Is this ...
102	and	k0, k1					# ... really needed?
103	mtc0	zero, CP0_TAGLO
104	cache	Index_Store_Tag_D, (k0)			# invalidate primary D-cache line
105	cache	Hit_Writeback_Inv_SD, (k0)		# write back & invalidate secondary
106#ifdef CONFIG_PROC_FS
107	PTR_LA	k0, vced_count
108	lw	k1, (k0)
109	addiu	k1, 1
110	sw	k1, (k0)
111#endif
112	eret
113
114handle_vcei:
115	MFC0	k0, CP0_BADVADDR
116	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
117#ifdef CONFIG_PROC_FS
118	PTR_LA	k0, vcei_count
119	lw	k1, (k0)
120	addiu	k1, 1
121	sw	k1, (k0)
122#endif
123	eret
124	.set	pop
125	END(except_vec3_r4000)
126
127	__FINIT
128
/*
 * Level 0 interrupt entry: save full context, mask interrupts and
 * hand off to plat_irq_dispatch(), which returns via ret_from_irq.
 * Fix: the TRACE_IRQFLAGS fast-return path read "EP0_EPC", which is
 * not a CP0 register macro; the exception PC lives in CP0_EPC.
 */
	.align	5
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state. If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP		# previous interrupt-enable bit set?
	bnez	k0, 1f			# yes: handle the interrupt normally

	mfc0	k0, CP0_EPC		# was "EP0_EPC" — no such CP0 macro
	.set	noreorder
	j	k0			# resume the interrupted code,
	rfe				#  rfe in the delay slot (R3000 style)
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret				# interrupts were off: just return
#endif
1:
	.set pop
#endif
	SAVE_ALL
	CLI				# mask interrupts for the dispatcher
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)	# $28 = thread_info: remember previous pt_regs
	LONG_S	sp, TI_REGS($28)	# publish current pt_regs
	PTR_LA	ra, ret_from_irq	# dispatcher returns through ret_from_irq
	j	plat_irq_dispatch
	END(handle_int)
171
172	__INIT
173
174/*
175 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
176 * This is a dedicated interrupt exception vector which reduces the
177 * interrupt processing overhead.  The jump instruction will be replaced
178 * at the initialization time.
179 *
180 * Be careful when changing this, it has to be at most 128 bytes
181 * to fit into space reserved for the exception handler.
182 */
183NESTED(except_vec4, 0, sp)
/* Self-branch placeholder; patched at init with a jump to the real handler. */
1841:	j	1b			/* Dummy, will be replaced */
185	END(except_vec4)
186
187/*
188 * EJTAG debug exception handler.
189 * The EJTAG debug exception entry point is 0xbfc00480, which
190 * normally is in the boot PROM, so the boot PROM must do a
191 * unconditional jump to this vector.
192 */
193NESTED(except_vec_ejtag_debug, 0, sp)
194	j	ejtag_debug_handler	# tail-jump to the full handler below
195	END(except_vec_ejtag_debug)
196
197	__FINIT
198
199/*
200 * Vectored interrupt handler.
201 * This prototype is copied to ebase + n*IntCtl.VS and patched
202 * to invoke the handler
203 */
204NESTED(except_vec_vi, 0, sp)
205	SAVE_SOME
206	SAVE_AT
207	.set	push
208	.set	noreorder
209#ifdef CONFIG_MIPS_MT_SMTC
210	/*
211	 * To keep from blindly blocking *all* interrupts
212	 * during service by SMTC kernel, we also want to
213	 * pass the IM value to be cleared.
214	 */
215FEXPORT(except_vec_vi_mori)
216	ori	a0, $0, 0
217#endif /* CONFIG_MIPS_MT_SMTC */
/* The lui/ori pair is patched at init to load the per-vector handler
   address into v0 (except_vec_vi_handler jumps through v0). */
218FEXPORT(except_vec_vi_lui)
219	lui	v0, 0		/* Patched */
220	j	except_vec_vi_handler
221FEXPORT(except_vec_vi_ori)
222	 ori	v0, 0		/* Patched */
223	.set	pop
224	END(except_vec_vi)
225EXPORT(except_vec_vi_end)
226
227/*
228 * Common Vectored Interrupt code
229 * Complete the register saves and invoke the handler which is passed in $v0
230 */
231NESTED(except_vec_vi_handler, 0, sp)
232	SAVE_TEMP
233	SAVE_STATIC
234#ifdef CONFIG_MIPS_MT_SMTC
235	/*
236	 * SMTC has an interesting problem that interrupts are level-triggered,
237	 * and the CLI macro will clear EXL, potentially causing a duplicate
238	 * interrupt service invocation. So we need to clear the associated
239	 * IM bit of Status prior to doing CLI, and restore it after the
240	 * service routine has been invoked - we must assume that the
241	 * service routine will have cleared the state, and any active
242	 * level represents a new or otherwised unserviced event...
243	 */
244	mfc0	t1, CP0_STATUS
245	and	t0, a0, t1
246#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
247	mfc0	t2, CP0_TCCONTEXT
248	or	t0, t0, t2
249	mtc0	t0, CP0_TCCONTEXT
250#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
251	xor	t1, t1, t0
252	mtc0	t1, CP0_STATUS
253	_ehb
254#endif /* CONFIG_MIPS_MT_SMTC */
255	CLI
256#ifdef CONFIG_TRACE_IRQFLAGS
257	move	s0, v0		# preserve handler address across TRACE_IRQS_OFF
258#ifdef CONFIG_MIPS_MT_SMTC
259	move	s1, a0		# preserve IM-clear mask too
260#endif
261	TRACE_IRQS_OFF
262#ifdef CONFIG_MIPS_MT_SMTC
263	move	a0, s1
264#endif
265	move	v0, s0
266#endif
267
268	LONG_L	s0, TI_REGS($28)
269	LONG_S	sp, TI_REGS($28)
270	PTR_LA	ra, ret_from_irq
271	jr	v0		# jump to the handler patched in by except_vec_vi
272	END(except_vec_vi_handler)
273
274/*
275 * EJTAG debug exception handler.
276 */
/* EJTAG debug exception: on an SDBBP hit, save full context and call
   ejtag_exception_handler(); otherwise return straight away via deret. */
277NESTED(ejtag_debug_handler, PT_SIZE, sp)
278	.set	push
279	.set	noat
280	MTC0	k0, CP0_DESAVE		# park k0 in the DESAVE scratch register
281	mfc0	k0, CP0_DEBUG
282
283	sll	k0, k0, 30	# Check for SDBBP.
284	bgez	k0, ejtag_return
285
286	PTR_LA	k0, ejtag_debug_buffer	# no second scratch reg: park k1 in memory
287	LONG_S	k1, 0(k0)
288	SAVE_ALL
289	move	a0, sp			# a0 = pt_regs for the C handler
290	jal	ejtag_exception_handler
291	RESTORE_ALL
292	PTR_LA	k0, ejtag_debug_buffer	# recover k1 saved above
293	LONG_L	k1, 0(k0)
294
295ejtag_return:
296	MFC0	k0, CP0_DESAVE
297	.set	mips32
298	deret
299	.set pop
300	END(ejtag_debug_handler)
301
302/*
303 * This buffer is reserved for the use of the EJTAG debug
304 * handler.
305 */
306	.data
307EXPORT(ejtag_debug_buffer)
308	.fill	LONGSIZE	# one word: holds k1 while ejtag_debug_handler calls into C
309	.previous
310
311	__INIT
312
313/*
314 * NMI debug exception handler for MIPS reference boards.
315 * The NMI debug exception entry point is 0xbfc00000, which
316 * normally is in the boot PROM, so the boot PROM must do an
317 * unconditional jump to this vector.
318 */
319NESTED(except_vec_nmi, 0, sp)
320	j	nmi_handler		# tail-jump to the full handler below
321	END(except_vec_nmi)
322
323	__FINIT
324
/* NMI entry proper: full context save, call the C handler, restore, eret. */
325NESTED(nmi_handler, PT_SIZE, sp)
326	.set	push
327	.set	noat
328	SAVE_ALL
329 	move	a0, sp			# a0 = pt_regs for the C handler
330	jal	nmi_exception_handler
331	RESTORE_ALL
332	.set	mips3
333	eret
334	.set	pop
335	END(nmi_handler)
336
/* No-op "clear" stage: leave the IRQ state exactly as SAVE_ALL left it. */
337	.macro	__build_clear_none
338	.endm

/* Re-enable interrupts (telling lockdep first) before the C handler runs. */
340	.macro	__build_clear_sti
341	TRACE_IRQS_ON
342	STI
343	.endm

/* Keep interrupts masked for the C handler. */
345	.macro	__build_clear_cli
346	CLI
347	TRACE_IRQS_OFF
348	.endm
349
/* FP exception prologue: read FCSR into a1 (handed to the C handler),
   clear the cause bits (bits 12..17) so the FPU won't re-raise, then
   re-enable interrupts. */
350	.macro	__build_clear_fpe
351	cfc1	a1, fcr31
352	li	a2, ~(0x3f << 12)
353	and	a2, a1
354	ctc1	a2, fcr31
355	TRACE_IRQS_ON
356	STI
357	.endm

/* Address-error prologue: stash the faulting address in pt_regs->bvaddr. */
359	.macro	__build_clear_ade
360	MFC0	t0, CP0_BADVADDR
361	PTR_S	t0, PT_BVADDR(sp)
362	KMODE
363	.endm
364
/* "silent" verbosity: print nothing on entry. */
365	.macro	__BUILD_silent exception
366	.endm

368	/* Gas tries to parse the PRINT argument as a string containing
369	   string escapes and emits bogus warnings if it believes it has
370	   recognized an unknown escape code.  So make the arguments
371	   start with an n and gas will believe \n is ok ...  */
/* "verbose" verbosity: log the exception name and the faulting EPC. */
372	.macro	__BUILD_verbose	nexception
373	LONG_L	a1, PT_EPC(sp)
374#ifdef CONFIG_32BIT
375	PRINT("Got \nexception at %08lx\012")
376#endif
377#ifdef CONFIG_64BIT
378	PRINT("Got \nexception at %016lx\012")
379#endif
380	.endm
381
382	.macro	__BUILD_count exception
383	LONG_L	t0,exception_count_\exception
384	LONG_ADDIU t0, 1
385	LONG_S	t0,exception_count_\exception
386	.comm	exception_count\exception, 8, 8
387	.endm
388
/*
 * Build a first-level exception handler "handle_<exception>":
 * save context, run the __build_clear_<clear> prologue, optionally log
 * (__BUILD_<verbose>), then call do_<handler>(pt_regs) returning via
 * ret_from_exception.  \ext names an alternate entry (e.g. "_int").
 * NOTE(review): __BUILD_clear_\clear resolves to __build_clear_* —
 * gas macro names are case-insensitive; confirm against the gas manual.
 */
389	.macro	__BUILD_HANDLER exception handler clear verbose ext
390	.align	5
391	NESTED(handle_\exception, PT_SIZE, sp)
392	.set	noat
393	SAVE_ALL
394	FEXPORT(handle_\exception\ext)
395	__BUILD_clear_\clear
396	.set	at
397	__BUILD_\verbose \exception
398	move	a0, sp
399	PTR_LA	ra, ret_from_exception
400	j	do_\handler
401	END(handle_\exception)
402	.endm

/* Convenience wrapper: always uses the "_int" alternate-entry suffix. */
404	.macro	BUILD_HANDLER exception handler clear verbose
405	__BUILD_HANDLER	\exception \handler \clear \verbose _int
406	.endm
407
/*
 * Instantiate the first-level handlers.  The "#n" annotations
 * presumably give the CP0 Cause.ExcCode each handler serves —
 * confirm against the MIPS PRA.
 */
408	BUILD_HANDLER adel ade ade silent		/* #4  */
409	BUILD_HANDLER ades ade ade silent		/* #5  */
410	BUILD_HANDLER ibe be cli silent			/* #6  */
411	BUILD_HANDLER dbe be cli silent			/* #7  */
412	BUILD_HANDLER bp bp sti silent			/* #9  */
413	BUILD_HANDLER ri ri sti silent			/* #10 */
414	BUILD_HANDLER cpu cpu sti silent		/* #11 */
415	BUILD_HANDLER ov ov sti silent			/* #12 */
416	BUILD_HANDLER tr tr sti silent			/* #13 */
417	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
418	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
419	BUILD_HANDLER watch watch sti verbose		/* #23 */
420	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
421	BUILD_HANDLER mt mt sti silent			/* #25 */
422	BUILD_HANDLER dsp dsp sti silent		/* #26 */
423	BUILD_HANDLER reserved reserved sti verbose	/* others */
424
/*
 * As handle_ri_rdhwr, but on VIVT caches we must first be sure the TLB
 * holds a mapping for EPC before the fast path fetches the faulting
 * instruction: probe the TLB with EPC's VPN + current ASID and take
 * the slow path (handle_ri) on a miss.
 */
425	.align	5
426	LEAF(handle_ri_rdhwr_vivt)
427#ifdef CONFIG_MIPS_MT_SMTC
428	PANIC_PIC("handle_ri_rdhwr_vivt called")
429#else
430	.set	push
431	.set	noat
432	.set	noreorder
433	/* check if TLB contains an entry for EPC */
434	MFC0	k1, CP0_ENTRYHI
435	andi	k1, 0xff	/* ASID_MASK */
436	MFC0	k0, CP0_EPC
437	PTR_SRL	k0, PAGE_SHIFT + 1
438	PTR_SLL	k0, PAGE_SHIFT + 1
439	or	k1, k0		# k1 = EPC's VPN | current ASID
440	MTC0	k1, CP0_ENTRYHI
441	mtc0_tlbw_hazard
442	tlbp
443	tlb_probe_hazard
444	mfc0	k1, CP0_INDEX	# negative Index = TLB miss
445	.set	pop
446	bltz	k1, handle_ri	/* slow path */
447	/* fall thru */
448#endif
449	END(handle_ri_rdhwr_vivt)
450
/*
 * Fast path emulating "rdhwr v1,$29" (read the thread pointer) on CPUs
 * where rdhwr raises Reserved Instruction: fetch the faulting
 * instruction, verify it is exactly 0x7c03e83b, load
 * thread_info->tp_value into v1 and resume past the instruction.
 * Anything else goes to handle_ri.
 */
451	LEAF(handle_ri_rdhwr)
452	.set	push
453	.set	noat
454	.set	noreorder
455	/* 0x7c03e83b: rdhwr v1,$29 */
456	MFC0	k1, CP0_EPC
457	lui	k0, 0x7c03	# build the expected opcode in k0
458	lw	k1, (k1)	# fetch the faulting instruction
459	ori	k0, 0xe83b
460	.set	reorder
461	bne	k0, k1, handle_ri	/* if not ours */
462	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
463	get_saved_sp	/* k1 := current_thread_info */
464	.set	noreorder
465	MFC0	k0, CP0_EPC
466#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
467	ori	k1, _THREAD_MASK	# round k1 down to the thread_info base
468	xori	k1, _THREAD_MASK
469	LONG_L	v1, TI_TP_VALUE(k1)
470	LONG_ADDIU	k0, 4		# skip the emulated instruction
471	jr	k0
472	 rfe
473#else
474	LONG_ADDIU	k0, 4		/* stall on $k0 */
475	MTC0	k0, CP0_EPC
476	/* I hope three instructions between MTC0 and ERET are enough... */
477	ori	k1, _THREAD_MASK	# round k1 down to the thread_info base
478	xori	k1, _THREAD_MASK
479	LONG_L	v1, TI_TP_VALUE(k1)
480	.set	mips3
481	eret
482	.set	mips0
483#endif
484	.set	pop
485	END(handle_ri_rdhwr)
486
487#ifdef CONFIG_64BIT
488/* A temporary overflow handler used by check_daddi(). */

490	__INIT

/* NOTE(review): placed in init text, so it is discarded after boot. */
492	BUILD_HANDLER  daddi_ov daddi_ov none silent	/* #12 */
493#endif
494