/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2007  Maciej W. Rozycki
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/thread_info.h>

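/*
 * PANIC_PIC builds a position-independent panic trampoline: TEXT()
 * emits the message at local label 8 in .data (see <asm/asm.h>),
 * PTR_LA a0,8f passes it to panic(), and the call goes through AT
 * with jr so the code works from wherever the vector was copied.
 * The 9: b 9b loop merely parks the CPU should panic() ever return.
 */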
#define PANIC_PIC(msg)					\
		.set push;				\
		.set	reorder;			\
		PTR_LA	a0,8f;				\
		.set	noat;				\
		PTR_LA	AT, panic;			\
		jr	AT;				\
9:		b	9b;				\
		.set	pop;				\
		TEXT(msg)

	__INIT

NESTED(except_vec0_generic, 0, sp)
	PANIC_PIC("Exception vector 0 called")
	END(except_vec0_generic)

NESTED(except_vec1_generic, 0, sp)
	PANIC_PIC("Exception vector 1 called")
	END(except_vec1_generic)

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
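/*
 * Dispatch math: Cause.ExcCode occupies bits 6..2, so masking with
 * 0x7c leaves ExcCode already scaled by 4, i.e. a ready-made byte
 * offset into an array of 32-bit handler pointers; 64-bit kernels
 * shift left once more for 8-byte pointers.  In rough C this is
 * nothing more than exception_handlers[(cause >> 2) & 0x1f]().
 */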
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX
#endif
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	mips3
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
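	/*
	 * A hedged note on the cache ops: with TagLo zeroed,
	 * Index_Store_Tag_D writes an invalid tag into the primary
	 * dcache line selected by BadVAddr's index bits, and
	 * Hit_Writeback_Inv_SD then writes back and invalidates the
	 * matching secondary line, leaving memory with the
	 * authoritative copy.
	 */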
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

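/*
 * Idle rollback: an interrupt taken between the _TIF_NEED_RESCHED
 * test below and the wait instruction would otherwise put the CPU to
 * sleep with work pending.  The prologues generated by
 * BUILD_ROLLBACK_PROLOGUE spot an EPC inside this 32-byte region and
 * rewind it to the start, forcing the flag test to run again.
 */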
	.align	5	/* 32 byte rollback region */
LEAF(r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	 nop
	nop
	nop
	.set	mips3
	wait
	/* end of rollback region (the region size must be a power of two) */
	.set	pop
1:
	jr	ra
	END(r4k_wait)

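/*
 * The ori/xori pair below rounds EPC down to a 32-byte boundary,
 * since (epc | 0x1f) ^ 0x1f == epc & ~0x1f.  When the result equals
 * the start of r4k_wait the interrupt hit inside the rollback region,
 * and the MTC0 on the fall-through path rewrites EPC accordingly.
 */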
	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, 9f
	MTC0	k0, CP0_EPC
9:
	.set pop
	.endm

	.align  5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state.  If an interrupt is
	 * taken after interrupts are disabled but before the state is
	 * updated, it will appear to restore_all that it is incorrectly
	 * returning with interrupts disabled.
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set pop
#endif
	SAVE_ALL
	CLI
	TRACE_IRQS_OFF

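	/*
	 * Hand-off sketch (hedged): s0 keeps the previous pt_regs
	 * pointer from thread_info while sp, the frame SAVE_ALL just
	 * built, is published in TI_REGS; ret_from_irq is preloaded
	 * into ra so plat_irq_dispatch unwinds straight into the
	 * common exit path, which also puts s0 back into TI_REGS.
	 */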
	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
	PTR_LA	ra, ret_from_irq
	j	plat_irq_dispatch
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME
	SAVE_AT
	.set	push
	.set	noreorder
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * To keep from blindly blocking *all* interrupts
	 * during service by SMTC kernel, we also want to
	 * pass the IM value to be cleared.
	 */
FEXPORT(except_vec_vi_mori)
	ori	a0, $0, 0
#endif /* CONFIG_MIPS_MT_SMTC */
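	/*
	 * The zero immediates in the lui/ori pair below are rewritten
	 * at boot with the high and low halves of the chosen handler's
	 * address, so each copied vector materialises its handler in
	 * v0 and jumps to the common code; the ori executes in the
	 * jump's delay slot.  except_vec_vi_end lets the setup code
	 * measure how many bytes to copy into each vector slot.
	 */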
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	j	except_vec_vi_handler
FEXPORT(except_vec_vi_ori)
	 ori	v0, 0		/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler, which is passed in $v0.
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC has an interesting problem that interrupts are level-triggered,
	 * and the CLI macro will clear EXL, potentially causing a duplicate
	 * interrupt service invocation. So we need to clear the associated
	 * IM bit of Status prior to doing CLI, and restore it after the
	 * service routine has been invoked - we must assume that the
	 * service routine will have cleared the state, and any active
	 * level represents a new or otherwise unserviced event...
	 */
	mfc0	t1, CP0_STATUS
	and	t0, a0, t1
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
	mfc0	t2, CP0_TCCONTEXT
	or	t2, t0, t2
	mtc0	t2, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
	xor	t1, t1, t0
	mtc0	t1, CP0_STATUS
	_ehb
#endif /* CONFIG_MIPS_MT_SMTC */
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
#ifdef CONFIG_MIPS_MT_SMTC
	move	s1, a0
#endif
	TRACE_IRQS_OFF
#ifdef CONFIG_MIPS_MT_SMTC
	move	a0, s1
#endif
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
	PTR_LA	ra, ret_from_irq
	jr	v0
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

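	/*
	 * Hedged reading: the shift parks Debug.DBp (bit 1, set when
	 * an sdbbp instruction raised the exception) in the sign bit,
	 * so bgez means "not a software breakpoint" and we leave debug
	 * mode at once.  k1 is parked in ejtag_debug_buffer around the
	 * C call because the exception frame does not preserve k0/k1.
	 */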
	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)

ejtag_return:
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	RESTORE_ALL
	.set	mips3
	eret
	.set	pop
	END(nmi_handler)

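/*
 * The __build_clear_* macros below are the per-exception "clear"
 * actions named by BUILD_HANDLER's third argument: none leaves the
 * IRQ state untouched, sti traces and re-enables interrupts, cli
 * disables them, ade stashes BadVAddr in the exception frame before
 * switching back via KMODE, and fpe additionally clears the six FCSR
 * cause bits (bits 17..12, hence the ~(0x3f << 12) mask) so the FP
 * exception cannot immediately re-fire.
 */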
	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon). */
	.set	mips1
	cfc1	a1, fcr31
	li	a2, ~(0x3f << 12)
	and	a2, a1
	ctc1	a2, fcr31
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes it
	   recognizes an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ...  */
	.macro	__BUILD_verbose	nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0,exception_count_\exception
	LONG_ADDIU t0, 1
	LONG_S	t0,exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__BUILD_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	PTR_LA	ra, ret_from_exception
	j	do_\handler
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER	\exception \handler \clear \verbose _int
	.endm

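/*
 * Expansion sketch (abridged): "BUILD_HANDLER ri ri sti silent" emits
 * approximately
 *
 *	NESTED(handle_ri, PT_SIZE, sp)
 *	SAVE_ALL
 *	FEXPORT(handle_ri_int)		# entry for pre-saved registers
 *	TRACE_IRQS_ON; STI		# the "sti" clear action
 *	move	a0, sp			# pt_regs argument
 *	PTR_LA	ra, ret_from_exception
 *	j	do_ri			# C handler in traps.c
 *	END(handle_ri)
 *
 * so every do_<handler>() receives the saved register frame in a0 and
 * returns through ret_from_exception.
 */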
	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef	CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

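/*
 * A hedged note on the VIVT variant below: before the word at EPC can
 * be fetched with a plain load, the handler probes the TLB (tlbp) for
 * a mapping of the EPC page under the current ASID.  If no entry is
 * present (Index comes back negative) it punts to the generic
 * handle_ri rather than risk a TLB miss in the middle of the
 * exception prologue.
 */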
	.align	5
	LEAF(handle_ri_rdhwr_vivt)
#ifdef CONFIG_MIPS_MT_SMTC
	PANIC_PIC("handle_ri_rdhwr_vivt called")
#else
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, 0xff	/* ASID_MASK */
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
#endif
	END(handle_ri_rdhwr_vivt)

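/*
 * Overview: userland fetches its TLS pointer with "rdhwr v1, $29"
 * (hardware register 29, ULR).  CPUs lacking that register raise RI
 * instead, so the fast path below loads the faulting word, compares
 * it with the fixed encoding 0x7c03e83b, and on a match supplies
 * thread_info->tp_value in v1 and resumes at EPC + 4.  Any other
 * instruction drops through to the ordinary RI handler.
 */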
	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* 0x7c03e83b: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
	.set	reorder
	bne	k0, k1, handle_ri	/* if not ours */
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	mips3
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER  daddi_ov daddi_ov none silent	/* #12 */
#endif