xref: /openbmc/linux/arch/mips/kernel/genex.S (revision f8e17c17)
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2002, 2007  Maciej W. Rozycki
9 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
10 */
11#include <linux/init.h>
12
13#include <asm/asm.h>
14#include <asm/asmmacro.h>
15#include <asm/cacheops.h>
16#include <asm/irqflags.h>
17#include <asm/regdef.h>
18#include <asm/fpregdef.h>
19#include <asm/mipsregs.h>
20#include <asm/stackframe.h>
21#include <asm/sync.h>
22#include <asm/war.h>
23#include <asm/thread_info.h>
24
25	__INIT
26
27/*
28 * General exception vector for all other CPUs.
29 *
30 * Be careful when changing this, it has to be at most 128 bytes
31 * to fit into space reserved for the exception handler.
32 */
33NESTED(except_vec3_generic, 0, sp)
34	.set	push
35	.set	noat				# only k0/k1 may be clobbered here; $at belongs to interrupted code
36	mfc0	k1, CP0_CAUSE
37	andi	k1, k1, 0x7c			# k1 = Cause.ExcCode << 2 (mask bits 6..2) = word-table index
38#ifdef CONFIG_64BIT
39	dsll	k1, k1, 1			# double the index: table entries are 8-byte pointers
40#endif
41	PTR_L	k0, exception_handlers(k1)	# fetch per-ExcCode handler pointer
42	jr	k0				# tail-jump to it; handler does the full register save
43	.set	pop
44	END(except_vec3_generic)
45
46/*
47 * General exception handler for CPUs with virtual coherency exception.
48 *
49 * Be careful when changing this, it has to be at most 256 (as a special
50 * exception) bytes to fit into space reserved for the exception handler.
51 */
52NESTED(except_vec3_r4000, 0, sp)
53	.set	push
54	.set	arch=r4000
55	.set	noat
56	mfc0	k1, CP0_CAUSE
57	li	k0, 31<<2			# ExcCode 31 = VCED (virtual coherency, data)
58	andi	k1, k1, 0x7c			# k1 = ExcCode << 2
59	.set	push
60	.set	noreorder			# delay slots filled by hand below
61	.set	nomacro
62	beq	k1, k0, handle_vced
63	 li	k0, 14<<2			# (delay slot) ExcCode 14 = VCEI (virtual coherency, insn)
64	beq	k1, k0, handle_vcei
65#ifdef CONFIG_64BIT
66	 dsll	k1, k1, 1			# (delay slot) scale index for 8-byte pointers
67#endif
68	.set	pop
69	PTR_L	k0, exception_handlers(k1)	# not a VCE: dispatch like except_vec3_generic
70	jr	k0
71
72	/*
73	 * Big shit, we now may have two dirty primary cache lines for the same
74	 * physical address.  We can safely invalidate the line pointed to by
75	 * c0_badvaddr because after return from this exception handler the
76	 * load / store will be re-executed.
77	 */
78handle_vced:
79	MFC0	k0, CP0_BADVADDR
80	li	k1, -4					# Is this ...
81	and	k0, k1					# ... really needed?
82	mtc0	zero, CP0_TAGLO			# zero tag => Index_Store_Tag invalidates the D-line
83	cache	Index_Store_Tag_D, (k0)
84	cache	Hit_Writeback_Inv_SD, (k0)	# flush the secondary line too
85#ifdef CONFIG_PROC_FS
86	PTR_LA	k0, vced_count			# bump /proc VCED statistics counter
87	lw	k1, (k0)
88	addiu	k1, 1
89	sw	k1, (k0)
90#endif
91	eret					# re-execute the faulting load/store
92
93handle_vcei:
94	MFC0	k0, CP0_BADVADDR
95	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
96#ifdef CONFIG_PROC_FS
97	PTR_LA	k0, vcei_count			# bump /proc VCEI statistics counter
98	lw	k1, (k0)
99	addiu	k1, 1
100	sw	k1, (k0)
101#endif
102	eret					# re-fetch the faulting instruction
103	.set	pop
104	END(except_vec3_r4000)
105
106	__FINIT
107
	/*
	 * Idle via the WAIT instruction.  An interrupt taken anywhere inside
	 * the 32-byte "rollback region" below has its EPC rolled back to the
	 * start of the region by BUILD_ROLLBACK_PROLOGUE, so the
	 * TIF_NEED_RESCHED test is always re-run before WAIT can sleep —
	 * closing the race between "check flag" and "go to sleep".
	 */
108	.align	5	/* 32 byte rollback region */
109LEAF(__r4k_wait)
110	.set	push
111	.set	noreorder
112	/* start of rollback region */
113	LONG_L	t0, TI_FLAGS($28)		# $28 = current thread_info
114	nop
115	andi	t0, _TIF_NEED_RESCHED
116	bnez	t0, 1f				# reschedule pending: skip WAIT
117	 nop
118	nop
119	nop
	# Padding nops keep WAIT inside the aligned 32-byte region
	# (microMIPS encodings are shorter, hence the extra nops below).
120#ifdef CONFIG_CPU_MICROMIPS
121	nop
122	nop
123	nop
124	nop
125#endif
126	.set	MIPS_ISA_ARCH_LEVEL_RAW
127	wait
128	/* end of rollback region (the region size must be power of two) */
1291:
130	jr	ra
131	 nop
132	.set	pop
133	END(__r4k_wait)
134
	/*
	 * Prepend a rollback check to an exception handler: if EPC lies
	 * within __r4k_wait's 32-byte rollback region, rewind EPC to the
	 * region start so the need-resched test is re-executed on return.
	 * Clobbers k0/k1 only.
	 */
135	.macro	BUILD_ROLLBACK_PROLOGUE handler
136	FEXPORT(rollback_\handler)
137	.set	push
138	.set	noat
139	MFC0	k0, CP0_EPC
140	PTR_LA	k1, __r4k_wait
141	ori	k0, 0x1f	/* 32 byte rollback region */
142	xori	k0, 0x1f			# k0 = EPC rounded down to 32-byte boundary
143	bne	k0, k1, \handler		# not in __r4k_wait: fall through to real handler
144	MTC0	k0, CP0_EPC			# roll EPC back to region start
145	.set pop
146	.endm
147
148	.align	5
149BUILD_ROLLBACK_PROLOGUE handle_int
150NESTED(handle_int, PT_SIZE, sp)
151	.cfi_signal_frame
152#ifdef CONFIG_TRACE_IRQFLAGS
153	/*
154	 * Check to see if the interrupted code has just disabled
155	 * interrupts and ignore this interrupt for now if so.
156	 *
157	 * local_irq_disable() disables interrupts and then calls
158	 * trace_hardirqs_off() to track the state. If an interrupt is taken
159	 * after interrupts are disabled but before the state is updated
160	 * it will appear to restore_all that it is incorrectly returning with
161	 * interrupts disabled
162	 */
163	.set	push
164	.set	noat
165	mfc0	k0, CP0_STATUS
166#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
167	and	k0, ST0_IEP			# R3000: "previous" IE bit holds pre-exception state
168	bnez	k0, 1f
169
170	mfc0	k0, CP0_EPC
171	.set	noreorder
172	j	k0				# return to interrupted code without handling
173	 rfe					# (delay slot) restore pre-exception status
174#else
175	and	k0, ST0_IE
176	bnez	k0, 1f
177
178	eret					# IE was clear: pretend this IRQ never happened
179#endif
1801:
181	.set pop
182#endif
183	SAVE_ALL docfi=1
184	CLI					# mask interrupts for the duration of dispatch
185	TRACE_IRQS_OFF
186
	# Remember previous regs pointer, publish ours (for nested-IRQ detection)
187	LONG_L	s0, TI_REGS($28)
188	LONG_S	sp, TI_REGS($28)
189
190	/*
191	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
192	 * Check if we are already using the IRQ stack.
193	 */
194	move	s1, sp # Preserve the sp
195
196	/* Get IRQ stack for this CPU */
197	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
198#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
199	lui	k1, %hi(irq_stack)
200#else
	# Build the full 64-bit address of irq_stack piecewise (no 64-bit lui)
201	lui	k1, %highest(irq_stack)
202	daddiu	k1, %higher(irq_stack)
203	dsll	k1, 16
204	daddiu	k1, %hi(irq_stack)
205	dsll	k1, 16
206#endif
207	LONG_SRL	k0, SMP_CPUID_PTRSHIFT	# cpu id -> array index
208	LONG_ADDU	k1, k0
209	LONG_L	t0, %lo(irq_stack)(k1)		# t0 = this CPU's IRQ stack base
210
211	# Check if already on IRQ stack
212	PTR_LI	t1, ~(_THREAD_SIZE-1)
213	and	t1, t1, sp			# t1 = base of the stack sp currently lives on
214	beq	t0, t1, 2f
215
216	/* Switch to IRQ stack */
217	li	t1, _IRQ_STACK_START
218	PTR_ADD sp, t0, t1
219
220	/* Save task's sp on IRQ stack so that unwinding can follow it */
221	LONG_S	s1, 0(sp)
2222:
223	jal	plat_irq_dispatch
224
225	/* Restore sp */
226	move	sp, s1
227
228	j	ret_from_irq
229#ifdef CONFIG_CPU_MICROMIPS
230	nop
231#endif
232	END(handle_int)
233
234	__INIT
235
236/*
237 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
238 * This is a dedicated interrupt exception vector which reduces the
239 * interrupt processing overhead.  The jump instruction will be replaced
240 * at the initialization time.
241 *
242 * Be careful when changing this, it has to be at most 128 bytes
243 * to fit into space reserved for the exception handler.
244 */
245NESTED(except_vec4, 0, sp)
	# Placeholder: spins in place until set_except_vector() patches in
	# the real jump target at boot.
2461:	j	1b			/* Dummy, will be replaced */
247	END(except_vec4)
248
249/*
250 * EJTAG debug exception handler.
251 * The EJTAG debug exception entry point is 0xbfc00480, which
252 * normally is in the boot PROM, so the boot PROM must do an
253 * unconditional jump to this vector.
254 */
255NESTED(except_vec_ejtag_debug, 0, sp)
	# Trampoline only: the EJTAG vector slot is tiny, so immediately
	# jump to the full handler.
256	j	ejtag_debug_handler
257#ifdef CONFIG_CPU_MICROMIPS
258	 nop				# keep delay slot explicit for microMIPS
259#endif
260	END(except_vec_ejtag_debug)
261
262	__FINIT
263
264/*
265 * Vectored interrupt handler.
266 * This prototype is copied to ebase + n*IntCtl.VS and patched
267 * to invoke the handler
268 */
269BUILD_ROLLBACK_PROLOGUE except_vec_vi
270NESTED(except_vec_vi, 0, sp)
271	SAVE_SOME docfi=1
272	SAVE_AT docfi=1
273	.set	push
274	.set	noreorder
275	PTR_LA	v1, except_vec_vi_handler
	# The lui/ori pair below is rewritten at runtime so that v0 carries
	# the per-vector handler address into except_vec_vi_handler.
276FEXPORT(except_vec_vi_lui)
277	lui	v0, 0		/* Patched */
278	jr	v1
279FEXPORT(except_vec_vi_ori)
280	 ori	v0, 0		/* Patched */
281	.set	pop
282	END(except_vec_vi)
283EXPORT(except_vec_vi_end)
284
285/*
286 * Common Vectored Interrupt code
287 * Complete the register saves and invoke the handler which is passed in $v0
288 */
289NESTED(except_vec_vi_handler, 0, sp)
290	SAVE_TEMP
291	SAVE_STATIC
292	CLI
293#ifdef CONFIG_TRACE_IRQFLAGS
	# TRACE_IRQS_OFF may clobber v0, which still holds the patched-in
	# handler address — preserve it across the call.
294	move	s0, v0
295	TRACE_IRQS_OFF
296	move	v0, s0
297#endif
298
	# Remember previous regs pointer, publish ours (for nested-IRQ detection)
299	LONG_L	s0, TI_REGS($28)
300	LONG_S	sp, TI_REGS($28)
301
302	/*
303	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
304	 * Check if we are already using the IRQ stack.
305	 */
306	move	s1, sp # Preserve the sp
307
308	/* Get IRQ stack for this CPU */
309	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
310#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
311	lui	k1, %hi(irq_stack)
312#else
	# Build the full 64-bit address of irq_stack piecewise (no 64-bit lui)
313	lui	k1, %highest(irq_stack)
314	daddiu	k1, %higher(irq_stack)
315	dsll	k1, 16
316	daddiu	k1, %hi(irq_stack)
317	dsll	k1, 16
318#endif
319	LONG_SRL	k0, SMP_CPUID_PTRSHIFT	# cpu id -> array index
320	LONG_ADDU	k1, k0
321	LONG_L	t0, %lo(irq_stack)(k1)		# t0 = this CPU's IRQ stack base
322
323	# Check if already on IRQ stack
324	PTR_LI	t1, ~(_THREAD_SIZE-1)
325	and	t1, t1, sp			# t1 = base of the stack sp currently lives on
326	beq	t0, t1, 2f
327
328	/* Switch to IRQ stack */
329	li	t1, _IRQ_STACK_START
330	PTR_ADD sp, t0, t1
331
332	/* Save task's sp on IRQ stack so that unwinding can follow it */
333	LONG_S	s1, 0(sp)
3342:
335	jalr	v0				# call the vector-specific handler
336
337	/* Restore sp */
338	move	sp, s1
339
340	j	ret_from_irq
341	END(except_vec_vi_handler)
342
343/*
344 * EJTAG debug exception handler.
345 */
346NESTED(ejtag_debug_handler, PT_SIZE, sp)
347	.set	push
348	.set	noat
349	MTC0	k0, CP0_DESAVE			# stash k0 in the EJTAG scratch register
350	mfc0	k0, CP0_DEBUG
351
352	sll	k0, k0, 30	# Check for SDBBP.
353	bgez	k0, ejtag_return		# Debug.DBp (bit 1) clear: not an SDBBP, bail out
354
355#ifdef CONFIG_SMP
	# Take a hand-rolled ll/sc spinlock so only one CPU at a time uses
	# the shared ejtag_debug_buffer to stage k1.
3561:	PTR_LA	k0, ejtag_debug_buffer_spinlock
357	__SYNC(full, loongson3_war)
3582:	ll	k0, 0(k0)
359	bnez	k0, 2b				# spin while lock is held
360	PTR_LA	k0, ejtag_debug_buffer_spinlock
361	sc	k0, 0(k0)			# try to claim it (stores non-zero ptr value)
362	beqz	k0, 1b				# sc failed: retry from the ll
363# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
364	sync
365# endif
366
367	PTR_LA	k0, ejtag_debug_buffer		# stage k1 in the shared buffer
368	LONG_S	k1, 0(k0)
369
	# Copy the staged k1 into this CPU's private slot, then drop the lock.
370	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
371	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
372	PTR_SLL	k1, LONGLOG			# cpu id -> byte offset of a LONGSIZE slot
373	PTR_LA	k0, ejtag_debug_buffer_per_cpu
374	PTR_ADDU k0, k1
375
376	PTR_LA	k1, ejtag_debug_buffer
377	LONG_L	k1, 0(k1)
378	LONG_S	k1, 0(k0)
379
380	PTR_LA	k0, ejtag_debug_buffer_spinlock
381	sw	zero, 0(k0)			# release the spinlock
382#else
383	PTR_LA	k0, ejtag_debug_buffer		# UP: a single buffer slot suffices
384	LONG_S	k1, 0(k0)
385#endif
386
387	SAVE_ALL
388	move	a0, sp				# arg0 = struct pt_regs *
389	jal	ejtag_exception_handler
390	RESTORE_ALL
391
	# Reload the saved k1 from the per-CPU (or UP single) slot.
392#ifdef CONFIG_SMP
393	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
394	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
395	PTR_SLL	k1, LONGLOG
396	PTR_LA	k0, ejtag_debug_buffer_per_cpu
397	PTR_ADDU k0, k1
398	LONG_L	k1, 0(k0)
399#else
400	PTR_LA	k0, ejtag_debug_buffer
401	LONG_L	k1, 0(k0)
402#endif
403
404ejtag_return:
405	back_to_back_c0_hazard
406	MFC0	k0, CP0_DESAVE			# restore k0 from the scratch register
407	.set	mips32
408	deret					# return from debug mode
409	.set	pop
410	END(ejtag_debug_handler)
411
412/*
413 * This buffer is reserved for the use of the EJTAG debug
414 * handler.
415 */
416	.data
	# One LONGSIZE staging slot shared by all CPUs...
417EXPORT(ejtag_debug_buffer)
418	.fill	LONGSIZE
419#ifdef CONFIG_SMP
	# ...guarded by this word-sized spinlock (0 = free)...
420EXPORT(ejtag_debug_buffer_spinlock)
421	.fill	LONGSIZE
	# ...plus a private slot per CPU for the saved k1.
422EXPORT(ejtag_debug_buffer_per_cpu)
423	.fill	LONGSIZE * NR_CPUS
424#endif
425	.previous
426
427	__INIT
428
429/*
430 * NMI debug exception handler for MIPS reference boards.
431 * The NMI debug exception entry point is 0xbfc00000, which
432 * normally is in the boot PROM, so the boot PROM must do a
433 * unconditional jump to this vector.
434 */
435NESTED(except_vec_nmi, 0, sp)
	# Trampoline only: jump straight from the NMI vector to the handler.
436	j	nmi_handler
437#ifdef CONFIG_CPU_MICROMIPS
438	 nop				# keep delay slot explicit for microMIPS
439#endif
440	END(except_vec_nmi)
441
442	__FINIT
443
444NESTED(nmi_handler, PT_SIZE, sp)
445	.cfi_signal_frame
446	.set	push
447	.set	noat
448	/*
449	 * Clear ERL - restore segment mapping
450	 * Clear BEV - required for page fault exception handler to work
451	 */
452	mfc0	k0, CP0_STATUS
453	ori	k0, k0, ST0_EXL			# set EXL: stay in kernel/exception mode
454	li	k1, ~(ST0_BEV | ST0_ERL)
455	and	k0, k0, k1
456	mtc0	k0, CP0_STATUS
457	_ehb					# wait for the Status write to take effect
458	SAVE_ALL
459	move	a0, sp				# arg0 = struct pt_regs *
460	jal	nmi_exception_handler
461	/* nmi_exception_handler never returns */
462	.set	pop
463	END(nmi_handler)
464
	/*
	 * __build_clear_* variants: per-exception entry glue selected by the
	 * "clear" argument of BUILD_HANDLER.  They decide the IRQ state for
	 * the C handler and capture any volatile cause registers first.
	 */
465	.macro	__build_clear_none
466	.endm
467
	# sti: re-enable interrupts before calling the C handler.
468	.macro	__build_clear_sti
469	TRACE_IRQS_ON
470	STI
471	.endm
472
	# cli: keep interrupts hard-disabled for the C handler.
473	.macro	__build_clear_cli
474	CLI
475	TRACE_IRQS_OFF
476	.endm
477
	# fpe: latch FCSR into a1 before IRQs could disturb FP state.
478	.macro	__build_clear_fpe
479	.set	push
480	/* gas fails to assemble cfc1 for some archs (octeon).*/ \
481	.set	mips1
482	SET_HARDFLOAT
483	cfc1	a1, fcr31
484	.set	pop
485	CLI
486	TRACE_IRQS_OFF
487	.endm
488
	# msa_fpe: same idea for the MSA control/status register.
489	.macro	__build_clear_msa_fpe
490	_cfcmsa	a1, MSA_CSR
491	CLI
492	TRACE_IRQS_OFF
493	.endm
494
	# ade: capture the faulting address into pt_regs before it is lost.
495	.macro	__build_clear_ade
496	MFC0	t0, CP0_BADVADDR
497	PTR_S	t0, PT_BVADDR(sp)
498	KMODE
499	.endm
500
	# silent: no diagnostic output on entry.
501	.macro	__BUILD_silent exception
502	.endm
503
504	/* Gas tries to parse the PRINT argument as a string containing
505	   string escapes and emits bogus warnings if it believes to
506	   recognize an unknown escape code.  So make the arguments
507	   start with an n and gas will believe \n is ok ...  */
	# verbose: print the exception name and faulting EPC.
508	.macro	__BUILD_verbose nexception
509	LONG_L	a1, PT_EPC(sp)
510#ifdef CONFIG_32BIT
511	PRINT("Got \nexception at %08lx\012")
512#endif
513#ifdef CONFIG_64BIT
514	PRINT("Got \nexception at %016lx\012")
515#endif
516	.endm
517
518	.macro	__BUILD_count exception
519	LONG_L	t0,exception_count_\exception
520	LONG_ADDIU	t0, 1
521	LONG_S	t0,exception_count_\exception
522	.comm	exception_count\exception, 8, 8
523	.endm
524
	/*
	 * Generate a complete exception handler "handle_<exception>":
	 * save registers, run the selected __build_clear_<clear> glue,
	 * optionally print (<verbose>), then call do_<handler>(regs).
	 * <ext> distinguishes alternate entry points (e.g. _int).
	 */
525	.macro	__BUILD_HANDLER exception handler clear verbose ext
526	.align	5
527	NESTED(handle_\exception, PT_SIZE, sp)
528	.cfi_signal_frame
529	.set	noat
530	SAVE_ALL
531	FEXPORT(handle_\exception\ext)
532	__build_clear_\clear
533	.set	at
534	__BUILD_\verbose \exception
535	move	a0, sp				# arg0 = struct pt_regs *
536	jal	do_\handler
537	j	ret_from_exception
538	END(handle_\exception)
539	.endm
540
	# Convenience wrapper: default alternate entry suffix is "_int".
541	.macro	BUILD_HANDLER exception handler clear verbose
542	__BUILD_HANDLER \exception \handler \clear \verbose _int
543	.endm
544
	/*
	 * Handler table: one BUILD_HANDLER per ExcCode (number in trailing
	 * comment).  Second arg = do_<handler> C function; third arg selects
	 * the IRQ policy / entry glue (sti / cli / ade / fpe / none / ...).
	 */
545	BUILD_HANDLER adel ade ade silent		/* #4  */
546	BUILD_HANDLER ades ade ade silent		/* #5  */
547	BUILD_HANDLER ibe be cli silent			/* #6  */
548	BUILD_HANDLER dbe be cli silent			/* #7  */
549	BUILD_HANDLER bp bp sti silent			/* #9  */
550	BUILD_HANDLER ri ri sti silent			/* #10 */
551	BUILD_HANDLER cpu cpu sti silent		/* #11 */
552	BUILD_HANDLER ov ov sti silent			/* #12 */
553	BUILD_HANDLER tr tr sti silent			/* #13 */
554	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
555#ifdef CONFIG_MIPS_FP_SUPPORT
556	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
557#endif
558	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
559	BUILD_HANDLER msa msa sti silent		/* #21 */
560	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
561#ifdef	CONFIG_HARDWARE_WATCHPOINTS
562	/*
563	 * For watch, interrupts will be enabled after the watch
564	 * registers are read.
565	 */
566	BUILD_HANDLER watch watch cli silent		/* #23 */
567#else
568	BUILD_HANDLER watch watch sti verbose		/* #23 */
569#endif
570	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
571	BUILD_HANDLER mt mt sti silent			/* #25 */
572	BUILD_HANDLER dsp dsp sti silent		/* #26 */
573	BUILD_HANDLER reserved reserved sti verbose	/* others */
574
	/*
	 * RI fast path, TLB-probe variant: before touching the faulting
	 * instruction at EPC, verify the TLB actually maps that page;
	 * otherwise a load from EPC here would fault recursively.
	 * Falls through into handle_ri_rdhwr when the probe hits.
	 */
575	.align	5
576	LEAF(handle_ri_rdhwr_tlbp)
577	.set	push
578	.set	noat
579	.set	noreorder
580	/* check if TLB contains an entry for EPC */
581	MFC0	k1, CP0_ENTRYHI
582	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX	# keep current ASID bits only
583	MFC0	k0, CP0_EPC
584	PTR_SRL	k0, _PAGE_SHIFT + 1		# round EPC down to an even-page VPN2 boundary
585	PTR_SLL	k0, _PAGE_SHIFT + 1
586	or	k1, k0				# EntryHi = VPN2 | ASID
587	MTC0	k1, CP0_ENTRYHI
588	mtc0_tlbw_hazard
589	tlbp
590	tlb_probe_hazard
591	mfc0	k1, CP0_INDEX			# Index < 0 means no matching TLB entry
592	.set	pop
593	bltz	k1, handle_ri	/* slow path */
594	/* fall thru */
595	END(handle_ri_rdhwr_tlbp)
596
	/*
	 * RI fast path: emulate "rdhwr v1,$29" (userspace TLS pointer read)
	 * without the full exception round trip.  Fetch the faulting insn,
	 * compare it against the known encoding, and if it matches return
	 * thread_info->tp_value in v1; anything else goes to handle_ri.
	 */
597	LEAF(handle_ri_rdhwr)
598	.set	push
599	.set	noat
600	.set	noreorder
601	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
602	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
603	MFC0	k1, CP0_EPC
604#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
605	and	k0, k1, 1			# EPC bit 0 set => microMIPS mode
606	beqz	k0, 1f
607	 xor	k1, k0				# (delay slot) clear ISA bit to get real address
	# microMIPS: fetch the 32-bit insn as two halfwords and merge.
608	lhu	k0, (k1)
609	lhu	k1, 2(k1)
610	ins	k1, k0, 16, 16
611	lui	k0, 0x007d			# expected microMIPS encoding
612	b	docheck
613	 ori	k0, 0x6b3c
6141:
615	lui	k0, 0x7c03			# expected MIPS32 encoding
616	lw	k1, (k1)
617	ori	k0, 0xe83b
618#else
619	andi	k0, k1, 1
620	bnez	k0, handle_ri			# no microMIPS support: odd EPC is just bad
621	 lui	k0, 0x7c03
622	lw	k1, (k1)
623	ori	k0, 0xe83b
624#endif
625	.set	reorder
626docheck:
627	bne	k0, k1, handle_ri	/* if not ours */
628
629isrdhwr:
630	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
631	get_saved_sp	/* k1 := current_thread_info */
632	.set	noreorder
633	MFC0	k0, CP0_EPC
634#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
635	ori	k1, _THREAD_MASK		# round sp-derived pointer down to
636	xori	k1, _THREAD_MASK		# the thread_info base
637	LONG_L	v1, TI_TP_VALUE(k1)		# v1 = TLS pointer, the rdhwr result
638	LONG_ADDIU	k0, 4			# step EPC past the emulated insn
639	jr	k0
640	 rfe					# (delay slot) restore pre-exception status
641#else
642#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
643	LONG_ADDIU	k0, 4		/* stall on $k0 */
644#else
645	.set	at=v1				# daddi workaround expands via $at; borrow v1
646	LONG_ADDIU	k0, 4
647	.set	noat
648#endif
649	MTC0	k0, CP0_EPC			# return past the emulated insn
650	/* I hope three instructions between MTC0 and ERET are enough... */
651	ori	k1, _THREAD_MASK		# round down to thread_info base
652	xori	k1, _THREAD_MASK
653	LONG_L	v1, TI_TP_VALUE(k1)		# v1 = TLS pointer, the rdhwr result
654	.set	push
655	.set	arch=r4000
656	eret
657	.set	pop
658#endif
659	.set	pop
660	END(handle_ri_rdhwr)
661
662#ifdef CONFIG_CPU_R4X00_BUGS64
663/* A temporary overflow handler used by check_daddi(). */
664
665	__INIT
666
	# Overflow (#12) handler installed only while probing the R4000/R4400
	# daddi errata at boot; "none" glue, no output.
667	BUILD_HANDLER  daddi_ov daddi_ov none silent	/* #12 */
668#endif
669