/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/eva.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>

#define GCR_CPC_BASE_OFS	0x0088
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

#define CPC_CL_VC_STOP_OFS	0x2020
#define CPC_CL_VC_RUN_OFS	0x2028

.extern mips_cm_base

.set noreorder

#ifdef CONFIG_64BIT
# define STATUS_BITDEPS		ST0_KX
#else
# define STATUS_BITDEPS		0
#endif

#ifdef CONFIG_MIPS_CPS_NS16550

#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	 nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */

	/*
	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
	 * MT is not supported then branch to nomt.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	 nop
	.endm

	/*
	 * Set dest to non-zero if the core supports MIPSr6 multithreading
	 * (i.e. VPs), else zero. If MIPSr6 multithreading is not supported then
	 * branch to nomt.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	 nop
	.endm
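
	/*
	 * Note on has_mt/has_vp above: bit 31 (the M bit) of each Config
	 * register indicates that the next Config register is present, so
	 * the bgez instructions (branch if the sign bit is clear) bail out
	 * to \nomt as soon as a required Config register is missing, before
	 * the MT/VP feature bit is finally tested.
	 */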

	/* Calculate an uncached address for the CM GCRs */
	.macro	cmgcrb	dest
	.set	push
	.set	noat
	MFC0	$1, CP0_CMGCRBASE
	PTR_SLL	$1, $1, 4
	PTR_LI	\dest, UNCAC_BASE
	PTR_ADDU \dest, \dest, $1
	.set	pop
	.endm
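
	/*
	 * CP0 CMGCRBase holds the physical base address of the CM's GCR
	 * block shifted right by 4 bits, so it is shifted back up and then
	 * offset into the uncached segment to give a usable virtual address.
	 */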

.section .text.cps-vec
.balign 0x1000

LEAF(mips_cps_core_entry)
	/*
	 * These first 4 bytes will be patched by cps_smp_setup to load the
	 * CCA to use into register s0.
	 */
	.word	0

	/* Check whether we're here due to an NMI */
	mfc0	k0, CP0_STATUS
	and	k0, k0, ST0_NMI
	beqz	k0, not_nmi
	 nop

	/* This is an NMI */
	PTR_LA	k0, nmi_handler
	jr	k0
	 nop

not_nmi:
	/* Setup Cause */
	li	t0, CAUSEF_IV
	mtc0	t0, CP0_CAUSE

	/* Setup Status */
	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
	mtc0	t0, CP0_STATUS

	/* Skip cache & coherence setup if we're already coherent */
	cmgcrb	v1
	lw	s7, GCR_CL_COHERENCE_OFS(v1)
	bnez	s7, 1f
	 nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	 nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(v1)
	ehb

	/* Clear the Kseg0 CCA field in Config.K0, then set it to the CCA in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7
	xori	t0, 0x7
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	 nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	 nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	 nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	 nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	 move	a0, v0

	/* Off we go! */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	 nop
	END(mips_cps_core_entry)

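/*
 * The handlers below sit at the offsets of the standard boot exception
 * vectors (Status.BEV = 1) relative to the 4KB-aligned start of this
 * section: 0x200 TLB refill, 0x280 XTLB refill, 0x300 cache error,
 * 0x380 general exception, 0x400 interrupt (with Cause.IV = 1) and
 * 0x480 EJTAG debug.
 */
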
.org 0x200
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.
	 nop
	END(excep_tlbfill)

.org 0x280
LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	 nop
	END(excep_xtlbfill)

.org 0x300
LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	 nop
	END(excep_cache)

.org 0x380
LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	 nop
	END(excep_genex)

.org 0x400
LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	 nop
	END(excep_intex)

.org 0x480
LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	 nop
	END(excep_ejtag)

LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1
	 nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1

	/* If there's only 1, we're done */
	beqz	t0, 2f
	 nop

	/* Loop through each VPE within this core */
	li	ta1, 1

1:	/* Operate on the appropriate TC */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	 nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	 nop
	END(mips_cps_core_init)

/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *          struct vpe_boot_config in v1, VPE ID in t9
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this core's struct core_boot_config */
	cmgcrb	t0
	lw	t0, GCR_CL_ID_OFS(t0)
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1

	/* Calculate this VPE's ID. If the core doesn't support MT, use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, $3, 1
	andi	t9, t9, 0xff
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1
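	/*
	 * t1 is now a mask wide enough to cover EBase.CPUNum for this
	 * core's VPEs (assuming, as on current systems, a power-of-two
	 * VPE count).
	 */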

	/* Retrieve the VPE ID from EBase.CPUNum */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPE's struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	 nop
	END(mips_cps_get_bootcfg)

LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	cmgcrb	t3
	PTR_L	t1, GCR_CPC_BASE_OFS(t3)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2
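	/*
	 * GCR_CPC_BASE holds the CPC's physical base address in its upper
	 * bits (the low bits carry the CPC enable flag), so mask off the
	 * low 15 bits and map the result through the uncached segment.
	 */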

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	.set	push
	.set	mt

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1
	 nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE */
	move	t8, ta2
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	 nop

	/* Operate on the appropriate TC */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	 nop

	/* Calculate a pointer to the VPE's struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	addu	t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it. Config3
	 * is guaranteed to exist when MT startup is running, so just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	 nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	 nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	 nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0
	 nop

2:	.set	pop

#endif /* CONFIG_CPU_MIPSR6 || CONFIG_MIPS_MT */

	/* Return */
5:	jr	ra
	 nop
	END(mips_cps_boot_vpes)

LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to the TagLo or TagHi registers with selects
	 * 0 or 2 should be valid on all MIPS32 CPUs, even those for which the
	 * writes are unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect I-cache size */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2
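	/* t1 now holds the total I-cache size: line size * sets/way * ways */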

	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	 nop
icache_done:

	/* Detect D-cache line size */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect D-cache size */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	 PTR_ADD a0, a0, t0
dcache_done:

	jr	ra
	 nop
	END(mips_cps_cache_init)

#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)

	/* Calculate a pointer to this CPU's struct mips_static_suspend_state */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)
	sll	$1, $1, LONGLOG
	PTR_LA	\dest, __per_cpu_offset
	addu	$1, $1, \dest
	lw	$1, 0($1)
	PTR_LA	\dest, cps_cpu_state
	addu	\dest, \dest, $1
	.set	pop
	.endm
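
	/*
	 * In kernel mode gp holds the current thread_info pointer, so the
	 * macro above reads this CPU's number from thread_info, uses it to
	 * index __per_cpu_offset[], and adds the resulting offset to the
	 * address of the per-CPU variable cps_cpu_state.
	 */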

LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1
	SUSPEND_SAVE_STATIC
	jr	v0
	 nop
	END(mips_cps_pm_save)

LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)

#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
