xref: /openbmc/linux/arch/mips/kernel/cps-vec.S (revision e3b9f1e8)
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@mips.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation;  either version 2 of the  License, or (at your
8 * option) any later version.
9 */
10
11#include <asm/addrspace.h>
12#include <asm/asm.h>
13#include <asm/asm-offsets.h>
14#include <asm/asmmacro.h>
15#include <asm/cacheops.h>
16#include <asm/eva.h>
17#include <asm/mipsregs.h>
18#include <asm/mipsmtregs.h>
19#include <asm/pm.h>
20
21#define GCR_CPC_BASE_OFS	0x0088
22#define GCR_CL_COHERENCE_OFS	0x2008
23#define GCR_CL_ID_OFS		0x2028
24
25#define CPC_CL_VC_STOP_OFS	0x2020
26#define CPC_CL_VC_RUN_OFS	0x2028
27
28.extern mips_cm_base
29
30.set noreorder
31
32#ifdef CONFIG_64BIT
33# define STATUS_BITDEPS		ST0_KX
34#else
35# define STATUS_BITDEPS		0
36#endif
37
38#ifdef CONFIG_MIPS_CPS_NS16550
39
40#define DUMP_EXCEP(name)		\
41	PTR_LA	a0, 8f;			\
42	jal	mips_cps_bev_dump;	\
43	 nop;				\
44	TEXT(name)
45
46#else /* !CONFIG_MIPS_CPS_NS16550 */
47
48#define DUMP_EXCEP(name)
49
50#endif /* !CONFIG_MIPS_CPS_NS16550 */
51
52	/*
53	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
54	 * MT is not supported then branch to nomt.
55	 */
	/*
	 * Walks the Config register chain: the sign bit (bit 31, the "M"
	 * continuation bit) of Config1/Config2 indicates whether the next
	 * Config register exists. Each mfc0 in a bgez delay slot reads the
	 * next register speculatively; if the previous register's M bit was
	 * clear (bgez taken) the chain ends and we branch to \nomt.
	 * Clobbers only \dest.
	 */
56	.macro	has_mt	dest, nomt
57	mfc0	\dest, CP0_CONFIG, 1
58	bgez	\dest, \nomt
59	 mfc0	\dest, CP0_CONFIG, 2
60	bgez	\dest, \nomt
61	 mfc0	\dest, CP0_CONFIG, 3
	/* Config3.MT indicates presence of the MT ASE */
62	andi	\dest, \dest, MIPS_CONF3_MT
63	beqz	\dest, \nomt
64	 nop
65	.endm
66
67	/*
68	 * Set dest to non-zero if the core supports MIPSr6 multithreading
69	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
70	 * branch to nomt.
71	 */
	/*
	 * Same Config-chain walk as has_mt, but continues through Config4 to
	 * Config5 and tests Config5.VP (MIPSr6 virtual processors).
	 * Clobbers only \dest.
	 */
72	.macro	has_vp	dest, nomt
73	mfc0	\dest, CP0_CONFIG, 1
74	bgez	\dest, \nomt
75	 mfc0	\dest, CP0_CONFIG, 2
76	bgez	\dest, \nomt
77	 mfc0	\dest, CP0_CONFIG, 3
78	bgez	\dest, \nomt
79	 mfc0	\dest, CP0_CONFIG, 4
80	bgez	\dest, \nomt
81	 mfc0	\dest, CP0_CONFIG, 5
82	andi	\dest, \dest, MIPS_CONF5_VP
83	beqz	\dest, \nomt
84	 nop
85	.endm
86
87	/* Calculate an uncached address for the CM GCRs */
	/*
	 * CP0 CMGCRBase holds the GCR physical base address >> 4; shift it
	 * back up and add UNCAC_BASE to form an uncached virtual address.
	 * Clobbers $1 (at) as scratch, hence the noat bracket.
	 */
88	.macro	cmgcrb	dest
89	.set	push
90	.set	noat
91	MFC0	$1, CP0_CMGCRBASE
92	PTR_SLL	$1, $1, 4
93	PTR_LI	\dest, UNCAC_BASE
94	PTR_ADDU \dest, \dest, $1
95	.set	pop
96	.endm
97
98.section .text.cps-vec
99.balign 0x1000
100
101LEAF(mips_cps_core_entry)
	/*
	 * Entry point for secondary cores/VPEs brought up via the CPS SMP
	 * code, and also the base of the BEV exception vector region (the
	 * .org vectors below are offsets from this label).
	 */
102	/*
103	 * These first 4 bytes will be patched by cps_smp_setup to load the
104	 * CCA to use into register s0.
105	 */
106	.word	0
107
108	/* Check whether we're here due to an NMI */
109	mfc0	k0, CP0_STATUS
110	and	k0, k0, ST0_NMI
111	beqz	k0, not_nmi
112	 nop
113
114	/* This is an NMI */
115	PTR_LA	k0, nmi_handler
116	jr	k0
117	 nop
118
119not_nmi:
120	/* Setup Cause */
121	li	t0, CAUSEF_IV
122	mtc0	t0, CP0_CAUSE
123
124	/* Setup Status */
125	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
126	mtc0	t0, CP0_STATUS
127
128	/* Skip cache & coherence setup if we're already coherent */
	/*
	 * s7 records the initial coherence state; it is a callee-preserved
	 * register and is tested again after mips_cps_get_bootcfg returns,
	 * surviving the intervening jal calls.
	 */
129	cmgcrb	v1
130	lw	s7, GCR_CL_COHERENCE_OFS(v1)
131	bnez	s7, 1f
132	 nop
133
134	/* Initialize the L1 caches */
135	jal	mips_cps_cache_init
136	 nop
137
138	/* Enter the coherent domain */
139	li	t0, 0xff
140	sw	t0, GCR_CL_COHERENCE_OFS(v1)
141	ehb
142
143	/* Set Kseg0 CCA to that in s0 */
	/* ori+xori clears the low 3 bits (the Config.K0 CCA field), then the
	 * patched-in CCA from s0 is OR'd into that field. */
1441:	mfc0	t0, CP0_CONFIG
145	ori	t0, 0x7
146	xori	t0, 0x7
147	or	t0, t0, s0
148	mtc0	t0, CP0_CONFIG
149	ehb
150
151	/* Jump to kseg0 */
152	PTR_LA	t0, 1f
153	jr	t0
154	 nop
155
156	/*
157	 * We're up, cached & coherent. Perform any EVA initialization necessary
158	 * before we access memory.
159	 */
1601:	eva_init
161
162	/* Retrieve boot configuration pointers */
	/* Returns: v0 = core_boot_config *, v1 = vpe_boot_config *, t9 = VPE ID */
163	jal	mips_cps_get_bootcfg
164	 nop
165
166	/* Skip core-level init if we started up coherent */
167	bnez	s7, 1f
168	 nop
169
170	/* Perform any further required core-level initialisation */
171	jal	mips_cps_core_init
172	 nop
173
174	/*
175	 * Boot any other VPEs within this core that should be online, and
176	 * deactivate this VPE if it should be offline.
177	 */
178	move	a1, t9
179	jal	mips_cps_boot_vpes
180	 move	a0, v0
181
182	/* Off we go! */
	/* Load this VPE's entry PC, gp and sp from its vpe_boot_config (v1)
	 * and jump into the kernel proper. */
1831:	PTR_L	t1, VPEBOOTCFG_PC(v1)
184	PTR_L	gp, VPEBOOTCFG_GP(v1)
185	PTR_L	sp, VPEBOOTCFG_SP(v1)
186	jr	t1
187	 nop
188	END(mips_cps_core_entry)
189
190.org 0x200
191LEAF(excep_tlbfill)
	/* BEV TLB refill vector: unexpected here, so report (when
	 * CONFIG_MIPS_CPS_NS16550 enables DUMP_EXCEP, which calls
	 * mips_cps_bev_dump with the name below) and spin forever. */
192	DUMP_EXCEP("TLB Fill")
193	b	.
194	 nop
195	END(excep_tlbfill)
196
197.org 0x280
198LEAF(excep_xtlbfill)
	/* BEV XTLB (64-bit) refill vector: dump if configured, then hang. */
199	DUMP_EXCEP("XTLB Fill")
200	b	.
201	 nop
202	END(excep_xtlbfill)
203
204.org 0x300
205LEAF(excep_cache)
	/* BEV cache error vector: dump if configured, then hang. */
206	DUMP_EXCEP("Cache")
207	b	.
208	 nop
209	END(excep_cache)
210
211.org 0x380
212LEAF(excep_genex)
	/* BEV general exception vector: dump if configured, then hang. */
213	DUMP_EXCEP("General")
214	b	.
215	 nop
216	END(excep_genex)
217
218.org 0x400
219LEAF(excep_intex)
	/* BEV interrupt vector: dump if configured, then hang. */
220	DUMP_EXCEP("Interrupt")
221	b	.
222	 nop
223	END(excep_intex)
224
225.org 0x480
226LEAF(excep_ejtag)
	/* EJTAG debug vector: chain directly to the kernel's handler. */
227	PTR_LA	k0, ejtag_debug_handler
228	jr	k0
229	 nop
230	END(excep_ejtag)
231
232LEAF(mips_cps_core_init)
	/*
	 * Core-level MT initialisation: place every secondary VPE of this
	 * core into a known halted, non-active state. No-op (plain return)
	 * unless CONFIG_MIPS_MT_SMP and the core implements the MT ASE.
	 * Clobbers t0, t1, ta1, ta3.
	 */
233#ifdef CONFIG_MIPS_MT_SMP
234	/* Check that the core implements the MT ASE */
235	has_mt	t0, 3f
236
237	.set	push
238	.set	MIPS_ISA_LEVEL_RAW
239	.set	mt
240
241	/* Only allow 1 TC per VPE to execute... */
242	dmt
243
244	/* ...and for the moment only 1 VPE */
245	dvpe
	/* jr.hb: jump with hazard barrier so the dmt/dvpe side effects are
	 * visible before we continue. */
246	PTR_LA	t1, 1f
247	jr.hb	t1
248	 nop
249
250	/* Enter VPE configuration state */
2511:	mfc0	t0, CP0_MVPCONTROL
252	ori	t0, t0, MVPCONTROL_VPC
253	mtc0	t0, CP0_MVPCONTROL
254
255	/* Retrieve the number of VPEs within the core */
	/* MVPConf0.PVPE holds (number of VPEs - 1); ta3 = VPE count. */
256	mfc0	t0, CP0_MVPCONF0
257	srl	t0, t0, MVPCONF0_PVPE_SHIFT
258	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
259	addiu	ta3, t0, 1
260
261	/* If there's only 1, we're done */
262	beqz	t0, 2f
263	 nop
264
265	/* Loop through each VPE within this core */
	/* ta1 = VPE index, starting at 1 (VPE 0 is the one running here). */
266	li	ta1, 1
267
2681:	/* Operate on the appropriate TC */
	/* Write TargTC so that the following mttc0s target TC ta1. */
269	mtc0	ta1, CP0_VPECONTROL
270	ehb
271
272	/* Bind TC to VPE (1:1 TC:VPE mapping) */
273	mttc0	ta1, CP0_TCBIND
274
275	/* Set exclusive TC, non-active, master */
276	li	t0, VPECONF0_MVP
277	sll	t1, ta1, VPECONF0_XTC_SHIFT
278	or	t0, t0, t1
279	mttc0	t0, CP0_VPECONF0
280
281	/* Set TC non-active, non-allocatable */
282	mttc0	zero, CP0_TCSTATUS
283
284	/* Set TC halted */
285	li	t0, TCHALT_H
286	mttc0	t0, CP0_TCHALT
287
288	/* Next VPE */
289	addiu	ta1, ta1, 1
290	slt	t0, ta1, ta3
291	bnez	t0, 1b
292	 nop
293
294	/* Leave VPE configuration state */
2952:	mfc0	t0, CP0_MVPCONTROL
296	xori	t0, t0, MVPCONTROL_VPC
297	mtc0	t0, CP0_MVPCONTROL
298
2993:	.set	pop
300#endif
301	jr	ra
302	 nop
303	END(mips_cps_core_init)
304
305/**
306 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
307 *
308 * Returns: pointer to struct core_boot_config in v0, pointer to
309 *          struct vpe_boot_config in v1, VPE ID in t9
310 *
 * Clobbers: t0, t1, t2, ta2, ta3 (and v0/v1/t9 as returns).
311 */
312LEAF(mips_cps_get_bootcfg)
313	/* Calculate a pointer to this cores struct core_boot_config */
	/* v0 = mips_cps_core_bootcfg + (GCR core ID * COREBOOTCFG_SIZE) */
314	cmgcrb	t0
315	lw	t0, GCR_CL_ID_OFS(t0)
316	li	t1, COREBOOTCFG_SIZE
317	mul	t0, t0, t1
318	PTR_LA	t1, mips_cps_core_bootcfg
319	PTR_L	t1, 0(t1)
320	PTR_ADDU v0, t0, t1
321
322	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
323	li	t9, 0
324#if defined(CONFIG_CPU_MIPSR6)
325	has_vp	ta2, 1f
326
327	/*
328	 * Assume non-contiguous numbering. Perhaps some day we'll need
329	 * to handle contiguous VP numbering, but no such systems yet
330	 * exist.
331	 */
332	mfc0	t9, CP0_GLOBALNUMBER
333	andi	t9, t9, MIPS_GLOBALNUMBER_VP
334#elif defined(CONFIG_MIPS_MT_SMP)
335	has_mt	ta2, 1f
336
337	/* Find the number of VPEs present in the core */
	/* t1 = MVPConf0.PVPE + 1 = VPE count */
338	mfc0	t1, CP0_MVPCONF0
339	srl	t1, t1, MVPCONF0_PVPE_SHIFT
340	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
341	addiu	t1, t1, 1
342
343	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	/* t1 = (1 << floor(log2(vpe_count))) - 1, i.e. a mask wide enough
	 * to extract this core's VPE index from the global CPUNum. */
344	clz	t1, t1
345	li	t2, 31
346	subu	t1, t2, t1
347	li	t2, 1
348	sll	t1, t2, t1
349	addiu	t1, t1, -1
350
351	/* Retrieve the VPE ID from EBase.CPUNum */
	/* $15 sel 1 is CP0 EBase; its CPUNum field occupies the low bits. */
352	mfc0	t9, $15, 1
353	and	t9, t9, t1
354#endif
355
3561:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
	/* v1 = core's vpe_config array + (VPE ID * VPEBOOTCFG_SIZE) */
357	li	t1, VPEBOOTCFG_SIZE
358	mul	v1, t9, t1
359	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
360	PTR_ADDU v1, v1, ta3
361
362	jr	ra
363	 nop
364	END(mips_cps_get_bootcfg)
364
365LEAF(mips_cps_boot_vpes)
	/*
	 * Boot the VPEs of this core that should be running and stop/halt
	 * this one if it should not be.
	 *
	 * In:  a0 = pointer to this core's struct core_boot_config
	 *      a1 = this VPE's index within the core (used as a bit index
	 *           into the VPE mask below)
	 * Uses the CPC VC_RUN/VC_STOP registers on MIPSr6, or the MT ASE
	 * TC/VPE configuration registers otherwise.
	 */
366	lw	ta2, COREBOOTCFG_VPEMASK(a0)
367	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)
368
369#if defined(CONFIG_CPU_MIPSR6)
370
	/* No VP support: nothing to do, return via 5f. */
371	has_vp	t0, 5f
372
373	/* Find base address of CPC */
	/* GCR_CPC_BASE holds the CPC physical base in its upper bits; mask
	 * off the low 15 bits and map through UNCAC_BASE for uncached
	 * access. */
374	cmgcrb	t3
375	PTR_L	t1, GCR_CPC_BASE_OFS(t3)
376	PTR_LI	t2, ~0x7fff
377	and	t1, t1, t2
378	PTR_LI	t2, UNCAC_BASE
379	PTR_ADD	t1, t1, t2
380
381	/* Start any other VPs that ought to be running */
382	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)
383
384	/* Ensure this VP stops running if it shouldn't be */
	/* Write the complement of the run mask to VC_STOP: every VP not in
	 * the mask (possibly including this one) is stopped. */
385	not	ta2
386	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
387	ehb
388
389#elif defined(CONFIG_MIPS_MT)
390
391	/* If the core doesn't support MT then return */
392	has_mt	t0, 5f
393
394	/* Enter VPE configuration state */
395	.set	push
396	.set	MIPS_ISA_LEVEL_RAW
397	.set	mt
398	dvpe
399	.set	pop
400
	/* Hazard-barrier jump so dvpe has taken effect before VPC is set. */
401	PTR_LA	t1, 1f
402	jr.hb	t1
403	 nop
4041:	mfc0	t1, CP0_MVPCONTROL
405	ori	t1, t1, MVPCONTROL_VPC
406	mtc0	t1, CP0_MVPCONTROL
407	ehb
408
409	/* Loop through each VPE */
	/* t8 keeps the full mask (ta2 is shifted away in the loop);
	 * ta1 = current VPE index. */
410	move	t8, ta2
411	li	ta1, 0
412
413	/* Check whether the VPE should be running. If not, skip it */
4141:	andi	t0, ta2, 1
415	beqz	t0, 2f
416	 nop
417
418	/* Operate on the appropriate TC */
	/* ori+xori clears the TargTC field, then OR in the target TC. */
419	mfc0	t0, CP0_VPECONTROL
420	ori	t0, t0, VPECONTROL_TARGTC
421	xori	t0, t0, VPECONTROL_TARGTC
422	or	t0, t0, ta1
423	mtc0	t0, CP0_VPECONTROL
424	ehb
425
426	.set	push
427	.set	MIPS_ISA_LEVEL_RAW
428	.set	mt
429
430	/* Skip the VPE if its TC is not halted */
431	mftc0	t0, CP0_TCHALT
432	beqz	t0, 2f
433	 nop
434
435	/* Calculate a pointer to the VPEs struct vpe_boot_config */
436	li	t0, VPEBOOTCFG_SIZE
437	mul	t0, t0, ta1
438	addu	t0, t0, ta3
439
440	/* Set the TC restart PC */
441	lw	t1, VPEBOOTCFG_PC(t0)
442	mttc0	t1, CP0_TCRESTART
443
444	/* Set the TC stack pointer */
445	lw	t1, VPEBOOTCFG_SP(t0)
446	mttgpr	t1, sp
447
448	/* Set the TC global pointer */
449	lw	t1, VPEBOOTCFG_GP(t0)
450	mttgpr	t1, gp
451
452	/* Copy config from this VPE */
453	mfc0	t0, CP0_CONFIG
454	mttc0	t0, CP0_CONFIG
455
456	/*
457	 * Copy the EVA config from this VPE if the CPU supports it.
458	 * CONFIG3 must exist to be running MT startup - just read it.
459	 */
460	mfc0	t0, CP0_CONFIG, 3
461	and	t0, t0, MIPS_CONF3_SC
462	beqz	t0, 3f
463	 nop
464	mfc0    t0, CP0_SEGCTL0
465	mttc0	t0, CP0_SEGCTL0
466	mfc0    t0, CP0_SEGCTL1
467	mttc0	t0, CP0_SEGCTL1
468	mfc0    t0, CP0_SEGCTL2
469	mttc0	t0, CP0_SEGCTL2
4703:
471	/* Ensure no software interrupts are pending */
472	mttc0	zero, CP0_CAUSE
473	mttc0	zero, CP0_STATUS
474
475	/* Set TC active, not interrupt exempt */
476	mftc0	t0, CP0_TCSTATUS
477	li	t1, ~TCSTATUS_IXMT
478	and	t0, t0, t1
479	ori	t0, t0, TCSTATUS_A
480	mttc0	t0, CP0_TCSTATUS
481
482	/* Clear the TC halt bit */
483	mttc0	zero, CP0_TCHALT
484
485	/* Set VPE active */
486	mftc0	t0, CP0_VPECONF0
487	ori	t0, t0, VPECONF0_VPA
488	mttc0	t0, CP0_VPECONF0
489
490	/* Next VPE */
4912:	srl	ta2, ta2, 1
492	addiu	ta1, ta1, 1
493	bnez	ta2, 1b
494	 nop
495
496	/* Leave VPE configuration state */
497	mfc0	t1, CP0_MVPCONTROL
498	xori	t1, t1, MVPCONTROL_VPC
499	mtc0	t1, CP0_MVPCONTROL
500	ehb
501	evpe
502
503	.set	pop
504
505	/* Check whether this VPE is meant to be running */
	/* Test bit a1 of the saved VPE mask in t8. */
506	li	t0, 1
507	sll	t0, t0, a1
508	and	t0, t0, t8
509	bnez	t0, 2f
510	 nop
511
512	/* This VPE should be offline, halt the TC */
	/* Halting our own TC stops execution here; the jr.hb never really
	 * proceeds past the halt taking effect. */
513	li	t0, TCHALT_H
514	mtc0	t0, CP0_TCHALT
515	PTR_LA	t0, 1f
5161:	jr.hb	t0
517	 nop
518
5192:
520
521#endif /* CONFIG_CPU_MIPSR6 || CONFIG_MIPS_MT */
522
523	/* Return */
5245:	jr	ra
525	 nop
526	END(mips_cps_boot_vpes)
527
528LEAF(mips_cps_cache_init)
	/*
	 * Initialise (invalidate) the L1 I & D caches by writing zeroed tags
	 * to every index. Geometry is decoded from Config1.
	 * Clobbers t0-t3, a0, a1, v0.
	 */
529	/*
530	 * Clear the bits used to index the caches. Note that the architecture
531	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
532	 * be valid for all MIPS32 CPUs, even those for which said writes are
533	 * unnecessary.
534	 */
535	mtc0	zero, CP0_TAGLO, 0
536	mtc0	zero, CP0_TAGHI, 0
537	mtc0	zero, CP0_TAGLO, 2
538	mtc0	zero, CP0_TAGHI, 2
539	ehb
540
541	/* Primary cache configuration is indicated by Config1 */
542	mfc0	v0, CP0_CONFIG, 1
543
544	/* Detect I-cache line size */
	/* IL=0 means no I-cache; otherwise line size = 2 << IL bytes. */
545	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
546	beqz	t0, icache_done
547	 li	t1, 2
548	sllv	t0, t1, t0
549
550	/* Detect I-cache size */
	/* IS=7 encodes 32 sets/way; otherwise sets/way = 64 << IS
	 * (computed as 32 << (IS + 1)). */
551	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
552	xori	t2, t1, 0x7
553	beqz	t2, 1f
554	 li	t3, 32
555	addiu	t1, t1, 1
556	sllv	t1, t3, t1
5571:	/* At this point t1 == I-cache sets per way */
	/* IA holds (associativity - 1); total bytes = sets * line * ways. */
558	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
559	addiu	t2, t2, 1
560	mul	t1, t1, t0
561	mul	t1, t1, t2
562
	/* Index_Store_Tag over [a0, a1) in line-size steps writes the zeroed
	 * tag to every I-cache index. */
563	li	a0, CKSEG0
564	PTR_ADD	a1, a0, t1
5651:	cache	Index_Store_Tag_I, 0(a0)
566	PTR_ADD	a0, a0, t0
567	bne	a0, a1, 1b
568	 nop
569icache_done:
570
571	/* Detect D-cache line size */
	/* Same decoding as the I-cache, using the DL/DS/DA fields. */
572	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
573	beqz	t0, dcache_done
574	 li	t1, 2
575	sllv	t0, t1, t0
576
577	/* Detect D-cache size */
578	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
579	xori	t2, t1, 0x7
580	beqz	t2, 1f
581	 li	t3, 32
582	addiu	t1, t1, 1
583	sllv	t1, t3, t1
5841:	/* At this point t1 == D-cache sets per way */
585	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
586	addiu	t2, t2, 1
587	mul	t1, t1, t0
588	mul	t1, t1, t2
589
	/* Here a1 = last line's address, and the loop runs inclusive of a1
	 * with the increment in the branch delay slot — same coverage as the
	 * I-cache loop, just shaped differently. */
590	li	a0, CKSEG0
591	PTR_ADDU a1, a0, t1
592	PTR_SUBU a1, a1, t0
5931:	cache	Index_Store_Tag_D, 0(a0)
594	bne	a0, a1, 1b
595	 PTR_ADD a0, a0, t0
596dcache_done:
597
598	jr	ra
599	 nop
600	END(mips_cps_cache_init)
601
602#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
603
604	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
	/*
	 * \dest = &cps_cpu_state + __per_cpu_offset[thread_info->cpu].
	 * Reads the CPU number through TI_CPU(gp) — assumes gp holds the
	 * current thread_info pointer (standard kernel convention here;
	 * TODO(review) confirm for all callers). Scales the CPU number by
	 * the pointer size (LONGLOG) to index the __per_cpu_offset array.
	 * Clobbers $1 (at), hence the noat bracket.
	 */
605	.macro	psstate	dest
606	.set	push
607	.set	noat
608	lw	$1, TI_CPU(gp)
609	sll	$1, $1, LONGLOG
610	PTR_LA	\dest, __per_cpu_offset
611	addu	$1, $1, \dest
612	lw	$1, 0($1)
613	PTR_LA	\dest, cps_cpu_state
614	addu	\dest, \dest, $1
615	.set	pop
616	.endm
617
618LEAF(mips_cps_pm_save)
	/*
	 * Save this CPU's register and static state into its per-CPU
	 * cps_cpu_state (located via the psstate macro into t1, which the
	 * SUSPEND_SAVE_STATIC macro consumes).
	 * NOTE(review): returns via jr v0, not ra — v0 is expected to hold
	 * the continuation address per the SUSPEND_SAVE_REGS contract in
	 * asm/pm.h; confirm against that header.
	 */
619	/* Save CPU state */
620	SUSPEND_SAVE_REGS
621	psstate	t1
622	SUSPEND_SAVE_STATIC
623	jr	v0
624	 nop
625	END(mips_cps_pm_save)
626
627LEAF(mips_cps_pm_restore)
	/*
	 * Restore the state saved by mips_cps_pm_save from this CPU's
	 * per-CPU cps_cpu_state (psstate puts its address in t1, consumed
	 * by RESUME_RESTORE_STATIC). RESUME_RESTORE_REGS_RETURN both
	 * restores registers and returns, so no jr follows it.
	 */
628	/* Restore CPU state */
629	psstate	t1
630	RESUME_RESTORE_STATIC
631	RESUME_RESTORE_REGS_RETURN
632	END(mips_cps_pm_restore)
633
634#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
635